From f6cd28224d42332c7151e0256497e27b89369363 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Tue, 5 Apr 2022 23:31:50 +0200 Subject: [PATCH 001/435] wip: support for multiple db drivers --- Cargo.lock | 980 +++++++++++++++++++++++++++++++++++++++-- Cargo.toml | 2 + src/database.rs | 217 ++------- src/lib.rs | 3 +- src/sqlite_database.rs | 186 ++++++++ src/tracker.rs | 29 +- 6 files changed, 1202 insertions(+), 215 deletions(-) create mode 100644 src/sqlite_database.rs diff --git a/Cargo.lock b/Cargo.lock index 4279ad5b9..bf0fed8f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,23 @@ # It is not intended for manual editing. version = 3 +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "ahash" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43bb833f0bf979d8475d38fbf09ed3b8a55e1885fe93ad3f93239fc6a4f17b98" +dependencies = [ + "getrandom 0.2.3", + "once_cell", + "version_check", +] + [[package]] name = "aho-corasick" version = "0.7.18" @@ -11,6 +28,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi", +] + [[package]] name = "aquatic_udp_protocol" version = "0.1.0" @@ -26,30 +52,111 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +[[package]] +name = "arrayvec" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" + +[[package]] +name = "async-trait" +version = "0.1.52" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + [[package]] name = "autocfg" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +[[package]] +name = "base-x" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" + [[package]] name = "base64" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +[[package]] +name = "bigdecimal" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1e50562e37200edf7c6c43e54a08e64a5553bfb59d9c297d5572512aa517256" +dependencies = [ + "num-bigint 0.3.3", + "num-integer", + "num-traits 0.2.14", + "serde 1.0.120", +] + [[package]] name = "binascii" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" +[[package]] +name = "bindgen" +version = "0.58.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f8523b410d7187a43085e7e064416ea32ded16bd0a4e6fc025e21616d01258f" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "clap", + "env_logger", + "lazy_static", + "lazycell", + "log", + "peeking_take_while", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "which", +] + [[package]] name = "bitflags" version = "1.2.1" source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +[[package]] +name = "bitvec" +version = "0.22.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5237f00a8c86130a0cc317830e558b966dd7850d48a953d998c813f01a41b527" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + [[package]] name = "block-buffer" version = "0.7.3" @@ -90,6 +197,12 @@ dependencies = [ "safemem", ] +[[package]] +name = "bufstream" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" + [[package]] name = "bumpalo" version = "3.8.0" @@ -116,9 +229,18 @@ checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" [[package]] name = "cc" -version = "1.0.66" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" +checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" + +[[package]] +name = "cexpr" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27" +dependencies = [ + "nom", +] [[package]] name = "cfg-if" @@ -141,10 +263,46 @@ dependencies = [ "libc", "num-integer", "num-traits 0.2.14", - "time", + "serde 1.0.120", + "time 0.1.44", "winapi", ] +[[package]] +name = "clang-sys" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cc00842eed744b858222c4c9faf7243aafc6d33f92f96935263ef4d8a41ce21" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "clap" +version = "2.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" +dependencies = [ + 
"ansi_term", + "atty", + "bitflags", + "strsim", + "textwrap", + "unicode-width", + "vec_map", +] + +[[package]] +name = "cmake" +version = "0.1.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8ad8cef104ac57b68b89df3208164d228503abbdce70f6880ffa3d970e7443a" +dependencies = [ + "cc", +] + [[package]] name = "config" version = "0.11.0" @@ -161,12 +319,34 @@ dependencies = [ "yaml-rust", ] +[[package]] +name = "const_fn" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbdcdcb6d86f71c5e97409ad45898af11cbc995b4ee8112d59095a28d376c935" + [[package]] name = "convert_case" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" +[[package]] +name = "core-foundation" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" + [[package]] name = "cpufeatures" version = "0.2.1" @@ -176,6 +356,15 @@ dependencies = [ "libc", ] +[[package]] +name = "crc32fast" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +dependencies = [ + "cfg-if 1.0.0", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -185,7 +374,18 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "rustc_version", + "rustc_version 0.4.0", + "syn", +] + +[[package]] +name = "derive_utils" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"532b4c15dccee12c7044f1fcad956e98410860b22231e44a3b827464797ca7bf" +dependencies = [ + "proc-macro2", + "quote", "syn", ] @@ -207,12 +407,31 @@ dependencies = [ "generic-array 0.14.4", ] +[[package]] +name = "discard" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" + [[package]] name = "either" version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +[[package]] +name = "env_logger" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" +dependencies = [ + "atty", + "humantime", + "log", + "regex", + "termcolor", +] + [[package]] name = "fake-simd" version = "0.1.2" @@ -240,12 +459,40 @@ dependencies = [ "log", ] +[[package]] +name = "flate2" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f" +dependencies = [ + "cfg-if 1.0.0", + "crc32fast", + "libc", + "libz-sys", + "miniz_oxide", +] + [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.0.0" @@ -256,6 +503,76 @@ dependencies = [ 
"percent-encoding", ] +[[package]] +name = "frunk" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cd67cf7d54b7e72d0ea76f3985c3747d74aee43e0218ad993b7903ba7a5395e" +dependencies = [ + "frunk_core", + "frunk_derives", + "frunk_proc_macros", +] + +[[package]] +name = "frunk_core" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1246cf43ec80bf8b2505b5c360b8fb999c97dabd17dbb604d85558d5cbc25482" + +[[package]] +name = "frunk_derives" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3dbc4f084ec5a3f031d24ccedeb87ab2c3189a2f33b8d070889073837d5ea09e" +dependencies = [ + "frunk_proc_macro_helpers", + "quote", + "syn", +] + +[[package]] +name = "frunk_proc_macro_helpers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99f11257f106c6753f5ffcb8e601fb39c390a088017aaa55b70c526bff15f63e" +dependencies = [ + "frunk_core", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frunk_proc_macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a078bd8459eccbb85e0b007b8f756585762a72a9efc53f359b371c3b6351dbcc" +dependencies = [ + "frunk_core", + "frunk_proc_macros_impl", + "proc-macro-hack", +] + +[[package]] +name = "frunk_proc_macros_impl" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ffba99f0fa4f57e42f57388fbb9a0ca863bc2b4261f3c5570fed579d5df6c32" +dependencies = [ + "frunk_core", + "frunk_proc_macro_helpers", + "proc-macro-hack", + "quote", + "syn", +] + +[[package]] +name = "funty" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1847abb9cb65d566acd5942e94aea9c8f547ad02c98e1649326fc0e8910b8b1e" + [[package]] name = "futures" version = "0.3.21" @@ -386,6 +703,12 @@ dependencies = [ "wasi 0.10.0+wasi-snapshot-preview1", ] 
+[[package]] +name = "glob" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" + [[package]] name = "h2" version = "0.3.4" @@ -411,6 +734,15 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" +[[package]] +name = "hashbrown" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +dependencies = [ + "ahash", +] + [[package]] name = "headers" version = "0.3.3" @@ -424,7 +756,7 @@ dependencies = [ "http", "mime", "sha-1 0.8.2", - "time", + "time 0.1.44", ] [[package]] @@ -484,6 +816,12 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + [[package]] name = "hyper" version = "0.14.2" @@ -501,7 +839,7 @@ dependencies = [ "httpdate", "itoa", "pin-project", - "socket2", + "socket2 0.3.19", "tokio", "tower-service", "tracing", @@ -526,7 +864,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" dependencies = [ "autocfg", - "hashbrown", + "hashbrown 0.9.1", ] [[package]] @@ -547,6 +885,18 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "io-enum" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03e3306b0f260aad2872563eb0d5d1a59f2420fad270a661dce59a01e92d806b" +dependencies = [ + "autocfg", + "derive_utils", + "quote", + "syn", +] + [[package]] name = "itoa" version = "0.4.7" @@ -568,13 
+918,29 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "lexical" +version = "5.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f404a90a744e32e8be729034fc33b90cf2a56418fbf594d69aa3c0214ad414e5" +dependencies = [ + "cfg-if 1.0.0", + "lexical-core", +] + [[package]] name = "lexical-core" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6607c62aa161d23d17a9072cc5da0be67cdfc89d3afb1e8d9c842bebc2525ffe" dependencies = [ - "arrayvec", + "arrayvec 0.5.2", "bitflags", "cfg-if 1.0.0", "ryu", @@ -583,9 +949,19 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.101" +version = "0.2.120" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad5c14e80759d0939d013e6ca49930e59fc53dd8e5009132f76240c179380c09" + +[[package]] +name = "libloading" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cb00336871be5ed2c8ed44b60ae9959dc5b9f08539422ed43f09e34ecaeba21" +checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" +dependencies = [ + "cfg-if 1.0.0", + "winapi", +] [[package]] name = "libsqlite3-sys" @@ -597,6 +973,17 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "libz-sys" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f35facd4a5673cb5a48822be2be1d4236c1c99cb4113cab7061ac720d5bf859" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "linked-hash-map" version = "0.5.4" @@ -621,6 +1008,15 @@ dependencies = [ "cfg-if 0.1.10", ] +[[package]] +name = "lru" +version = "0.6.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ea2d928b485416e8908cff2d97d621db22b27f7b3b6729e438bcf42c671ba91" +dependencies = [ + "hashbrown 0.11.2", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -642,6 +1038,15 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +[[package]] +name = "memoffset" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +dependencies = [ + "autocfg", +] + [[package]] name = "mime" version = "0.3.16" @@ -658,6 +1063,16 @@ dependencies = [ "unicase", ] +[[package]] +name = "miniz_oxide" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +dependencies = [ + "adler", + "autocfg", +] + [[package]] name = "mio" version = "0.7.7" @@ -677,7 +1092,7 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" dependencies = [ - "socket2", + "socket2 0.3.19", "winapi", ] @@ -699,6 +1114,109 @@ dependencies = [ "twoway", ] +[[package]] +name = "mysql" +version = "21.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06f5abe1c0f91831afd4d35298c08d958e80144869757b913891e5b0d00c2c96" +dependencies = [ + "bufstream", + "bytes", + "io-enum", + "libc", + "lru", + "mysql_common", + "named_pipe", + "native-tls", + "nix", + "once_cell", + "pem", + "percent-encoding", + "serde 1.0.120", + "serde_json", + "socket2 0.4.4", + "twox-hash", + "url", +] + +[[package]] +name = "mysql_common" +version = "0.27.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02fa08ec695a40ed899b1239e81d0d74de5b40802d4fc8b513e2c541717c434e" +dependencies 
= [ + "base64", + "bigdecimal", + "bindgen", + "bitflags", + "bitvec", + "byteorder", + "bytes", + "cc", + "chrono", + "cmake", + "crc32fast", + "flate2", + "frunk", + "lazy_static", + "lexical", + "num-bigint 0.4.3", + "num-traits 0.2.14", + "rand 0.8.4", + "regex", + "rust_decimal", + "saturating", + "serde 1.0.120", + "serde_json", + "sha1", + "sha2", + "smallvec", + "subprocess", + "thiserror", + "time 0.2.27", + "uuid", +] + +[[package]] +name = "named_pipe" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad9c443cce91fc3e12f017290db75dde490d685cdaaf508d7159d7cf41f0eb2b" +dependencies = [ + "winapi", +] + +[[package]] +name = "native-tls" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48ba9f7719b5a0f42f338907614285fb5fd70e53858141f69898a1fb7203b24d" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "nix" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" +dependencies = [ + "bitflags", + "cc", + "cfg-if 1.0.0", + "libc", + "memoffset", +] + [[package]] name = "nom" version = "5.1.2" @@ -719,6 +1237,28 @@ dependencies = [ "winapi", ] +[[package]] +name = "num-bigint" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" +dependencies = [ + "autocfg", + "num-integer", + "num-traits 0.2.14", +] + +[[package]] +name = "num-bigint" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +dependencies = [ + "autocfg", + "num-integer", + "num-traits 0.2.14", +] + [[package]] name = 
"num-integer" version = "0.1.44" @@ -758,22 +1298,55 @@ dependencies = [ ] [[package]] -name = "once_cell" -version = "1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" - -[[package]] -name = "opaque-debug" -version = "0.2.3" +name = "once_cell" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" + +[[package]] +name = "opaque-debug" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" + +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + +[[package]] +name = "openssl" +version = "0.10.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c7ae222234c30df141154f159066c5093ff73b63204dcda7121eb082fc56a95" +dependencies = [ + "bitflags", + "cfg-if 1.0.0", + "foreign-types", + "libc", + "once_cell", + "openssl-sys", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] -name = "opaque-debug" -version = "0.3.0" +name = "openssl-sys" +version = "0.9.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "7e46109c383602735fa0a2e48dd2b7c892b048e1bf69e5c3b1d804b7d9c203cb" +dependencies = [ + "autocfg", + "cc", + "libc", + "pkg-config", + "vcpkg", +] [[package]] name = "parking_lot" @@ -800,6 +1373,23 @@ dependencies = [ "winapi", ] 
+[[package]] +name = "peeking_take_while" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" + +[[package]] +name = "pem" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" +dependencies = [ + "base64", + "once_cell", + "regex", +] + [[package]] name = "percent-encoding" version = "2.1.0" @@ -850,6 +1440,12 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +[[package]] +name = "proc-macro-hack" +version = "0.5.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" + [[package]] name = "proc-macro2" version = "1.0.24" @@ -885,6 +1481,16 @@ dependencies = [ "scheduled-thread-pool", ] +[[package]] +name = "r2d2_mysql" +version = "21.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d05145690b395f5515feff202b8f4b9429c500f423ef7129175155c3c3a9e2" +dependencies = [ + "mysql", + "r2d2", +] + [[package]] name = "r2d2_sqlite" version = "0.16.0" @@ -895,6 +1501,12 @@ dependencies = [ "rusqlite", ] +[[package]] +name = "radium" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" + [[package]] name = "rand" version = "0.7.3" @@ -1039,7 +1651,7 @@ dependencies = [ "lru-cache", "memchr", "smallvec", - "time", + "time 0.1.44", ] [[package]] @@ -1048,13 +1660,39 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e52c148ef37f8c375d49d5a73aa70713125b7f19095948a923f80afdeb22ec2" +[[package]] +name = "rust_decimal" +version = "1.22.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d37baa70cf8662d2ba1c1868c5983dda16ef32b105cce41fb5c47e72936a90b3" +dependencies = [ + "arrayvec 0.7.2", + "num-traits 0.2.14", + "serde 1.0.120", +] + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver 0.9.0", +] + [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver", + "semver 1.0.4", ] [[package]] @@ -1082,6 +1720,22 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" +[[package]] +name = "saturating" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" + +[[package]] +name = "schannel" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +dependencies = [ + "lazy_static", + "winapi", +] + [[package]] name = "scheduled-thread-pool" version = "0.2.5" @@ -1113,12 +1767,50 @@ dependencies = [ "untrusted", ] +[[package]] +name = "security-framework" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23a2ac85147a3a11d77ecf1bc7166ec0b92febfa4461c37944e180f319ece467" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + 
+[[package]] +name = "security-framework-sys" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + [[package]] name = "semver" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012" +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + [[package]] name = "serde" version = "0.8.23" @@ -1224,6 +1916,40 @@ dependencies = [ "opaque-debug 0.3.0", ] +[[package]] +name = "sha1" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1da05c97445caa12d05e848c4a4fcbbea29e748ac28f7e80e9b010392063770" +dependencies = [ + "sha1_smol", +] + +[[package]] +name = "sha1_smol" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" + +[[package]] +name = "sha2" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.9.0", + "opaque-debug 0.3.0", +] + +[[package]] +name = "shlex" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" + [[package]] name = "signal-hook-registry" version = 
"1.3.0" @@ -1256,18 +1982,102 @@ dependencies = [ "winapi", ] +[[package]] +name = "socket2" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "spin" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "standback" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" +dependencies = [ + "version_check", +] + [[package]] name = "static_assertions" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "stdweb" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" +dependencies = [ + "discard", + "rustc_version 0.2.3", + "stdweb-derive", + "stdweb-internal-macros", + "stdweb-internal-runtime", + "wasm-bindgen", +] + +[[package]] +name = "stdweb-derive" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" +dependencies = [ + "proc-macro2", + "quote", + "serde 1.0.120", + "serde_derive", + "syn", +] + +[[package]] +name = "stdweb-internal-macros" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" +dependencies = [ + "base-x", + "proc-macro2", + "quote", + "serde 1.0.120", + "serde_derive", + "serde_json", + "sha1", + "syn", +] + +[[package]] +name = "stdweb-internal-runtime" +version = "0.1.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" + +[[package]] +name = "strsim" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" + +[[package]] +name = "subprocess" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "055cf3ebc2981ad8f0a5a17ef6652f652d87831f79fddcba2ac57bcb9a0aa407" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "syn" version = "1.0.67" @@ -1279,6 +2089,12 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + [[package]] name = "tempfile" version = "3.2.0" @@ -1293,6 +2109,24 @@ dependencies = [ "winapi", ] +[[package]] +name = "termcolor" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "textwrap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +dependencies = [ + "unicode-width", +] + [[package]] name = "thiserror" version = "1.0.26" @@ -1324,6 +2158,44 @@ dependencies = [ "winapi", ] +[[package]] +name = "time" +version = "0.2.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4752a97f8eebd6854ff91f1c1824cd6160626ac4bd44287f7f4ea2035a02a242" +dependencies = [ + "const_fn", + "libc", + "standback", + "stdweb", + "time-macros", + "version_check", + "winapi", +] + +[[package]] +name = "time-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" +dependencies = [ + "proc-macro-hack", + "time-macros-impl", +] + +[[package]] +name = "time-macros-impl" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd3c141a1b43194f3f56a1411225df8646c55781d5f26db825b3d98507eb482f" +dependencies = [ + "proc-macro-hack", + "proc-macro2", + "quote", + "standback", + "syn", +] + [[package]] name = "tinyvec" version = "1.3.1" @@ -1432,6 +2304,7 @@ name = "torrust-tracker" version = "2.2.0" dependencies = [ "aquatic_udp_protocol", + "async-trait", "binascii", "byteorder", "chrono", @@ -1443,6 +2316,7 @@ dependencies = [ "log", "percent-encoding", "r2d2", + "r2d2_mysql", "r2d2_sqlite", "rand 0.8.4", "serde 1.0.120", @@ -1516,6 +2390,17 @@ dependencies = [ "memchr", ] +[[package]] +name = "twox-hash" +version = "1.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0" +dependencies = [ + "cfg-if 1.0.0", + "rand 0.8.4", + "static_assertions", +] + [[package]] name = "typenum" version = "1.12.0" @@ -1546,6 +2431,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-width" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" + [[package]] name = "unicode-xid" version = "0.2.1" @@ -1576,12 +2467,24 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +[[package]] +name = "uuid" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" + [[package]] name = "vcpkg" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" +[[package]] +name = "vec_map" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" + [[package]] name = "version_check" version = "0.9.2" @@ -1714,6 +2617,15 @@ dependencies = [ "untrusted", ] +[[package]] +name = "which" +version = "3.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724" +dependencies = [ + "libc", +] + [[package]] name = "winapi" version = "0.3.9" @@ -1730,12 +2642,30 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "wyz" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "129e027ad65ce1453680623c3fb5163cbf7107bfe1aa32257e7d0e63f9ced188" +dependencies = [ + "tap", +] + [[package]] name = "yaml-rust" version = "0.4.5" diff --git a/Cargo.toml b/Cargo.toml index 084a7cfb5..b29bf09f5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,6 +24,7 @@ log = {version = "0.4", features = ["release_max_level_info"]} fern = "0.6" chrono = "0.4" byteorder = "1" +r2d2_mysql = "21.0.0" r2d2_sqlite = "0.16.0" r2d2 = "0.8.8" rand = "0.8.4" @@ -32,3 +33,4 @@ derive_more = "0.99" thiserror = "1.0" aquatic_udp_protocol = { git = "https://github.com/greatest-ape/aquatic" } futures = 
"0.3.21" +async-trait = "0.1.52" diff --git a/src/database.rs b/src/database.rs index 82d26d6d8..63d06c4f9 100644 --- a/src/database.rs +++ b/src/database.rs @@ -1,196 +1,63 @@ use std::collections::BTreeMap; -use crate::{InfoHash, AUTH_KEY_LENGTH, TorrentEntry}; -use log::debug; -use r2d2_sqlite::{SqliteConnectionManager, rusqlite}; -use r2d2::{Pool}; -use r2d2_sqlite::rusqlite::NO_PARAMS; +use crate::{InfoHash, TorrentEntry}; use crate::key_manager::AuthKey; -use std::str::FromStr; +use crate::sqlite_database::SqliteDatabase; +use async_trait::async_trait; +use derive_more::{Display, Error}; -pub struct SqliteDatabase { - pool: Pool +pub enum DatabaseDrivers { + Sqlite3, + MySQL } -impl SqliteDatabase { - pub fn new(db_path: &str) -> Result { - let sqlite_connection_manager = SqliteConnectionManager::file(db_path); - let sqlite_pool = r2d2::Pool::new(sqlite_connection_manager).expect("Failed to create r2d2 SQLite connection pool."); - let sqlite_database = SqliteDatabase { - pool: sqlite_pool - }; - - if let Err(error) = SqliteDatabase::create_database_tables(&sqlite_database.pool) { - return Err(error) - }; - - Ok(sqlite_database) - } - - pub fn create_database_tables(pool: &Pool) -> Result { - let create_whitelist_table = " - CREATE TABLE IF NOT EXISTS whitelist ( - id integer PRIMARY KEY AUTOINCREMENT, - info_hash VARCHAR(20) NOT NULL UNIQUE - );".to_string(); - - let create_torrents_table = " - CREATE TABLE IF NOT EXISTS torrents ( - id integer PRIMARY KEY AUTOINCREMENT, - info_hash VARCHAR(20) NOT NULL UNIQUE, - completed INTEGER DEFAULT 0 NOT NULL - );".to_string(); - - let create_keys_table = format!(" - CREATE TABLE IF NOT EXISTS keys ( - id integer PRIMARY KEY AUTOINCREMENT, - key VARCHAR({}) NOT NULL UNIQUE, - valid_until INT(10) NOT NULL - );", AUTH_KEY_LENGTH as i8); - - let conn = pool.get().unwrap(); - match conn.execute(&create_whitelist_table, NO_PARAMS) { - Ok(updated) => { - match conn.execute(&create_keys_table, NO_PARAMS) { - Ok(updated2) => { 
- match conn.execute(&create_torrents_table, NO_PARAMS) { - Ok(updated3) => Ok(updated + updated2 + updated3), - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } - Err(e) => { - debug!("{:?}", e); - Err(e) - } +pub fn connect_database(db_driver: &DatabaseDrivers, db_path: &str) -> Result { + match db_driver { + DatabaseDrivers::Sqlite3 => { + let db = SqliteDatabase::new(db_path)?; + Ok(db) } - } - - pub async fn load_persistent_torrent_data(&self) -> Result, rusqlite::Error> { - let conn = self.pool.get().unwrap(); - let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; - - let torrent_iter = stmt.query_map(NO_PARAMS, |row| { - let info_hash_string: String = row.get(0)?; - let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); - let completed: u32 = row.get(1)?; - Ok((info_hash, completed)) - })?; - - let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(|x| x.ok() ).collect(); - - Ok(torrents) - } - - pub async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), rusqlite::Error> { - let mut conn = self.pool.get().unwrap(); - let db_transaction = conn.transaction()?; - - for (info_hash, torrent_entry) in torrents { - let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - let _ = db_transaction.execute("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); + _ => { + let db = SqliteDatabase::new(db_path)?; + Ok(db) } - - let _ = db_transaction.commit(); - - Ok(()) } +} - pub async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { - let conn = self.pool.get().unwrap(); - let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; - let mut rows = stmt.query(&[info_hash])?; +#[async_trait] +pub trait Database: Sync + Send { + fn create_database_tables(&self) -> Result; - if let Some(row) = rows.next()? 
{ - let info_hash: String = row.get(0).unwrap(); + async fn load_persistent_torrent_data(&self) -> Result, Error>; - // should never be able to fail - Ok(InfoHash::from_str(&info_hash).unwrap()) - } else { - Err(rusqlite::Error::QueryReturnedNoRows) - } - } + async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), Error>; - pub async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().unwrap(); - match conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", &[info_hash.to_string()]) { - Ok(updated) => { - if updated > 0 { return Ok(updated) } - Err(rusqlite::Error::ExecuteReturnedResults) - }, - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result; - pub async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().unwrap(); - match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", &[info_hash.to_string()]) { - Ok(updated) => { - if updated > 0 { return Ok(updated) } - Err(rusqlite::Error::ExecuteReturnedResults) - }, - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } + async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; - pub async fn get_key_from_keys(&self, key: &str) -> Result { - let conn = self.pool.get().unwrap(); - let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; - let mut rows = stmt.query(&[key.to_string()])?; + async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; - if let Some(row) = rows.next()? 
{ - let key: String = row.get(0).unwrap(); - let valid_until_i64: i64 = row.get(1).unwrap(); + async fn get_key_from_keys(&self, key: &str) -> Result; - Ok(AuthKey { - key, - valid_until: Some(valid_until_i64 as u64) - }) - } else { - Err(rusqlite::Error::QueryReturnedNoRows) - } - } + async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result; - pub async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result { - let conn = self.pool.get().unwrap(); - match conn.execute("INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", - &[auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()] - ) { - Ok(updated) => { - if updated > 0 { return Ok(updated) } - Err(rusqlite::Error::ExecuteReturnedResults) - }, - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } + async fn remove_key_from_keys(&self, key: String) -> Result; +} + +#[derive(Debug, Display, PartialEq, Error)] +#[allow(dead_code)] +pub enum Error { + #[display(fmt = "Query returned no rows.")] + QueryReturnedNoRows, + #[display(fmt = "Invalid query.")] + InvalidQuery, +} - pub async fn remove_key_from_keys(&self, key: String) -> Result { - let conn = self.pool.get().unwrap(); - match conn.execute("DELETE FROM keys WHERE key = ?", &[key]) { - Ok(updated) => { - if updated > 0 { return Ok(updated) } - Err(rusqlite::Error::ExecuteReturnedResults) - }, - Err(e) => { - debug!("{:?}", e); - Err(e) - } +impl From for Error { + fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { + match e { + r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => Error::QueryReturnedNoRows, + _ => Error::InvalidQuery } } } diff --git a/src/lib.rs b/src/lib.rs index c055cfae4..a9692ac66 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,11 +3,12 @@ pub mod tracker; pub mod http_api_server; pub mod common; pub mod utils; -pub mod database; +pub mod sqlite_database; pub mod key_manager; pub mod logging; pub mod torrust_udp_tracker; pub mod torrust_http_tracker; +pub mod database; pub use self::config::*; pub use 
torrust_udp_tracker::server::*; diff --git a/src/sqlite_database.rs b/src/sqlite_database.rs new file mode 100644 index 000000000..5bd081c6f --- /dev/null +++ b/src/sqlite_database.rs @@ -0,0 +1,186 @@ +use std::collections::BTreeMap; +use crate::{InfoHash, AUTH_KEY_LENGTH, TorrentEntry, database}; +use log::debug; +use r2d2_sqlite::{SqliteConnectionManager}; +use r2d2::{Pool}; +use r2d2_sqlite::rusqlite::NO_PARAMS; +use crate::key_manager::AuthKey; +use std::str::FromStr; +use crate::database::Database; +use async_trait::async_trait; + +pub struct SqliteDatabase { + pool: Pool +} + +impl SqliteDatabase { + pub fn new(db_path: &str) -> Result { + let cm = SqliteConnectionManager::file(db_path); + let pool = Pool::new(cm).expect("Failed to create r2d2 SQLite connection pool."); + Ok(SqliteDatabase { + pool + }) + } +} + +#[async_trait] +impl Database for SqliteDatabase { + fn create_database_tables(&self) -> Result { + let create_whitelist_table = " + CREATE TABLE IF NOT EXISTS whitelist ( + id integer PRIMARY KEY AUTOINCREMENT, + info_hash VARCHAR(20) NOT NULL UNIQUE + );".to_string(); + + let create_torrents_table = " + CREATE TABLE IF NOT EXISTS torrents ( + id integer PRIMARY KEY AUTOINCREMENT, + info_hash VARCHAR(20) NOT NULL UNIQUE, + completed INTEGER DEFAULT 0 NOT NULL + );".to_string(); + + let create_keys_table = format!(" + CREATE TABLE IF NOT EXISTS keys ( + id integer PRIMARY KEY AUTOINCREMENT, + key VARCHAR({}) NOT NULL UNIQUE, + valid_until INT(10) NOT NULL + );", AUTH_KEY_LENGTH as i8); + + let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + conn.execute(&create_whitelist_table, NO_PARAMS) + .and_then(|_| conn.execute(&create_whitelist_table, NO_PARAMS)) + .and_then(|_| conn.execute(&create_keys_table, NO_PARAMS)) + .and_then(|_| conn.execute(&create_torrents_table, NO_PARAMS)) + .map_err(|_| database::Error::InvalidQuery) + } + + async fn load_persistent_torrent_data(&self) -> Result, database::Error> { + let conn = 
self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; + + let torrent_iter = stmt.query_map(NO_PARAMS, |row| { + let info_hash_string: String = row.get(0)?; + let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); + let completed: u32 = row.get(1)?; + Ok((info_hash, completed)) + })?; + + let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(|x| x.ok() ).collect(); + + Ok(torrents) + } + + async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), database::Error> { + let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + let db_transaction = conn.transaction()?; + + for (info_hash, torrent_entry) in torrents { + let (_seeders, completed, _leechers) = torrent_entry.get_stats(); + let _ = db_transaction.execute("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); + } + + let _ = db_transaction.commit(); + + Ok(()) + } + + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { + let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; + let mut rows = stmt.query(&[info_hash])?; + + if let Some(row) = rows.next()? 
{ + let info_hash: String = row.get(0).unwrap(); + + // should never be able to fail + Ok(InfoHash::from_str(&info_hash).unwrap()) + } else { + Err(database::Error::InvalidQuery) + } + } + + async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { + let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + match conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", &[info_hash.to_string()]) { + Ok(updated) => { + if updated > 0 { return Ok(updated) } + Err(database::Error::QueryReturnedNoRows) + }, + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) + } + } + } + + async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { + let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", &[info_hash.to_string()]) { + Ok(updated) => { + if updated > 0 { return Ok(updated) } + Err(database::Error::QueryReturnedNoRows) + }, + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) + } + } + } + + async fn get_key_from_keys(&self, key: &str) -> Result { + let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; + let mut rows = stmt.query(&[key.to_string()])?; + + if let Some(row) = rows.next()? 
{ + let key: String = row.get(0).unwrap(); + let valid_until_i64: i64 = row.get(1).unwrap(); + + Ok(AuthKey { + key, + valid_until: Some(valid_until_i64 as u64) + }) + } else { + Err(database::Error::QueryReturnedNoRows) + } + } + + async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result { + let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + match conn.execute("INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", + &[auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()] + ) { + Ok(updated) => { + if updated > 0 { return Ok(updated) } + Err(database::Error::QueryReturnedNoRows) + }, + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) + } + } + } + + async fn remove_key_from_keys(&self, key: String) -> Result { + let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + match conn.execute("DELETE FROM keys WHERE key = ?", &[key]) { + Ok(updated) => { + if updated > 0 { return Ok(updated) } + Err(database::Error::QueryReturnedNoRows) + }, + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) + } + } + } +} diff --git a/src/tracker.rs b/src/tracker.rs index 98c5be730..24bd645d1 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -5,14 +5,14 @@ use std::collections::BTreeMap; use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; use crate::common::{AnnounceEventDef, InfoHash, NumberOfBytesDef, PeerId}; use std::net::{IpAddr, SocketAddr}; -use crate::{Configuration, key_manager, MAX_SCRAPE_TORRENTS}; +use crate::{Configuration, database, key_manager, MAX_SCRAPE_TORRENTS}; use std::collections::btree_map::Entry; -use crate::database::SqliteDatabase; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use log::{debug}; use crate::key_manager::AuthKey; -use r2d2_sqlite::rusqlite; +use crate::database::{Database, DatabaseDrivers}; +use crate::key_manager::Error::KeyInvalid; use crate::torrust_http_tracker::AnnounceRequest; 
#[derive(Serialize, Deserialize, Clone, PartialEq)] @@ -265,18 +265,19 @@ pub struct TrackerStats { pub struct TorrentTracker { pub config: Arc, torrents: tokio::sync::RwLock>, - database: SqliteDatabase, + database: Box, stats: tokio::sync::RwLock, } impl TorrentTracker { - pub fn new(config: Arc) -> Result { - let database = SqliteDatabase::new(&config.db_path)?; + pub fn new(config: Arc) -> Result { + let db_driver = DatabaseDrivers::Sqlite3; + let database = database::connect_database(&db_driver, "data")?; Ok(TorrentTracker { config, torrents: RwLock::new(std::collections::BTreeMap::new()), - database, + database: Box::new(database), stats: RwLock::new(TrackerStats { tcp4_connections_handled: 0, tcp4_announces_handled: 0, @@ -306,7 +307,7 @@ impl TorrentTracker { self.config.mode == TrackerMode::ListedMode || self.config.mode == TrackerMode::PrivateListedMode } - pub async fn generate_auth_key(&self, seconds_valid: u64) -> Result { + pub async fn generate_auth_key(&self, seconds_valid: u64) -> Result { let auth_key = key_manager::generate_auth_key(seconds_valid); // add key to database @@ -315,12 +316,12 @@ impl TorrentTracker { Ok(auth_key) } - pub async fn remove_auth_key(&self, key: String) -> Result { + pub async fn remove_auth_key(&self, key: String) -> Result { self.database.remove_key_from_keys(key).await } pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key_manager::Error> { - let db_key = self.database.get_key_from_keys(&auth_key.key).await?; + let db_key = self.database.get_key_from_keys(&auth_key.key).await.map_err(|_| KeyInvalid)?; key_manager::verify_auth_key(&db_key) } @@ -353,7 +354,7 @@ impl TorrentTracker { } // Loading the torrents into memory - pub async fn load_torrents(&self) -> Result<(), rusqlite::Error> { + pub async fn load_torrents(&self) -> Result<(), database::Error> { let torrents = self.database.load_persistent_torrent_data().await?; for torrent in torrents { @@ -364,18 +365,18 @@ impl TorrentTracker { } // 
Saving the torrents from memory - pub async fn save_torrents(&self) -> Result<(), rusqlite::Error> { + pub async fn save_torrents(&self) -> Result<(), database::Error> { let torrents = self.torrents.read().await; self.database.save_persistent_torrent_data(&*torrents).await } // Adding torrents is not relevant to public trackers. - pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result { + pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result { self.database.add_info_hash_to_whitelist(info_hash.clone()).await } // Removing torrents is not relevant to public trackers. - pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result { + pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result { self.database.remove_info_hash_from_whitelist(info_hash.clone()).await } From a9956381480bba15e00df08cab43b2e807e6a79f Mon Sep 17 00:00:00 2001 From: Power2All Date: Sun, 10 Apr 2022 09:45:49 +0200 Subject: [PATCH 002/435] Fixing torrent cleanup, adding defragmentation of memory, and fixed response failure and bumping the version --- Cargo.lock | 2 +- Cargo.toml | 2 +- src/torrust_http_tracker/response.rs | 1 + src/tracker.rs | 76 +++++++++++++++------------- 4 files changed, 44 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bf0fed8f5..68f52fa18 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2301,7 +2301,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "2.2.0" +version = "2.2.1" dependencies = [ "aquatic_udp_protocol", "async-trait", diff --git a/Cargo.toml b/Cargo.toml index b29bf09f5..e3e57c378 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "torrust-tracker" -version = "2.2.0" +version = "2.2.1" license = "AGPL-3.0" authors = ["Mick van Dijke "] description = "A feature rich BitTorrent tracker." 
diff --git a/src/torrust_http_tracker/response.rs b/src/torrust_http_tracker/response.rs index d459a6fdd..af27bc5e9 100644 --- a/src/torrust_http_tracker/response.rs +++ b/src/torrust_http_tracker/response.rs @@ -86,6 +86,7 @@ impl ScrapeResponse { #[derive(Serialize)] pub struct ErrorResponse { + #[serde(rename = "failure reason")] pub failure_reason: String } diff --git a/src/tracker.rs b/src/tracker.rs index 24bd645d1..b79ffb6cf 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -9,7 +9,7 @@ use crate::{Configuration, database, key_manager, MAX_SCRAPE_TORRENTS}; use std::collections::btree_map::Entry; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use log::{debug}; +use log::info; use crate::key_manager::AuthKey; use crate::database::{Database, DatabaseDrivers}; use crate::key_manager::Error::KeyInvalid; @@ -456,47 +456,53 @@ impl TorrentTracker { self.stats.read().await } - // remove torrents without peers + // remove torrents without peers if enabled, and defragment memory pub async fn cleanup_torrents(&self) { - debug!("Cleaning torrents.."); - let mut lock = self.torrents.write().await; - let db: &mut BTreeMap = &mut *lock; - let mut torrents_to_remove = Vec::new(); - - for (k, torrent_entry) in db.iter_mut() { - // timed-out peers.. - { - let mut peers_to_remove = Vec::new(); - let torrent_peers = &mut torrent_entry.peers; - - for (peer_id, peer) in torrent_peers.iter() { - if peer.is_seeder() { - if peer.updated.elapsed() > std::time::Duration::from_secs(self.config.peer_timeout as u64) { - // remove seeders after 5 minutes since last update... - peers_to_remove.push(peer_id.clone()); - torrent_entry.seeders -= 1; - } - } else if peer.updated.elapsed() > std::time::Duration::from_secs(self.config.peer_timeout as u64) { - // remove peers after 2 hours since last update... 
- peers_to_remove.push(peer_id.clone()); - } - } + info!("Cleaning torrents..."); + let lock = self.torrents.write().await; + + // First we create a mapping of all the torrent hashes in a vector, and we use this to iterate through the btreemap. + // Every hash we have handled, we remove from the btreemap completely, and push it to the top. + let mut torrent_hashes: Vec = Vec::new(); + for (k, _torrent_entry) in lock.iter() { + torrent_hashes.push(k.clone()); + } + + drop(lock); + + // Let's iterate through all torrents, and parse. + for hash in torrent_hashes.iter() { + let mut torrent = TorrentEntry{ + peers: BTreeMap::new(), + completed: 0, + seeders: 0 + }; - for peer_id in peers_to_remove.iter() { - torrent_peers.remove(peer_id); + let lock = self.torrents.write().await; + let torrent_data = lock.get(hash).unwrap().clone(); + drop(lock); + + torrent.completed = torrent_data.completed.clone(); + for (peer_id, peer) in torrent_data.peers.iter() { + if peer.updated.elapsed() > std::time::Duration::from_secs(self.config.peer_timeout as u64) { + continue; + } + torrent.peers.insert(peer_id.clone(), peer.clone()); + if peer.is_seeder() { + torrent.seeders += 1; } } - + let mut lock = self.torrents.write().await; + lock.remove(hash); if self.config.mode.clone() == TrackerMode::PublicMode && self.config.cleanup_peerless && !self.config.persistence { - // peer-less torrents.. 
- if torrent_entry.peers.len() == 0 { - torrents_to_remove.push(k.clone()); + if torrent.peers.len() != 0 { + lock.insert(hash.clone(), torrent); } + } else { + lock.insert(hash.clone(), torrent); } + drop(lock); } - - for info_hash in torrents_to_remove { - db.remove(&info_hash); - } + info!("Torrents cleaned up."); } } From 5baea30dfe2f9cb2f3005de63775622bf3bb3396 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 11 Apr 2022 01:01:03 +0200 Subject: [PATCH 003/435] feat: added MySQL database support --- src/config.rs | 3 + src/database.rs | 28 +++++-- src/lib.rs | 1 + src/mysql_database.rs | 183 +++++++++++++++++++++++++++++++++++++++++ src/sqlite_database.rs | 4 +- src/tracker.rs | 7 +- 6 files changed, 212 insertions(+), 14 deletions(-) create mode 100644 src/mysql_database.rs diff --git a/src/config.rs b/src/config.rs index 94b37464d..529ae07fe 100644 --- a/src/config.rs +++ b/src/config.rs @@ -8,6 +8,7 @@ use std::net::{IpAddr}; use std::path::Path; use std::str::FromStr; use config::{ConfigError, Config, File}; +use crate::database::DatabaseDrivers; #[derive(Serialize, Deserialize, PartialEq)] pub enum TrackerServer { @@ -50,6 +51,7 @@ pub struct HttpApiConfig { pub struct Configuration { pub log_level: Option, pub mode: TrackerMode, + pub db_driver: DatabaseDrivers, pub db_path: String, pub persistence: bool, pub cleanup_interval: Option, @@ -132,6 +134,7 @@ impl Configuration { let mut configuration = Configuration { log_level: Option::from(String::from("info")), mode: TrackerMode::PublicMode, + db_driver: DatabaseDrivers::Sqlite3, db_path: String::from("data.db"), persistence: false, cleanup_interval: Some(600), diff --git a/src/database.rs b/src/database.rs index 63d06c4f9..0c0e4a303 100644 --- a/src/database.rs +++ b/src/database.rs @@ -4,28 +4,38 @@ use crate::key_manager::AuthKey; use crate::sqlite_database::SqliteDatabase; use async_trait::async_trait; use derive_more::{Display, Error}; +use log::debug; +use crate::mysql_database::MysqlDatabase; 
+use serde::{Serialize, Deserialize}; +#[derive(Serialize, Deserialize, Debug)] pub enum DatabaseDrivers { Sqlite3, MySQL } -pub fn connect_database(db_driver: &DatabaseDrivers, db_path: &str) -> Result { - match db_driver { +pub fn connect_database(db_driver: &DatabaseDrivers, db_path: &str) -> Result, r2d2::Error> { + debug!("{:?}", db_driver); + + let database: Box = match db_driver { DatabaseDrivers::Sqlite3 => { let db = SqliteDatabase::new(db_path)?; - Ok(db) + Box::new(db) } - _ => { - let db = SqliteDatabase::new(db_path)?; - Ok(db) + DatabaseDrivers::MySQL => { + let db = MysqlDatabase::new(db_path)?; + Box::new(db) } - } + }; + + database.create_database_tables().expect("Could not create database tables."); + + Ok(database) } #[async_trait] pub trait Database: Sync + Send { - fn create_database_tables(&self) -> Result; + fn create_database_tables(&self) -> Result<(), Error>; async fn load_persistent_torrent_data(&self) -> Result, Error>; @@ -51,6 +61,8 @@ pub enum Error { QueryReturnedNoRows, #[display(fmt = "Invalid query.")] InvalidQuery, + #[display(fmt = "Database error.")] + DatabaseError, } impl From for Error { diff --git a/src/lib.rs b/src/lib.rs index a9692ac66..addc7e1a1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -9,6 +9,7 @@ pub mod logging; pub mod torrust_udp_tracker; pub mod torrust_http_tracker; pub mod database; +pub mod mysql_database; pub use self::config::*; pub use torrust_udp_tracker::server::*; diff --git a/src/mysql_database.rs b/src/mysql_database.rs new file mode 100644 index 000000000..f6907f309 --- /dev/null +++ b/src/mysql_database.rs @@ -0,0 +1,183 @@ +use std::collections::BTreeMap; +use crate::{InfoHash, AUTH_KEY_LENGTH, TorrentEntry, database}; +use log::debug; +use r2d2::{Pool}; +use crate::key_manager::AuthKey; +use std::str::FromStr; +use crate::database::Database; +use async_trait::async_trait; +use r2d2_mysql::mysql::{Opts, OptsBuilder, params, TxOpts}; +use r2d2_mysql::mysql::prelude::Queryable; +use 
r2d2_mysql::MysqlConnectionManager; + +pub struct MysqlDatabase { + pool: Pool +} + +impl MysqlDatabase { + pub fn new(db_path: &str) -> Result { + let opts = Opts::from_url(&db_path).expect("Failed to connect to MySQL database."); + let builder = OptsBuilder::from_opts(opts); + let manager = MysqlConnectionManager::new(builder); + let pool = r2d2::Pool::builder().build(manager).expect("Failed to create r2d2 MySQL connection pool."); + + Ok(Self { + pool + }) + } +} + +#[async_trait] +impl Database for MysqlDatabase { + fn create_database_tables(&self) -> Result<(), database::Error> { + let create_whitelist_table = " + CREATE TABLE IF NOT EXISTS whitelist ( + id integer PRIMARY KEY AUTO_INCREMENT, + info_hash VARCHAR(20) NOT NULL UNIQUE + );".to_string(); + + let create_torrents_table = " + CREATE TABLE IF NOT EXISTS torrents ( + id integer PRIMARY KEY AUTO_INCREMENT, + info_hash VARCHAR(20) NOT NULL UNIQUE, + completed INTEGER DEFAULT 0 NOT NULL + );".to_string(); + + let create_keys_table = format!(" + CREATE TABLE IF NOT EXISTS `keys` ( + `id` INT NOT NULL AUTO_INCREMENT, + `key` VARCHAR({}) NOT NULL, + `valid_until` INT(10) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE (`key`) + );", AUTH_KEY_LENGTH as i8); + + let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + conn.query_drop(&create_torrents_table).expect("Could not create torrents table."); + conn.query_drop(&create_keys_table).expect("Could not create keys table."); + conn.query_drop(&create_whitelist_table).expect("Could not create whitelist table."); + + Ok(()) + } + + async fn load_persistent_torrent_data(&self) -> Result, database::Error> { + let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + let torrents: Vec<(InfoHash, u32)> = conn.query_map("SELECT info_hash, completed FROM torrents", |(info_hash_string, completed): (String, u32)| { + let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); + (info_hash, completed) + }).map_err(|_| 
database::Error::QueryReturnedNoRows)?; + + Ok(torrents) + } + + async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), database::Error> { + let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + let mut db_transaction = conn.start_transaction(TxOpts::default()).map_err(|_| database::Error::DatabaseError)?; + + for (info_hash, torrent_entry) in torrents { + let (_seeders, completed, _leechers) = torrent_entry.get_stats(); + let _ = db_transaction.exec_drop("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", (info_hash.to_string(), completed.to_string())); + } + + let _ = db_transaction.commit(); + + Ok(()) + } + + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { + let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + match conn.exec_first::("SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", params! { info_hash => info_hash }) + .map_err(|_| database::Error::QueryReturnedNoRows)? { + Some(info_hash) => { + Ok(InfoHash::from_str(&info_hash).unwrap()) + }, + None => { + Err(database::Error::InvalidQuery) + } + } + } + + async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { + let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + let info_hash_str = info_hash.to_string(); + + match conn.exec_drop("INSERT INTO whitelist (info_hash) VALUES (:info_hash_str)", params! { info_hash_str }) { + Ok(_) => { + Ok(1) + }, + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) + } + } + } + + async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { + let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + let info_hash = info_hash.to_string(); + + match conn.exec_drop("DELETE FROM whitelist WHERE info_hash = :info_hash", params! 
{ info_hash }) { + Ok(_) => { + Ok(1) + }, + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) + } + } + } + + async fn get_key_from_keys(&self, key: &str) -> Result { + let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + match conn.exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key => key }) + .map_err(|_| database::Error::QueryReturnedNoRows)? { + Some((key, valid_until)) => { + Ok(AuthKey { + key, + valid_until: Some(valid_until as u64) + }) + }, + None => { + Err(database::Error::InvalidQuery) + } + } + } + + async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result { + let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + let key = auth_key.key.to_string(); + let valid_until = auth_key.valid_until.unwrap_or(0).to_string(); + + match conn.exec_drop("INSERT INTO `keys` (`key`, valid_until) VALUES (:key, :valid_until)", params! { key, valid_until }) { + Ok(_) => { + Ok(1) + }, + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) + } + } + } + + async fn remove_key_from_keys(&self, key: String) -> Result { + let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + + match conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! 
{ key }) { + Ok(_) => { + Ok(1) + }, + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) + } + } + } +} diff --git a/src/sqlite_database.rs b/src/sqlite_database.rs index 5bd081c6f..c54a3c79c 100644 --- a/src/sqlite_database.rs +++ b/src/sqlite_database.rs @@ -25,7 +25,7 @@ impl SqliteDatabase { #[async_trait] impl Database for SqliteDatabase { - fn create_database_tables(&self) -> Result { + fn create_database_tables(&self) -> Result<(), database::Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( id integer PRIMARY KEY AUTOINCREMENT, @@ -49,10 +49,10 @@ impl Database for SqliteDatabase { let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; conn.execute(&create_whitelist_table, NO_PARAMS) - .and_then(|_| conn.execute(&create_whitelist_table, NO_PARAMS)) .and_then(|_| conn.execute(&create_keys_table, NO_PARAMS)) .and_then(|_| conn.execute(&create_torrents_table, NO_PARAMS)) .map_err(|_| database::Error::InvalidQuery) + .map(|_| ()) } async fn load_persistent_torrent_data(&self) -> Result, database::Error> { diff --git a/src/tracker.rs b/src/tracker.rs index b79ffb6cf..1caccff23 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -11,7 +11,7 @@ use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use log::info; use crate::key_manager::AuthKey; -use crate::database::{Database, DatabaseDrivers}; +use crate::database::{Database}; use crate::key_manager::Error::KeyInvalid; use crate::torrust_http_tracker::AnnounceRequest; @@ -271,13 +271,12 @@ pub struct TorrentTracker { impl TorrentTracker { pub fn new(config: Arc) -> Result { - let db_driver = DatabaseDrivers::Sqlite3; - let database = database::connect_database(&db_driver, "data")?; + let database = database::connect_database(&config.db_driver, &config.db_path)?; Ok(TorrentTracker { config, torrents: RwLock::new(std::collections::BTreeMap::new()), - database: Box::new(database), + database, stats: RwLock::new(TrackerStats { 
tcp4_connections_handled: 0, tcp4_announces_handled: 0, From f407b9e686dd0a713c9901132ad52590d7885669 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 11 Apr 2022 23:45:38 +0200 Subject: [PATCH 004/435] fix: error when running in PrivateMode or PrivateListedMode --- src/config.rs | 27 ++++++++------------------- src/main.rs | 38 ++++++++++++++++++++++---------------- src/tracker.rs | 8 ++++---- 3 files changed, 34 insertions(+), 39 deletions(-) diff --git a/src/config.rs b/src/config.rs index 529ae07fe..f9166e577 100644 --- a/src/config.rs +++ b/src/config.rs @@ -16,18 +16,17 @@ pub enum TrackerServer { HTTP } -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Debug)] pub struct UdpTrackerConfig { pub enabled: bool, pub bind_address: String, } -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Debug)] pub struct HttpTrackerConfig { pub enabled: bool, pub bind_address: String, pub ssl_enabled: bool, - pub ssl_bind_address: String, #[serde(serialize_with = "none_as_empty_string")] pub ssl_cert_path: Option, #[serde(serialize_with = "none_as_empty_string")] @@ -35,8 +34,11 @@ pub struct HttpTrackerConfig { } impl HttpTrackerConfig { - pub fn is_ssl_enabled(&self) -> bool { - self.ssl_enabled && self.ssl_cert_path.is_some() && self.ssl_key_path.is_some() + pub fn verify_ssl_cert_and_key_set(&self) -> bool { + self.ssl_cert_path.is_some() + && self.ssl_key_path.is_some() + && !self.ssl_cert_path.as_ref().unwrap().is_empty() + && !self.ssl_key_path.as_ref().unwrap().is_empty() } } @@ -163,7 +165,6 @@ impl Configuration { enabled: false, bind_address: String::from("0.0.0.0:6969"), ssl_enabled: false, - ssl_bind_address: String::from("0.0.0.0:6868"), ssl_cert_path: None, ssl_key_path: None } @@ -171,15 +172,6 @@ impl Configuration { configuration } - pub fn verify(&self) -> Result<(), ConfigurationError> { - // UDP is not secure for sending private keys - if self.mode == TrackerMode::PrivateMode || self.mode == 
TrackerMode::PrivateListedMode { - return Err(ConfigurationError::TrackerModeIncompatible) - } - - Ok(()) - } - pub fn load_from_file() -> Result { let mut config = Config::new(); @@ -197,10 +189,7 @@ impl Configuration { let torrust_config: Configuration = config.try_into().map_err(|e| ConfigError::Message(format!("Errors while processing config: {}.", e)))?; - match torrust_config.verify() { - Ok(_) => Ok(torrust_config), - Err(e) => Err(ConfigError::Message(format!("Errors while processing config: {}.", e))) - } + Ok(torrust_config) } pub fn save_to_file(&self) -> Result<(), ()>{ diff --git a/src/main.rs b/src/main.rs index 08610d24a..721385760 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,6 +1,6 @@ use std::net::SocketAddr; use std::sync::Arc; -use log::info; +use log::{info}; use tokio::task::JoinHandle; use torrust_tracker::{Configuration, http_api_server, HttpApiConfig, HttpTrackerConfig, logging, TorrentTracker, UdpServer, UdpTrackerConfig}; use torrust_tracker::torrust_http_tracker::server::HttpServer; @@ -39,24 +39,33 @@ async fn main() { let _api_server = start_api_server(&config.http_api, tracker.clone()); } + // used to send graceful shutdown signal to udp listeners let (tx, rx) = tokio::sync::watch::channel(false); let mut udp_server_handles = Vec::new(); // start the udp blocks for udp_tracker in &config.udp_trackers { - // used to send kill signal to thread + if !udp_tracker.enabled { continue; } - if udp_tracker.enabled { - udp_server_handles.push( - start_udp_tracker_server(&udp_tracker, tracker.clone(), rx.clone()).await - ) + if tracker.is_private() { + panic!("Could not start UDP tracker on: {} while in {:?}. 
UDP is not safe for private trackers!", udp_tracker.bind_address, config.mode); } + + udp_server_handles.push( + start_udp_tracker_server(&udp_tracker, tracker.clone(), rx.clone()).await + ) } // start the http blocks for http_tracker in &config.http_trackers { - let _ = start_http_tracker_server(&http_tracker, tracker.clone(), true); - let _ = start_http_tracker_server(&http_tracker, tracker.clone(), false); + if !http_tracker.enabled { continue; } + + // SSL requires a cert and a key + if http_tracker.ssl_enabled && !http_tracker.verify_ssl_cert_and_key_set() { + panic!("Could not start HTTP tracker on: {}, missing SSL Cert or Key!", http_tracker.bind_address); + } + + let _ = start_http_tracker_server(&http_tracker, tracker.clone()); } // handle the signals here @@ -110,22 +119,19 @@ fn start_api_server(config: &HttpApiConfig, tracker: Arc) -> Joi }) } -fn start_http_tracker_server(config: &HttpTrackerConfig, tracker: Arc, ssl: bool) -> JoinHandle<()> { +fn start_http_tracker_server(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { let http_tracker = HttpServer::new(tracker); - let enabled = config.enabled; let bind_addr = config.bind_address.parse::().unwrap(); let ssl_enabled = config.ssl_enabled; - let ssl_bind_addr = config.ssl_bind_address.parse::().unwrap(); let ssl_cert_path = config.ssl_cert_path.clone(); let ssl_key_path = config.ssl_key_path.clone(); tokio::spawn(async move { // run with tls if ssl_enabled and cert and key path are set - if ssl && ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { - info!("Starting HTTPS server on: {} (TLS)", ssl_bind_addr); - http_tracker.start_tls(ssl_bind_addr, ssl_cert_path.as_ref().unwrap(), ssl_key_path.as_ref().unwrap()).await; - } - if !ssl && enabled { + if ssl_enabled { + info!("Starting HTTPS server on: {} (TLS)", bind_addr); + http_tracker.start_tls(bind_addr, ssl_cert_path.as_ref().unwrap(), ssl_key_path.as_ref().unwrap()).await; + } else { info!("Starting HTTP server on: {}", 
bind_addr); http_tracker.start(bind_addr).await; } diff --git a/src/tracker.rs b/src/tracker.rs index 1caccff23..a1e172690 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -15,7 +15,7 @@ use crate::database::{Database}; use crate::key_manager::Error::KeyInvalid; use crate::torrust_http_tracker::AnnounceRequest; -#[derive(Serialize, Deserialize, Clone, PartialEq)] +#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)] pub enum TrackerMode { // Will track every new info hash and serve every peer. #[serde(rename = "public")] @@ -294,15 +294,15 @@ impl TorrentTracker { }) } - fn is_public(&self) -> bool { + pub fn is_public(&self) -> bool { self.config.mode == TrackerMode::PublicMode } - fn is_private(&self) -> bool { + pub fn is_private(&self) -> bool { self.config.mode == TrackerMode::PrivateMode || self.config.mode == TrackerMode::PrivateListedMode } - fn is_whitelisted(&self) -> bool { + pub fn is_whitelisted(&self) -> bool { self.config.mode == TrackerMode::ListedMode || self.config.mode == TrackerMode::PrivateListedMode } From 12523c4c4ad05a845cfc46b4f0636a50e06a56bc Mon Sep 17 00:00:00 2001 From: Power2All Date: Tue, 12 Apr 2022 13:06:33 +0200 Subject: [PATCH 005/435] Bypassing a error --- src/tracker.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/tracker.rs b/src/tracker.rs index a1e172690..43dd3b00f 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -202,7 +202,9 @@ impl TorrentEntry { } AnnounceEvent::Stopped => { if peer_old.is_seeder() { - self.seeders -= 1; + if self.seeders != 0 { + self.seeders -= 1; + } } } // impossible, started should be the first time a peer announces itself From 64fd27183d3faf20eab36c7dee7c439876c5cbe8 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 13 Apr 2022 01:46:16 +0200 Subject: [PATCH 006/435] fix: fixed PeerKeyInvalid error when using MySQL --- src/mysql_database.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/mysql_database.rs b/src/mysql_database.rs 
index f6907f309..0597d46aa 100644 --- a/src/mysql_database.rs +++ b/src/mysql_database.rs @@ -1,5 +1,5 @@ use std::collections::BTreeMap; -use crate::{InfoHash, AUTH_KEY_LENGTH, TorrentEntry, database}; +use crate::{InfoHash, AUTH_KEY_LENGTH, database}; use log::debug; use r2d2::{Pool}; use crate::key_manager::AuthKey; @@ -9,6 +9,7 @@ use async_trait::async_trait; use r2d2_mysql::mysql::{Opts, OptsBuilder, params, TxOpts}; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::MysqlConnectionManager; +use crate::torrent::TorrentEntry; pub struct MysqlDatabase { pool: Pool @@ -136,7 +137,7 @@ impl Database for MysqlDatabase { async fn get_key_from_keys(&self, key: &str) -> Result { let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; - match conn.exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key => key }) + match conn.exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }) .map_err(|_| database::Error::QueryReturnedNoRows)? 
{ Some((key, valid_until)) => { Ok(AuthKey { From fa330c7d94527613cf36cd47eae0dc0d98f0d481 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 13 Apr 2022 02:14:56 +0200 Subject: [PATCH 007/435] fix: stats tracker now runs on a dedicated thread instead of spawning every request --- Cargo.lock | 1 + Cargo.toml | 2 +- src/database.rs | 3 +- src/http_api_server.rs | 2 +- src/lib.rs | 2 + src/sqlite_database.rs | 3 +- src/torrent.rs | 221 ++++++++++++++++++++++ src/torrust_http_tracker/handlers.rs | 43 ++--- src/torrust_udp_tracker/handlers.rs | 48 ++--- src/tracker.rs | 271 ++------------------------- src/tracker_stats.rs | 123 ++++++++++++ 11 files changed, 400 insertions(+), 319 deletions(-) create mode 100644 src/torrent.rs create mode 100644 src/tracker_stats.rs diff --git a/Cargo.lock b/Cargo.lock index 68f52fa18..2cb824d71 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2224,6 +2224,7 @@ dependencies = [ "mio", "num_cpus", "once_cell", + "parking_lot", "pin-project-lite", "signal-hook-registry", "tokio-macros", diff --git a/Cargo.toml b/Cargo.toml index e3e57c378..81f76abe9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,7 +17,7 @@ serde_json = "1.0.72" hex = "0.4.3" percent-encoding = "2.1.0" warp = {version = "0.3", features = ["tls"]} -tokio = {version = "1.7", features = ["macros", "io-util", "net", "time", "rt-multi-thread", "fs", "sync", "signal"]} +tokio = {version = "1.7", features = ["full"]} binascii = "0.1" toml = "0.5" log = {version = "0.4", features = ["release_max_level_info"]} diff --git a/src/database.rs b/src/database.rs index 0c0e4a303..18bf41994 100644 --- a/src/database.rs +++ b/src/database.rs @@ -1,5 +1,5 @@ use std::collections::BTreeMap; -use crate::{InfoHash, TorrentEntry}; +use crate::{InfoHash}; use crate::key_manager::AuthKey; use crate::sqlite_database::SqliteDatabase; use async_trait::async_trait; @@ -7,6 +7,7 @@ use derive_more::{Display, Error}; use log::debug; use crate::mysql_database::MysqlDatabase; use serde::{Serialize, 
Deserialize}; +use crate::torrent::TorrentEntry; #[derive(Serialize, Deserialize, Debug)] pub enum DatabaseDrivers { diff --git a/src/http_api_server.rs b/src/http_api_server.rs index a6bee4a14..eff45fc33 100644 --- a/src/http_api_server.rs +++ b/src/http_api_server.rs @@ -4,7 +4,7 @@ use std::cmp::min; use std::collections::{HashMap, HashSet}; use std::sync::Arc; use warp::{filters, reply, reply::Reply, serve, Filter, Server}; -use crate::TorrentPeer; +use crate::torrent::TorrentPeer; use super::common::*; #[derive(Deserialize, Debug)] diff --git a/src/lib.rs b/src/lib.rs index addc7e1a1..3d928aff4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -10,6 +10,8 @@ pub mod torrust_udp_tracker; pub mod torrust_http_tracker; pub mod database; pub mod mysql_database; +pub mod torrent; +pub mod tracker_stats; pub use self::config::*; pub use torrust_udp_tracker::server::*; diff --git a/src/sqlite_database.rs b/src/sqlite_database.rs index c54a3c79c..5facd99d8 100644 --- a/src/sqlite_database.rs +++ b/src/sqlite_database.rs @@ -1,5 +1,5 @@ use std::collections::BTreeMap; -use crate::{InfoHash, AUTH_KEY_LENGTH, TorrentEntry, database}; +use crate::{InfoHash, AUTH_KEY_LENGTH, database}; use log::debug; use r2d2_sqlite::{SqliteConnectionManager}; use r2d2::{Pool}; @@ -8,6 +8,7 @@ use crate::key_manager::AuthKey; use std::str::FromStr; use crate::database::Database; use async_trait::async_trait; +use crate::torrent::TorrentEntry; pub struct SqliteDatabase { pool: Pool diff --git a/src/torrent.rs b/src/torrent.rs new file mode 100644 index 000000000..ef933d224 --- /dev/null +++ b/src/torrent.rs @@ -0,0 +1,221 @@ +use std::borrow::Cow; +use std::net::{IpAddr, SocketAddr}; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use serde::{Serialize, Deserialize}; +use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId}; +use crate::torrust_http_tracker::AnnounceRequest; +use crate::common::{AnnounceEventDef, NumberOfBytesDef}; + +#[derive(PartialEq, Eq, Debug, Clone, Serialize)] +pub 
struct TorrentPeer { + pub peer_id: PeerId, + pub peer_addr: SocketAddr, + #[serde(serialize_with = "ser_instant")] + pub updated: std::time::Instant, + #[serde(with = "NumberOfBytesDef")] + pub uploaded: NumberOfBytes, + #[serde(with = "NumberOfBytesDef")] + pub downloaded: NumberOfBytes, + #[serde(with = "NumberOfBytesDef")] + pub left: NumberOfBytes, + #[serde(with = "AnnounceEventDef")] + pub event: AnnounceEvent, +} + +impl TorrentPeer { + pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { + let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); + + TorrentPeer { + peer_id: PeerId(announce_request.peer_id.0), + peer_addr, + updated: std::time::Instant::now(), + uploaded: announce_request.bytes_uploaded, + downloaded: announce_request.bytes_downloaded, + left: announce_request.bytes_left, + event: announce_request.event + } + } + + pub fn from_http_announce_request(announce_request: &AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { + let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); + + let event: AnnounceEvent = if let Some(event) = &announce_request.event { + match event.as_ref() { + "started" => AnnounceEvent::Started, + "stopped" => AnnounceEvent::Stopped, + "completed" => AnnounceEvent::Completed, + _ => AnnounceEvent::None + } + } else { + AnnounceEvent::None + }; + + TorrentPeer { + peer_id: announce_request.peer_id.clone(), + peer_addr, + updated: std::time::Instant::now(), + uploaded: NumberOfBytes(announce_request.uploaded as i64), + downloaded: NumberOfBytes(announce_request.downloaded as i64), + left: NumberOfBytes(announce_request.left as i64), + event + } + } + + // potentially substitute localhost ip with external ip + pub fn peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip: IpAddr, host_opt_ip: 
Option, port: u16) -> SocketAddr { + if remote_ip.is_loopback() && host_opt_ip.is_some() { + SocketAddr::new(host_opt_ip.unwrap(), port) + } else { + SocketAddr::new(remote_ip, port) + } + } + + pub(crate) fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } + + fn is_completed(&self) -> bool { + self.event == AnnounceEvent::Completed + } +} + +fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { + ser.serialize_u64(inst.elapsed().as_millis() as u64) +} + +#[derive(Serialize, Deserialize, Clone)] +pub struct TorrentEntry { + #[serde(skip)] + pub(crate) peers: std::collections::BTreeMap, + pub(crate) completed: u32, + #[serde(skip)] + pub(crate) seeders: u32, +} + +impl TorrentEntry { + pub fn new() -> TorrentEntry { + TorrentEntry { + peers: std::collections::BTreeMap::new(), + completed: 0, + seeders: 0, + } + } + + pub fn update_peer(&mut self, peer: &TorrentPeer) { + match peer.event { + AnnounceEvent::Stopped => { + let peer_old = self.peers.remove(&peer.peer_id); + self.update_torrent_stats_with_peer(peer, peer_old); + } + _ => { + let peer_old = self.peers.insert(peer.peer_id.clone(), peer.clone()); + self.update_torrent_stats_with_peer(peer, peer_old); + } + } + } + + pub fn get_peers(&self, remote_addr: Option<&std::net::SocketAddr>) -> Vec { + let mut list = Vec::new(); + for (_, peer) in self + .peers + .iter() + .filter(|e| match remote_addr { + // don't filter on ip_version + None => true, + // filter out different ip_version from remote_addr + Some(remote_address) => { + match e.1.peer_addr.ip() { + IpAddr::V4(_) => { remote_address.is_ipv4() } + IpAddr::V6(_) => { remote_address.is_ipv6() } + } + } + }) + .take(MAX_SCRAPE_TORRENTS as usize) + { + + // skip ip address of client + if let Some(remote_addr) = remote_addr { + if peer.peer_addr == *remote_addr { + continue; + } + } + + list.push(peer.clone()); + } + list + } + + pub fn update_torrent_stats_with_peer(&mut self, peer: &TorrentPeer, peer_old: 
Option) { + match peer_old { + None => { + if peer.is_seeder() { + self.seeders += 1; + } + + if peer.is_completed() { + self.completed += 1; + } + } + Some(peer_old) => { + match peer.event { + AnnounceEvent::None => { + if peer.is_seeder() && !peer_old.is_seeder() { + self.seeders += 1; + } + } + AnnounceEvent::Completed => { + if peer.is_seeder() && !peer_old.is_seeder() { + self.seeders += 1; + } + + // don't double count completed + if !peer_old.is_completed() { + self.completed += 1; + } + } + AnnounceEvent::Stopped => { + if peer_old.is_seeder() { + if self.seeders != 0 { + self.seeders -= 1; + } + } + } + // impossible, started should be the first time a peer announces itself + AnnounceEvent::Started => {} + } + } + } + } + + pub fn get_stats(&self) -> (u32, u32, u32) { + let leechers: u32 = if self.seeders < (self.peers.len() as u32) { + (self.peers.len() as u32) - self.seeders + } else { + 0 + }; + + (self.seeders, self.completed, leechers) + } +} + +#[derive(Serialize, Deserialize)] +struct DatabaseRow<'a> { + info_hash: InfoHash, + entry: Cow<'a, TorrentEntry>, +} + +#[derive(Debug)] +pub struct TorrentStats { + pub completed: u32, + pub seeders: u32, + pub leechers: u32, +} + +#[derive(Debug)] +pub enum TorrentError { + TorrentNotWhitelisted, + PeerNotAuthenticated, + PeerKeyNotValid, + NoPeersFound, + CouldNotSendResponse, + InvalidInfoHash, +} diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index f15b7143a..8762faeaf 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -1,12 +1,15 @@ use std::collections::HashMap; use std::convert::Infallible; +use std::net::IpAddr; use std::sync::Arc; use log::debug; use warp::{reject, Rejection, Reply}; use warp::http::{Response}; -use crate::{InfoHash, TorrentError, TorrentPeer, TorrentStats, TorrentTracker}; +use crate::{InfoHash, TorrentTracker}; use crate::key_manager::AuthKey; +use crate::torrent::{TorrentError, TorrentPeer, 
TorrentStats}; use crate::torrust_http_tracker::{AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, WebResult}; +use crate::tracker_stats::TrackerStatsEvent; use crate::utils::url_encode_bytes; /// Authenticate InfoHash using optional AuthKey @@ -42,23 +45,14 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option // get all torrent peers excluding the peer_addr let peers = tracker.get_torrent_peers(&announce_request.info_hash, &peer.peer_addr).await; - // success response - let tracker_copy = tracker.clone(); - let is_ipv4 = announce_request.peer_addr.is_ipv4(); - - tokio::spawn(async move { - let mut status_writer = tracker_copy.set_stats().await; - if is_ipv4 { - status_writer.tcp4_connections_handled += 1; - status_writer.tcp4_announces_handled += 1; - } else { - status_writer.tcp6_connections_handled += 1; - status_writer.tcp6_announces_handled += 1; - } - }); - let announce_interval = tracker.config.announce_interval; + // send stats event + match announce_request.peer_addr { + IpAddr::V4(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Tcp4Announce).await; } + IpAddr::V6(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Tcp6Announce).await; } + } + send_announce_response(&announce_request, torrent_stats, peers, announce_interval, tracker.config.announce_interval_min) } @@ -86,18 +80,11 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option { tracker.stats_tracker.send_event(TrackerStatsEvent::Tcp4Scrape).await; } + IpAddr::V6(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Tcp6Scrape).await; } + } send_scrape_response(files) } diff --git a/src/torrust_udp_tracker/handlers.rs b/src/torrust_udp_tracker/handlers.rs index bf25a8861..df1a15451 100644 --- a/src/torrust_udp_tracker/handlers.rs +++ b/src/torrust_udp_tracker/handlers.rs @@ -1,9 +1,11 @@ use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use 
std::sync::Arc; use aquatic_udp_protocol::{AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId}; -use crate::{InfoHash, MAX_SCRAPE_TORRENTS, TorrentError, TorrentPeer, TorrentTracker}; +use crate::{InfoHash, MAX_SCRAPE_TORRENTS, TorrentTracker}; +use crate::torrent::{TorrentError, TorrentPeer}; use crate::torrust_udp_tracker::errors::ServerError; use crate::torrust_udp_tracker::request::AnnounceRequestWrapper; +use crate::tracker_stats::TrackerStatsEvent; use crate::utils::get_connection_id; pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), ServerError> { @@ -71,15 +73,11 @@ pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, t connection_id, }); - let tracker_copy = tracker.clone(); - tokio::spawn(async move { - let mut status_writer = tracker_copy.set_stats().await; - if remote_addr.is_ipv4() { - status_writer.udp4_connections_handled += 1; - } else { - status_writer.udp6_connections_handled += 1; - } - }); + // send stats event + match remote_addr { + SocketAddr::V4(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp4Connect).await; } + SocketAddr::V6(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp6Connect).await; } + } Ok(response) } @@ -98,16 +96,6 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc // get all peers excluding the client_addr let peers = tracker.get_torrent_peers(&wrapped_announce_request.info_hash, &peer.peer_addr).await; - let tracker_copy = tracker.clone(); - tokio::spawn(async move { - let mut status_writer = tracker_copy.set_stats().await; - if remote_addr.is_ipv4() { - status_writer.udp4_announces_handled += 1; - } else { - status_writer.udp6_announces_handled += 1; - } - }); - let announce_response = if remote_addr.is_ipv4() { 
Response::from(AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, @@ -144,6 +132,12 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc }) }; + // send stats event + match remote_addr { + SocketAddr::V4(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp4Announce).await; } + SocketAddr::V6(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp6Announce).await; } + } + Ok(announce_response) } @@ -180,15 +174,11 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra torrent_stats.push(scrape_entry); } - let tracker_copy = tracker.clone(); - tokio::spawn(async move { - let mut status_writer = tracker_copy.set_stats().await; - if remote_addr.is_ipv4() { - status_writer.udp4_scrapes_handled += 1; - } else { - status_writer.udp6_scrapes_handled += 1; - } - }); + // send stats event + match remote_addr { + SocketAddr::V4(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp4Scrape).await; } + SocketAddr::V6(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp6Scrape).await; } + } Ok(Response::from(ScrapeResponse { transaction_id: request.transaction_id, diff --git a/src/tracker.rs b/src/tracker.rs index 43dd3b00f..eb6f006bf 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -1,19 +1,18 @@ use serde::{Deserialize, Serialize}; use serde; -use std::borrow::Cow; use std::collections::BTreeMap; -use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; -use crate::common::{AnnounceEventDef, InfoHash, NumberOfBytesDef, PeerId}; -use std::net::{IpAddr, SocketAddr}; -use crate::{Configuration, database, key_manager, MAX_SCRAPE_TORRENTS}; +use tokio::sync::{RwLock, RwLockReadGuard}; +use crate::common::{InfoHash}; +use std::net::{SocketAddr}; +use crate::{Configuration, database, key_manager}; use std::collections::btree_map::Entry; use std::sync::Arc; -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use log::info; use 
crate::key_manager::AuthKey; use crate::database::{Database}; use crate::key_manager::Error::KeyInvalid; -use crate::torrust_http_tracker::AnnounceRequest; +use crate::torrent::{TorrentEntry, TorrentError, TorrentPeer, TorrentStats}; +use crate::tracker_stats::{StatsTracker, TrackerStats}; #[derive(Serialize, Deserialize, Clone, PartialEq, Debug)] pub enum TrackerMode { @@ -34,265 +33,25 @@ pub enum TrackerMode { PrivateListedMode, } -#[derive(PartialEq, Eq, Debug, Clone, Serialize)] -pub struct TorrentPeer { - pub peer_id: PeerId, - pub peer_addr: SocketAddr, - #[serde(serialize_with = "ser_instant")] - pub updated: std::time::Instant, - #[serde(with = "NumberOfBytesDef")] - pub uploaded: NumberOfBytes, - #[serde(with = "NumberOfBytesDef")] - pub downloaded: NumberOfBytes, - #[serde(with = "NumberOfBytesDef")] - pub left: NumberOfBytes, - #[serde(with = "AnnounceEventDef")] - pub event: AnnounceEvent, -} - -impl TorrentPeer { - pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { - let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); - - TorrentPeer { - peer_id: PeerId(announce_request.peer_id.0), - peer_addr, - updated: std::time::Instant::now(), - uploaded: announce_request.bytes_uploaded, - downloaded: announce_request.bytes_downloaded, - left: announce_request.bytes_left, - event: announce_request.event - } - } - - pub fn from_http_announce_request(announce_request: &AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { - let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); - - let event: AnnounceEvent = if let Some(event) = &announce_request.event { - match event.as_ref() { - "started" => AnnounceEvent::Started, - "stopped" => AnnounceEvent::Stopped, - "completed" => AnnounceEvent::Completed, - _ => AnnounceEvent::None - } - } 
else { - AnnounceEvent::None - }; - - TorrentPeer { - peer_id: announce_request.peer_id.clone(), - peer_addr, - updated: std::time::Instant::now(), - uploaded: NumberOfBytes(announce_request.uploaded as i64), - downloaded: NumberOfBytes(announce_request.downloaded as i64), - left: NumberOfBytes(announce_request.left as i64), - event - } - } - - // potentially substitute localhost ip with external ip - pub fn peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip: IpAddr, host_opt_ip: Option, port: u16) -> SocketAddr { - if remote_ip.is_loopback() && host_opt_ip.is_some() { - SocketAddr::new(host_opt_ip.unwrap(), port) - } else { - SocketAddr::new(remote_ip, port) - } - } - - fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } - - fn is_completed(&self) -> bool { - self.event == AnnounceEvent::Completed - } -} - -fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { - ser.serialize_u64(inst.elapsed().as_millis() as u64) -} - -#[derive(Serialize, Deserialize, Clone)] -pub struct TorrentEntry { - #[serde(skip)] - peers: std::collections::BTreeMap, - completed: u32, - #[serde(skip)] - seeders: u32, -} - -impl TorrentEntry { - pub fn new() -> TorrentEntry { - TorrentEntry { - peers: std::collections::BTreeMap::new(), - completed: 0, - seeders: 0, - } - } - - pub fn update_peer(&mut self, peer: &TorrentPeer) { - match peer.event { - AnnounceEvent::Stopped => { - let peer_old = self.peers.remove(&peer.peer_id); - self.update_torrent_stats_with_peer(peer, peer_old); - } - _ => { - let peer_old = self.peers.insert(peer.peer_id.clone(), peer.clone()); - self.update_torrent_stats_with_peer(peer, peer_old); - } - } - } - - pub fn get_peers(&self, remote_addr: Option<&std::net::SocketAddr>) -> Vec { - let mut list = Vec::new(); - for (_, peer) in self - .peers - .iter() - .filter(|e| match remote_addr { - // don't filter on ip_version - None => true, - // filter out different ip_version from remote_addr - Some(remote_address) => { - 
match e.1.peer_addr.ip() { - IpAddr::V4(_) => { remote_address.is_ipv4() } - IpAddr::V6(_) => { remote_address.is_ipv6() } - } - } - }) - .take(MAX_SCRAPE_TORRENTS as usize) - { - - // skip ip address of client - if let Some(remote_addr) = remote_addr { - if peer.peer_addr == *remote_addr { - continue; - } - } - - list.push(peer.clone()); - } - list - } - - pub fn update_torrent_stats_with_peer(&mut self, peer: &TorrentPeer, peer_old: Option) { - match peer_old { - None => { - if peer.is_seeder() { - self.seeders += 1; - } - - if peer.is_completed() { - self.completed += 1; - } - } - Some(peer_old) => { - match peer.event { - AnnounceEvent::None => { - if peer.is_seeder() && !peer_old.is_seeder() { - self.seeders += 1; - } - } - AnnounceEvent::Completed => { - if peer.is_seeder() && !peer_old.is_seeder() { - self.seeders += 1; - } - - // don't double count completed - if !peer_old.is_completed() { - self.completed += 1; - } - } - AnnounceEvent::Stopped => { - if peer_old.is_seeder() { - if self.seeders != 0 { - self.seeders -= 1; - } - } - } - // impossible, started should be the first time a peer announces itself - AnnounceEvent::Started => {} - } - } - } - } - - pub fn get_stats(&self) -> (u32, u32, u32) { - let leechers: u32 = if self.seeders < (self.peers.len() as u32) { - (self.peers.len() as u32) - self.seeders - } else { - 0 - }; - - (self.seeders, self.completed, leechers) - } -} - -#[derive(Serialize, Deserialize)] -struct DatabaseRow<'a> { - info_hash: InfoHash, - entry: Cow<'a, TorrentEntry>, -} - -#[derive(Debug)] -pub struct TorrentStats { - pub completed: u32, - pub seeders: u32, - pub leechers: u32, -} - -#[derive(Debug)] -pub enum TorrentError { - TorrentNotWhitelisted, - PeerNotAuthenticated, - PeerKeyNotValid, - NoPeersFound, - CouldNotSendResponse, - InvalidInfoHash, -} - -#[derive(Debug)] -pub struct TrackerStats { - pub tcp4_connections_handled: u64, - pub tcp4_announces_handled: u64, - pub tcp4_scrapes_handled: u64, - pub 
tcp6_connections_handled: u64, - pub tcp6_announces_handled: u64, - pub tcp6_scrapes_handled: u64, - pub udp4_connections_handled: u64, - pub udp4_announces_handled: u64, - pub udp4_scrapes_handled: u64, - pub udp6_connections_handled: u64, - pub udp6_announces_handled: u64, - pub udp6_scrapes_handled: u64, -} - pub struct TorrentTracker { pub config: Arc, torrents: tokio::sync::RwLock>, database: Box, - stats: tokio::sync::RwLock, + pub stats_tracker: StatsTracker } impl TorrentTracker { pub fn new(config: Arc) -> Result { let database = database::connect_database(&config.db_driver, &config.db_path)?; + let mut stats_tracker = StatsTracker::new(); + + stats_tracker.run_worker(); Ok(TorrentTracker { config, torrents: RwLock::new(std::collections::BTreeMap::new()), database, - stats: RwLock::new(TrackerStats { - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 0, - udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0, - }), + stats_tracker }) } @@ -359,7 +118,7 @@ impl TorrentTracker { let torrents = self.database.load_persistent_torrent_data().await?; for torrent in torrents { - self.add_torrent(torrent.0, 0, torrent.1, 0).await; + let _ = self.add_torrent(torrent.0, 0, torrent.1, 0).await; } Ok(()) @@ -449,12 +208,8 @@ impl TorrentTracker { self.torrents.read().await } - pub async fn set_stats(&self) -> RwLockWriteGuard<'_, TrackerStats> { - self.stats.write().await - } - pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStats> { - self.stats.read().await + self.stats_tracker.get_stats().await } // remove torrents without peers if enabled, and defragment memory diff --git a/src/tracker_stats.rs b/src/tracker_stats.rs new file mode 100644 index 000000000..1a6a71c2b --- /dev/null +++ b/src/tracker_stats.rs @@ -0,0 +1,123 
@@ +use std::sync::Arc; +use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; +use tokio::sync::mpsc::{Sender}; +use tokio::sync::mpsc::error::SendError; + +const CHANNEL_BUFFER_SIZE: usize = 65_535; + +#[derive(Debug)] +pub enum TrackerStatsEvent { + Tcp4Announce, + Tcp4Scrape, + Tcp6Announce, + Tcp6Scrape, + Udp4Connect, + Udp4Announce, + Udp4Scrape, + Udp6Connect, + Udp6Announce, + Udp6Scrape +} + +#[derive(Debug)] +pub struct TrackerStats { + pub tcp4_connections_handled: u64, + pub tcp4_announces_handled: u64, + pub tcp4_scrapes_handled: u64, + pub tcp6_connections_handled: u64, + pub tcp6_announces_handled: u64, + pub tcp6_scrapes_handled: u64, + pub udp4_connections_handled: u64, + pub udp4_announces_handled: u64, + pub udp4_scrapes_handled: u64, + pub udp6_connections_handled: u64, + pub udp6_announces_handled: u64, + pub udp6_scrapes_handled: u64, +} + +impl TrackerStats { + pub fn new() -> Self { + Self { + tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + udp6_announces_handled: 0, + udp6_scrapes_handled: 0, + } + } +} + +pub struct StatsTracker { + channel_sender: Option>, + pub stats: Arc> +} + +impl StatsTracker { + pub fn new() -> Self { + Self { + channel_sender: None, + stats: Arc::new(RwLock::new(TrackerStats::new())) + } + } + + pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStats> { + self.stats.read().await + } + + pub async fn send_event(&self, event: TrackerStatsEvent) -> Option>> { + if let Some(tx) = &self.channel_sender { + Some(tx.send(event).await) + } else { + None + } + } + + pub fn run_worker(&mut self) { + let (tx, mut rx) = mpsc::channel::(CHANNEL_BUFFER_SIZE); + + // set send channel on stats_tracker + self.channel_sender = Some(tx); + + let stats = self.stats.clone(); + + 
tokio::spawn(async move { + while let Some(event) = rx.recv().await { + let mut stats_lock = stats.write().await; + + match event { + TrackerStatsEvent::Tcp4Announce => { + stats_lock.tcp4_announces_handled += 1; + stats_lock.tcp4_connections_handled += 1; + } + TrackerStatsEvent::Tcp4Scrape => { + stats_lock.tcp4_scrapes_handled += 1; + stats_lock.tcp4_connections_handled += 1; + } + TrackerStatsEvent::Tcp6Announce => { + stats_lock.tcp6_announces_handled += 1; + stats_lock.tcp6_connections_handled += 1; + } + TrackerStatsEvent::Tcp6Scrape => { + stats_lock.tcp6_scrapes_handled += 1; + stats_lock.tcp6_connections_handled += 1; + } + TrackerStatsEvent::Udp4Connect => { stats_lock.udp4_connections_handled += 1; } + TrackerStatsEvent::Udp4Announce => { stats_lock.udp4_announces_handled += 1; } + TrackerStatsEvent::Udp4Scrape => { stats_lock.udp4_scrapes_handled += 1; } + TrackerStatsEvent::Udp6Connect => { stats_lock.udp6_connections_handled += 1; } + TrackerStatsEvent::Udp6Announce => { stats_lock.udp6_announces_handled += 1; } + TrackerStatsEvent::Udp6Scrape => { stats_lock.udp6_scrapes_handled += 1; } + } + + drop(stats_lock); + } + }); + } +} From 6cec6f2a32ae05104183e5c0e9d098799b6345a8 Mon Sep 17 00:00:00 2001 From: Power2All Date: Wed, 13 Apr 2022 14:53:23 +0200 Subject: [PATCH 008/435] Adding periodic --- src/config.rs | 2 ++ src/main.rs | 22 ++++++++++++++++++++ src/tracker.rs | 54 +++++++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 77 insertions(+), 1 deletion(-) diff --git a/src/config.rs b/src/config.rs index f9166e577..3045c1ca9 100644 --- a/src/config.rs +++ b/src/config.rs @@ -56,6 +56,7 @@ pub struct Configuration { pub db_driver: DatabaseDrivers, pub db_path: String, pub persistence: bool, + pub persistence_interval: Option, pub cleanup_interval: Option, pub cleanup_peerless: bool, pub external_ip: Option, @@ -139,6 +140,7 @@ impl Configuration { db_driver: DatabaseDrivers::Sqlite3, db_path: String::from("data.db"), persistence: 
false, + persistence_interval: Some(900), cleanup_interval: Some(600), cleanup_peerless: true, external_ip: Some(String::from("0.0.0.0")), diff --git a/src/main.rs b/src/main.rs index 721385760..c3d9ba23c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -29,6 +29,8 @@ async fn main() { panic!("Could not load persistent torrents.") }; info!("Persistent torrents loaded."); + + let _torrent_periodic_job = start_torrent_periodic_job(config.clone(), tracker.clone()).unwrap(); } // start torrent cleanup job (periodically removes old peers) @@ -89,6 +91,26 @@ async fn main() { } } +fn start_torrent_periodic_job(config: Arc, tracker: Arc) -> Option> { + let weak_tracker = std::sync::Arc::downgrade(&tracker); + let interval = config.persistence_interval.unwrap_or(900); + + return Some(tokio::spawn(async move { + let interval = std::time::Duration::from_secs(interval); + let mut interval = tokio::time::interval(interval); + interval.tick().await; // first tick is immediate... + // periodically call tracker.cleanup_torrents() + loop { + interval.tick().await; + if let Some(tracker) = weak_tracker.upgrade() { + tracker.periodic_saving().await; + } else { + break; + } + } + })); +} + fn start_torrent_cleanup_job(config: Arc, tracker: Arc) -> Option> { let weak_tracker = std::sync::Arc::downgrade(&tracker); let interval = config.cleanup_interval.unwrap_or(600); diff --git a/src/tracker.rs b/src/tracker.rs index eb6f006bf..b2a65b4ce 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; use serde; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap}; use tokio::sync::{RwLock, RwLockReadGuard}; use crate::common::{InfoHash}; use std::net::{SocketAddr}; @@ -36,6 +36,8 @@ pub enum TrackerMode { pub struct TorrentTracker { pub config: Arc, torrents: tokio::sync::RwLock>, + updates: tokio::sync::RwLock>, + shadow: tokio::sync::RwLock>, database: Box, pub stats_tracker: StatsTracker } @@ -50,6 +52,8 @@ impl 
TorrentTracker { Ok(TorrentTracker { config, torrents: RwLock::new(std::collections::BTreeMap::new()), + updates: RwLock::new(std::collections::HashMap::new()), + shadow: RwLock::new(std::collections::HashMap::new()), database, stats_tracker }) @@ -178,6 +182,15 @@ impl TorrentTracker { let (seeders, completed, leechers) = torrent_entry.get_stats(); + if self.config.persistence { + let mut updates = self.updates.write().await; + if updates.contains_key(info_hash) { + updates.remove(info_hash); + } + updates.insert(*info_hash, completed); + drop(updates); + } + TorrentStats { seeders, leechers, @@ -261,4 +274,43 @@ impl TorrentTracker { } info!("Torrents cleaned up."); } + + pub async fn periodic_saving(&self) { + // Get a lock for writing + let mut shadow = self.shadow.write().await; + + // We will get the data and insert it into the shadow, while clearing updates. + let mut updates = self.updates.write().await; + + for (infohash, completed) in updates.iter() { + if shadow.contains_key(infohash) { + shadow.remove(infohash); + } + shadow.insert(*infohash, *completed); + } + updates.clear(); + drop(updates); + + // We get shadow data into local array to be handled. + let mut shadow_copy: BTreeMap = BTreeMap::new(); + for (infohash, completed) in shadow.iter() { + shadow_copy.insert(*infohash, TorrentEntry{ + peers: Default::default(), + completed: *completed, + seeders: 0 + }); + } + + // Drop the lock + drop(shadow); + + // We will now save the data from the shadow into the database. + // This should not put any strain on the server itself, other then the harddisk/ssd. + let result = self.database.save_persistent_torrent_data(&shadow_copy).await; + if result.is_ok() { + let mut shadow = self.shadow.write().await; + shadow.clear(); + drop(shadow); + } + } } From f7b6c4e0f4914a30b69ef193a18a671d5a3509e2 Mon Sep 17 00:00:00 2001 From: Power2All Date: Wed, 13 Apr 2022 15:10:30 +0200 Subject: [PATCH 009/435] Fix the saving... 
--- src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index c3d9ba23c..ca632177d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -84,7 +84,7 @@ async fn main() { // Save torrents if enabled if config.persistence { info!("Saving torrents into SQL from memory..."); - let _ = tracker.save_torrents().await; + let _ = tracker.periodic_saving().await; info!("Torrents saved"); } } From f581289b48a96773eab9676fd0db82c7a1890b50 Mon Sep 17 00:00:00 2001 From: Power2All Date: Wed, 13 Apr 2022 15:34:56 +0200 Subject: [PATCH 010/435] Showing information that it's saving periodically --- src/main.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/main.rs b/src/main.rs index ca632177d..2c5ed8678 100644 --- a/src/main.rs +++ b/src/main.rs @@ -103,7 +103,9 @@ fn start_torrent_periodic_job(config: Arc, tracker: Arc Date: Wed, 13 Apr 2022 16:47:22 +0200 Subject: [PATCH 011/435] Adding memory logging --- src/config.rs | 2 ++ src/main.rs | 23 +++++++++++++++++++++++ src/tracker.rs | 16 +++++++++++++++- 3 files changed, 40 insertions(+), 1 deletion(-) diff --git a/src/config.rs b/src/config.rs index 3045c1ca9..67078d608 100644 --- a/src/config.rs +++ b/src/config.rs @@ -52,6 +52,7 @@ pub struct HttpApiConfig { #[derive(Serialize, Deserialize)] pub struct Configuration { pub log_level: Option, + pub log_interval: Option, pub mode: TrackerMode, pub db_driver: DatabaseDrivers, pub db_path: String, @@ -136,6 +137,7 @@ impl Configuration { pub fn default() -> Configuration { let mut configuration = Configuration { log_level: Option::from(String::from("info")), + log_interval: Some(60), mode: TrackerMode::PublicMode, db_driver: DatabaseDrivers::Sqlite3, db_path: String::from("data.db"), diff --git a/src/main.rs b/src/main.rs index 2c5ed8678..9ba45427e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -70,6 +70,9 @@ async fn main() { let _ = start_http_tracker_server(&http_tracker, tracker.clone()); } + // start a thread to post 
statistics + let _ = start_statistics_job(config.clone(), tracker.clone()).unwrap(); + // handle the signals here tokio::select! { _ = tokio::signal::ctrl_c() => { @@ -133,6 +136,26 @@ fn start_torrent_cleanup_job(config: Arc, tracker: Arc, tracker: Arc) -> Option> { + let weak_tracker = std::sync::Arc::downgrade(&tracker); + let interval = config.log_interval.unwrap_or(60); + + return Some(tokio::spawn(async move { + let interval = std::time::Duration::from_secs(interval); + let mut interval = tokio::time::interval(interval); + interval.tick().await; // first tick is immediate... + // periodically call tracker.cleanup_torrents() + loop { + interval.tick().await; + if let Some(tracker) = weak_tracker.upgrade() { + tracker.post_log().await; + } else { + break; + } + } + })); +} + fn start_api_server(config: &HttpApiConfig, tracker: Arc) -> JoinHandle<()> { info!("Starting HTTP API server on: {}", config.bind_address); let bind_addr = config.bind_address.parse::().unwrap(); diff --git a/src/tracker.rs b/src/tracker.rs index b2a65b4ce..68243a02e 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -1,11 +1,12 @@ use serde::{Deserialize, Serialize}; use serde; -use std::collections::{BTreeMap, HashMap}; +use std::collections::BTreeMap; use tokio::sync::{RwLock, RwLockReadGuard}; use crate::common::{InfoHash}; use std::net::{SocketAddr}; use crate::{Configuration, database, key_manager}; use std::collections::btree_map::Entry; +use std::mem; use std::sync::Arc; use log::info; use crate::key_manager::AuthKey; @@ -225,6 +226,19 @@ impl TorrentTracker { self.stats_tracker.get_stats().await } + pub async fn post_log(&self) { + let torrents = self.torrents.read().await; + let torrents_size = em::size_of_val(&*torrents); + drop(torrents); + let updates = self.updates.read().await; + let updates_size = em::size_of_val(&*updates); + drop(updates); + let shadow = self.shadow.read().await; + let shadow_size = em::size_of_val(&*shadow); + drop(shadow); + info!("Stats [::] Torrents: 
{} byte(s) | Updates: {} byte(s) | Shadow: {} byte(s)", torrents_size, updates_size, shadow_size); + } + // remove torrents without peers if enabled, and defragment memory pub async fn cleanup_torrents(&self) { info!("Cleaning torrents..."); From 41785b62ef7a000b0278f033c629e889c2a738f6 Mon Sep 17 00:00:00 2001 From: Power2All Date: Wed, 13 Apr 2022 16:49:21 +0200 Subject: [PATCH 012/435] Typo ... --- src/tracker.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/tracker.rs b/src/tracker.rs index 68243a02e..4aaa767be 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -228,13 +228,13 @@ impl TorrentTracker { pub async fn post_log(&self) { let torrents = self.torrents.read().await; - let torrents_size = em::size_of_val(&*torrents); + let torrents_size = mem::size_of_val(&*torrents); drop(torrents); let updates = self.updates.read().await; - let updates_size = em::size_of_val(&*updates); + let updates_size = mem::size_of_val(&*updates); drop(updates); let shadow = self.shadow.read().await; - let shadow_size = em::size_of_val(&*shadow); + let shadow_size = mem::size_of_val(&*shadow); drop(shadow); info!("Stats [::] Torrents: {} byte(s) | Updates: {} byte(s) | Shadow: {} byte(s)", torrents_size, updates_size, shadow_size); } From c6dff90018d6b2a317cd493da9ebadcd40e3815e Mon Sep 17 00:00:00 2001 From: Power2All Date: Wed, 13 Apr 2022 19:41:58 +0200 Subject: [PATCH 013/435] Adding a statistics viewing in console --- Cargo.lock | 764 ++++++++++++++++++++++++------------------------- src/tracker.rs | 15 +- 2 files changed, 381 insertions(+), 398 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2cb824d71..7bc7233c8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10,11 +10,11 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"43bb833f0bf979d8475d38fbf09ed3b8a55e1885fe93ad3f93239fc6a4f17b98" +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.3", + "getrandom", "once_cell", "version_check", ] @@ -39,8 +39,8 @@ dependencies = [ [[package]] name = "aquatic_udp_protocol" -version = "0.1.0" -source = "git+https://github.com/greatest-ape/aquatic#065e007ede84de20f20983b4b504471bbda2fdf2" +version = "0.2.0" +source = "git+https://github.com/greatest-ape/aquatic#26e2e874377a2682f52568f8e5e8c080c3366326" dependencies = [ "byteorder", "either", @@ -60,9 +60,9 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "async-trait" -version = "0.1.52" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" +checksum = "ed6aa3524a2dfcf9fe180c51eae2b58738348d819517ceadf95789c51fff7600" dependencies = [ "proc-macro2", "quote", @@ -82,9 +82,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "base-x" @@ -107,7 +107,7 @@ dependencies = [ "num-bigint 0.3.3", "num-integer", "num-traits 0.2.14", - "serde 1.0.120", + "serde 1.0.136", ] [[package]] @@ -141,9 +141,9 @@ dependencies = [ [[package]] name = "bitflags" -version = "1.2.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitvec" @@ -157,34 +157,22 @@ dependencies = [ "wyz", ] -[[package]] -name = "block-buffer" -version = "0.7.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding", - "byte-tools", - "byteorder", - "generic-array 0.12.3", -] - [[package]] name = "block-buffer" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "generic-array 0.14.4", + "generic-array", ] [[package]] -name = "block-padding" -version = "0.1.5" +name = "block-buffer" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" dependencies = [ - "byte-tools", + "generic-array", ] [[package]] @@ -205,27 +193,21 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.8.0" +version = "3.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1e260c3a9040a7c19a12468758f4c16f31a81a1fe087482be9570ec864bb6c" - -[[package]] -name = "byte-tools" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" +checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" [[package]] name = "byteorder" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" +checksum = 
"c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" [[package]] name = "cc" @@ -242,12 +224,6 @@ dependencies = [ "nom", ] -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -263,7 +239,7 @@ dependencies = [ "libc", "num-integer", "num-traits 0.2.14", - "serde 1.0.120", + "serde 1.0.136", "time 0.1.44", "winapi", ] @@ -312,7 +288,7 @@ dependencies = [ "lazy_static", "nom", "rust-ini", - "serde 1.0.120", + "serde 1.0.136", "serde-hjson", "serde_json", "toml", @@ -349,9 +325,9 @@ checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpufeatures" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" +checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b" dependencies = [ "libc", ] @@ -362,7 +338,17 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", +] + +[[package]] +name = "crypto-common" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" +dependencies = [ + "generic-array", + "typenum", ] [[package]] @@ -391,20 +377,21 @@ dependencies = [ [[package]] name = "digest" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.12.3", + "generic-array", ] [[package]] name 
= "digest" -version = "0.9.0" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" dependencies = [ - "generic-array 0.14.4", + "block-buffer 0.10.2", + "crypto-common", ] [[package]] @@ -432,12 +419,6 @@ dependencies = [ "termcolor", ] -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - [[package]] name = "fallible-iterator" version = "0.2.0" @@ -450,6 +431,15 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" +[[package]] +name = "fastrand" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +dependencies = [ + "instant", +] + [[package]] name = "fern" version = "0.6.0" @@ -461,11 +451,11 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f" +checksum = "b39522e96686d38f4bc984b9198e3a0613264abaebaff2c5c918bfa6b6da09af" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crc32fast", "libc", "libz-sys", @@ -495,9 +485,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ece68d15c92e84fa4f19d3780f1294e5ca82a78a6d515f1efaabcc144688be00" +checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" dependencies = [ "matches", "percent-encoding", @@ 
-664,18 +654,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.12.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" -dependencies = [ - "typenum", -] - -[[package]] -name = "generic-array" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" dependencies = [ "typenum", "version_check", @@ -683,22 +664,11 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - -[[package]] -name = "getrandom" -version = "0.2.3" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.10.0+wasi-snapshot-preview1", ] @@ -711,9 +681,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.4" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f3675cfef6a30c8031cf9e6493ebdc3bb3272a3fea3923c4210d1830e6a472" +checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" dependencies = [ "bytes", "fnv", @@ -724,16 +694,10 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util", + "tokio-util 0.7.1", "tracing", ] -[[package]] -name = "hashbrown" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" - [[package]] name = "hashbrown" version = "0.11.2" @@ -745,18 +709,18 @@ dependencies = [ [[package]] name = "headers" -version = "0.3.3" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62689dc57c7456e69712607ffcbd0aa1dfcccf9af73727e9b25bc1825375cac3" +checksum = "4cff78e5788be1e0ab65b04d306b2ed5092c815ec97ec70f4ebd5aee158aa55d" dependencies = [ "base64", "bitflags", "bytes", "headers-core", "http", + "httpdate", "mime", - "sha-1 0.8.2", - "time 0.1.44", + "sha-1 0.10.0", ] [[package]] @@ -770,9 +734,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ "libc", ] @@ -785,9 +749,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "http" -version = "0.2.3" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" +checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" dependencies = [ "bytes", "fnv", @@ -796,25 +760,26 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.0" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" +checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" dependencies = [ "bytes", "http", + "pin-project-lite", ] [[package]] name = "httparse" -version = "1.5.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" +checksum = 
"6330e8a36bd8c859f3fa6d9382911fbb7147ec39807f63b923933a247240b9ba" [[package]] name = "httpdate" -version = "0.3.2" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "humantime" @@ -824,9 +789,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.2" +version = "0.14.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" +checksum = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2" dependencies = [ "bytes", "futures-channel", @@ -838,8 +803,8 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project", - "socket2 0.3.19", + "pin-project-lite", + "socket2", "tokio", "tower-service", "tracing", @@ -859,30 +824,21 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.6.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" +checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" dependencies = [ "autocfg", - "hashbrown 0.9.1", -] - -[[package]] -name = "input_buffer" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f97967975f448f1a7ddb12b0bc41069d09ed6a1c161a92687e057325db35d413" -dependencies = [ - "bytes", + "hashbrown", ] [[package]] name = "instant" -version = "0.1.10" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -899,15 
+855,15 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.7" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" [[package]] name = "js-sys" -version = "0.3.55" +version = "0.3.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cc9ffccd38c451a86bf13657df244e9c3f37493cce8e5e21e940963777acc84" +checksum = "671a26f820db17c2a2750743f1dd03bafd15b98c9f30c7c2628c024c05d73397" dependencies = [ "wasm-bindgen", ] @@ -930,7 +886,7 @@ version = "5.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f404a90a744e32e8be729034fc33b90cf2a56418fbf594d69aa3c0214ad414e5" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "lexical-core", ] @@ -942,16 +898,16 @@ checksum = "6607c62aa161d23d17a9072cc5da0be67cdfc89d3afb1e8d9c842bebc2525ffe" dependencies = [ "arrayvec 0.5.2", "bitflags", - "cfg-if 1.0.0", + "cfg-if", "ryu", "static_assertions", ] [[package]] name = "libc" -version = "0.2.120" +version = "0.2.123" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad5c14e80759d0939d013e6ca49930e59fc53dd8e5009132f76240c179380c09" +checksum = "cb691a747a7ab48abc15c5b42066eaafde10dc427e3b6ee2a1cf43db04c763bd" [[package]] name = "libloading" @@ -959,7 +915,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "winapi", ] @@ -992,20 +948,21 @@ checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" -version = "0.4.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb" +checksum = 
"327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" dependencies = [ + "autocfg", "scopeguard", ] [[package]] name = "log" -version = "0.4.13" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2" +checksum = "6389c490849ff5bc16be905ae24bc913a9c8892e19b2341dbc175e14c341c2b8" dependencies = [ - "cfg-if 0.1.10", + "cfg-if", ] [[package]] @@ -1014,7 +971,7 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ea2d928b485416e8908cff2d97d621db22b27f7b3b6729e438bcf42c671ba91" dependencies = [ - "hashbrown 0.11.2", + "hashbrown", ] [[package]] @@ -1028,9 +985,9 @@ dependencies = [ [[package]] name = "matches" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" +checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "memchr" @@ -1055,9 +1012,9 @@ checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" [[package]] name = "mime_guess" -version = "2.0.3" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" dependencies = [ "mime", "unicase", @@ -1065,42 +1022,41 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.4.4" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +checksum = "d2b29bd4bc3f33391105ebee3589c19197c4271e3e5a9ec9bfe8127eeff8f082" dependencies = [ "adler", - "autocfg", ] [[package]] name = "mio" -version = "0.7.7" +version = "0.8.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e50ae3f04d169fcc9bde0b547d1c205219b7157e07ded9c5aff03e0637cb3ed7" +checksum = "52da4364ffb0e4fe33a9841a98a3f3014fb964045ce4f7a45a398243c8d6b0c9" dependencies = [ "libc", "log", "miow", "ntapi", + "wasi 0.11.0+wasi-snapshot-preview1", "winapi", ] [[package]] name = "miow" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" +checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" dependencies = [ - "socket2 0.3.19", "winapi", ] [[package]] name = "multipart" -version = "0.17.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050aeedc89243f5347c3e237e3e13dc76fbe4ae3742a57b94dc14f69acf76d4" +checksum = "00dec633863867f29cb39df64a397cdf4a6354708ddd7759f70c7fb51c5f9182" dependencies = [ "buf_redux", "httparse", @@ -1108,7 +1064,7 @@ dependencies = [ "mime", "mime_guess", "quick-error", - "rand 0.7.3", + "rand", "safemem", "tempfile", "twoway", @@ -1132,9 +1088,9 @@ dependencies = [ "once_cell", "pem", "percent-encoding", - "serde 1.0.120", + "serde 1.0.136", "serde_json", - "socket2 0.4.4", + "socket2", "twox-hash", "url", ] @@ -1162,11 +1118,11 @@ dependencies = [ "lexical", "num-bigint 0.4.3", "num-traits 0.2.14", - "rand 0.8.4", + "rand", "regex", "rust_decimal", "saturating", - "serde 1.0.120", + "serde 1.0.136", "serde_json", "sha1", "sha2", @@ -1188,9 +1144,9 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48ba9f7719b5a0f42f338907614285fb5fd70e53858141f69898a1fb7203b24d" +checksum = "fd7e2f3618557f980e0b17e8856252eee3c97fa12c54dff0ca290fb6266ca4a9" dependencies = [ "lazy_static", "libc", @@ -1212,7 +1168,7 @@ checksum = 
"9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" dependencies = [ "bitflags", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "memoffset", ] @@ -1230,9 +1186,9 @@ dependencies = [ [[package]] name = "ntapi" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +checksum = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f" dependencies = [ "winapi", ] @@ -1289,9 +1245,9 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" dependencies = [ "hermit-abi", "libc", @@ -1303,12 +1259,6 @@ version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" - [[package]] name = "opaque-debug" version = "0.3.0" @@ -1322,7 +1272,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7ae222234c30df141154f159066c5093ff73b63204dcda7121eb082fc56a95" dependencies = [ "bitflags", - "cfg-if 1.0.0", + "cfg-if", "foreign-types", "libc", "once_cell", @@ -1350,22 +1300,32 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", "lock_api", - "parking_lot_core", + 
"parking_lot_core 0.8.5", +] + +[[package]] +name = "parking_lot" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" +dependencies = [ + "lock_api", + "parking_lot_core 0.9.2", ] [[package]] name = "parking_lot_core" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" +checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "instant", "libc", "redox_syscall", @@ -1373,6 +1333,19 @@ dependencies = [ "winapi", ] +[[package]] +name = "parking_lot_core" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "995f667a6c822200b0433ac218e05582f0e2efa1b922a3fd2fbaadc5f87bab37" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-sys", +] + [[package]] name = "peeking_take_while" version = "0.1.2" @@ -1398,18 +1371,18 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "1.0.4" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95b70b68509f17aa2857863b6fa00bf21fc93674c7a8893de2f469f6aa7ca2f2" +checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.4" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caa25a6393f22ce819b0f50e0be89287292fda8d425be38ee0ca14c4931d9e71" +checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" dependencies = [ "proc-macro2", "quote", @@ -1418,9 +1391,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.4" +version = "0.2.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" +checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" [[package]] name = "pin-utils" @@ -1430,15 +1403,15 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.19" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" +checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" [[package]] name = "ppv-lite86" -version = "0.2.10" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" [[package]] name = "proc-macro-hack" @@ -1448,9 +1421,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.24" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" +checksum = "ec757218438d5fda206afc041538b2f6d889286160d649a86a24d37e1235afd1" dependencies = [ "unicode-xid", ] @@ -1463,9 +1436,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.8" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" +checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" dependencies = [ "proc-macro2", ] @@ -1477,7 +1450,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "545c5bc2b880973c9c10e4067418407a0ccaa3091781d1671d46eb35107cb26f" 
dependencies = [ "log", - "parking_lot", + "parking_lot 0.11.2", "scheduled-thread-pool", ] @@ -1509,37 +1482,13 @@ checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" [[package]] name = "rand" -version = "0.7.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ - "getrandom 0.1.16", "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc 0.2.0", -] - -[[package]] -name = "rand" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" -dependencies = [ - "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.3", - "rand_hc 0.3.1", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", + "rand_chacha", + "rand_core", ] [[package]] @@ -1549,16 +1498,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.3", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", + "rand_core", ] [[package]] @@ -1567,41 +1507,23 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "getrandom 0.2.3", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rand_hc" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" -dependencies = [ - "rand_core 0.6.3", + "getrandom", ] [[package]] name = "redox_syscall" -version = "0.2.10" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" dependencies = [ "bitflags", ] [[package]] name = "regex" -version = "1.5.4" +version = "1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" +checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" dependencies = [ "aho-corasick", "memchr", @@ -1662,13 +1584,13 @@ checksum = "3e52c148ef37f8c375d49d5a73aa70713125b7f19095948a923f80afdeb22ec2" [[package]] name = "rust_decimal" -version = "1.22.0" +version = "1.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d37baa70cf8662d2ba1c1868c5983dda16ef32b105cce41fb5c47e72936a90b3" +checksum = "22dc69eadbf0ee2110b8d20418c0c6edbaefec2811c4963dc17b6344e11fe0f8" dependencies = [ "arrayvec 0.7.2", "num-traits 0.2.14", - "serde 1.0.120", + "serde 1.0.136", ] [[package]] @@ -1692,7 +1614,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.4", + "semver 1.0.7", ] [[package]] @@ -1710,9 +1632,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.5" +version = "1.0.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" [[package]] name = "safemem" @@ -1742,7 +1664,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc6f74fd1204073fa02d5d5d68bec8021be4c38690b61264b2fdb48083d0e7d7" dependencies = [ - "parking_lot", + "parking_lot 0.11.2", ] [[package]] @@ -1769,9 +1691,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.3.1" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23a2ac85147a3a11d77ecf1bc7166ec0b92febfa4461c37944e180f319ece467" +checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" dependencies = [ "bitflags", "core-foundation", @@ -1801,9 +1723,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.4" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012" +checksum = "d65bd28f48be7196d222d95b9243287f48d27aca604e08497513019ff0502cc4" [[package]] name = "semver-parser" @@ -1819,9 +1741,9 @@ checksum = "9dad3f759919b92c3068c696c15c3d17238234498bbdcc80f2c469606f948ac8" [[package]] name = "serde" -version = "1.0.120" +version = "1.0.136" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "166b2349061381baf54a58e4b13c89369feb0ef2eaa57198899e2312aac30aab" +checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" dependencies = [ "serde_derive", ] @@ -1844,7 +1766,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "934d8bdbaa0126dafaea9a8833424a211d9661897717846c6bb782349ca1c30d" dependencies = [ - "serde 1.0.120", + "serde 1.0.136", "serde_bytes", ] @@ -1854,14 +1776,14 @@ version = "0.11.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "16ae07dd2f88a366f15bd0632ba725227018c69a1c8550a927324f8eb8368bb9" dependencies = [ - "serde 1.0.120", + "serde 1.0.136", ] [[package]] name = "serde_derive" -version = "1.0.120" +version = "1.0.136" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca2a8cb5805ce9e3b95435e3765b7b553cecc762d938d409434338386cb5775" +checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" dependencies = [ "proc-macro2", "quote", @@ -1870,50 +1792,49 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.72" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0ffa0837f2dfa6fb90868c2b5468cad482e175f7dad97e7421951e663f2b527" +checksum = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95" dependencies = [ "itoa", "ryu", - "serde 1.0.120", + "serde 1.0.136", ] [[package]] name = "serde_urlencoded" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", "itoa", "ryu", - "serde 1.0.120", + "serde 1.0.136", ] [[package]] name = "sha-1" -version = "0.8.2" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df" +checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", + "block-buffer 0.9.0", + "cfg-if", + "cpufeatures", + "digest 0.9.0", + "opaque-debug", ] [[package]] name = "sha-1" -version = "0.9.8" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", - "digest 0.9.0", - "opaque-debug 0.3.0", + "digest 0.10.3", ] [[package]] @@ -1938,10 +1859,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.9.0", - "opaque-debug 0.3.0", + "opaque-debug", ] [[package]] @@ -1952,35 +1873,24 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook-registry" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" dependencies = [ "libc", ] [[package]] name = "slab" -version = "0.4.2" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" [[package]] name = "smallvec" -version = "1.6.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" - -[[package]] -name = "socket2" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "winapi", -] +checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" [[package]] name = "socket2" @@ -2035,7 +1945,7 @@ checksum = 
"c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" dependencies = [ "proc-macro2", "quote", - "serde 1.0.120", + "serde 1.0.136", "serde_derive", "syn", ] @@ -2049,7 +1959,7 @@ dependencies = [ "base-x", "proc-macro2", "quote", - "serde 1.0.120", + "serde 1.0.136", "serde_derive", "serde_json", "sha1", @@ -2080,9 +1990,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.67" +version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6498a9efc342871f91cc2d0d694c674368b4ceb40f62b65a7a08c3792935e702" +checksum = "b683b2b825c8eef438b77c36a06dc262294da3d5a5813fac20da149241dcd44d" dependencies = [ "proc-macro2", "quote", @@ -2097,13 +2007,13 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", + "fastrand", "libc", - "rand 0.8.4", "redox_syscall", "remove_dir_all", "winapi", @@ -2129,18 +2039,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.26" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93119e4feac1cbe6c798c34d3a53ea0026b0b1de6a120deef895137c0529bfe2" +checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.26" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "060d69a0afe7796bf42e9e2ff91f5ee691fb15c53d38b4b62a9a53eb23164745" +checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" dependencies = [ "proc-macro2", "quote", @@ -2198,9 +2108,9 @@ dependencies = [ [[package]] name = "tinyvec" -version 
= "1.3.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "848a1e1181b9f6753b5e96a092749e29b11d19ede67dfbbd6c7dc7e0f49b5338" +checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" dependencies = [ "tinyvec_macros", ] @@ -2213,29 +2123,29 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.7.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c79ba603c337335df6ba6dd6afc38c38a7d5e1b0c871678439ea973cd62a118e" +checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee" dependencies = [ - "autocfg", "bytes", "libc", "memchr", "mio", "num_cpus", "once_cell", - "parking_lot", + "parking_lot 0.12.0", "pin-project-lite", "signal-hook-registry", + "socket2", "tokio-macros", "winapi", ] [[package]] name = "tokio-macros" -version = "1.1.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caf7b11a536f46a809a8a9f0bb4237020f70ecbf115b842360afb127ea2fda57" +checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" dependencies = [ "proc-macro2", "quote", @@ -2255,9 +2165,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.2" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76066865172052eb8796c686f0b441a93df8b08d40a950b062ffb9a426f00edd" +checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" dependencies = [ "futures-core", "pin-project-lite", @@ -2266,9 +2176,9 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.13.0" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1a5f475f1b9d077ea1017ecbc60890fda8e54942d680ca0b1d2b47cfa2d861b" +checksum = "511de3f85caf1c98983545490c3d09685fa8eb634e57eec22bb4db271f46cbd8" dependencies = [ "futures-util", "log", 
@@ -2279,9 +2189,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.7" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" +checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" dependencies = [ "bytes", "futures-core", @@ -2291,13 +2201,27 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-util" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0edfdeb067411dba2044da6d1cb2df793dd35add7888d73c16e3381ded401764" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", + "tracing", +] + [[package]] name = "toml" version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" dependencies = [ - "serde 1.0.120", + "serde 1.0.136", ] [[package]] @@ -2319,8 +2243,8 @@ dependencies = [ "r2d2", "r2d2_mysql", "r2d2_sqlite", - "rand 0.8.4", - "serde 1.0.120", + "rand", + "serde 1.0.136", "serde_bencode", "serde_bytes", "serde_json", @@ -2332,27 +2256,39 @@ dependencies = [ [[package]] name = "tower-service" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.22" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" +checksum = "80b9fa4360528139bc96100c160b7ae879f5567f49f1782b0b02035b0358ebf3" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "log", "pin-project-lite", + "tracing-attributes", "tracing-core", ] +[[package]] +name = "tracing-attributes" +version = "0.1.20" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e65ce065b4b5c53e73bb28912318cb8c9e9ad3921f1d669eb0e68b4c8143a2b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "tracing-core" -version = "0.1.17" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" +checksum = "6dfce9f3241b150f36e8e54bb561a742d5daa1a47b5dd9a5ce369fd4a4db2210" dependencies = [ "lazy_static", ] @@ -2365,19 +2301,19 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "tungstenite" -version = "0.12.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ada8297e8d70872fa9a551d93250a9f407beb9f37ef86494eb20012a2ff7c24" +checksum = "a0b2d8558abd2e276b0a8df5c05a2ec762609344191e5fd23e292c910e9165b5" dependencies = [ "base64", "byteorder", "bytes", "http", "httparse", - "input_buffer", "log", - "rand 0.8.4", + "rand", "sha-1 0.9.8", + "thiserror", "url", "utf-8", ] @@ -2397,16 +2333,16 @@ version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0" dependencies = [ - "cfg-if 1.0.0", - "rand 0.8.4", + "cfg-if", + "rand", "static_assertions", ] [[package]] name = "typenum" -version = "1.12.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" +checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "unicase" @@ -2419,9 +2355,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "246f4c42e67e7a4e3c6106ff716a5d067d4132a642840b242e357e468a2a0085" +checksum = 
"1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" [[package]] name = "unicode-normalization" @@ -2440,9 +2376,9 @@ checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" [[package]] name = "unicode-xid" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" [[package]] name = "untrusted" @@ -2488,9 +2424,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.9.2" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "want" @@ -2504,12 +2440,13 @@ dependencies = [ [[package]] name = "warp" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "332d47745e9a0c38636dbd454729b147d16bd1ed08ae67b3ab281c4506771054" +checksum = "3cef4e1e9114a4b7f1ac799f16ce71c14de5778500c5450ec6b7b920c55b587e" dependencies = [ "bytes", - "futures", + "futures-channel", + "futures-util", "headers", "http", "hyper", @@ -2520,45 +2457,45 @@ dependencies = [ "percent-encoding", "pin-project", "scoped-tls", - "serde 1.0.120", + "serde 1.0.136", "serde_json", "serde_urlencoded", "tokio", "tokio-rustls", "tokio-stream", "tokio-tungstenite", - "tokio-util", + "tokio-util 0.6.9", "tower-service", "tracing", ] [[package]] name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" +version = "0.10.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +checksum = 
"1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" +version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.78" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" +checksum = "27370197c907c55e3f1a9fbe26f44e937fe6451368324e009cba39e139dc08ad" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.78" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a317bf8f9fba2476b4b2c85ef4c4af8ff39c3c7f0cdfeed4f82c34a880aa837b" +checksum = "53e04185bfa3a779273da532f5025e33398409573f348985af9a1cbf3774d3f4" dependencies = [ "bumpalo", "lazy_static", @@ -2571,9 +2508,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.78" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56146e7c495528bf6587663bea13a8eb588d39b36b679d83972e1a2dbbdacf9" +checksum = "17cae7ff784d7e83a2fe7611cfe766ecf034111b49deb850a3dc7699c08251f5" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2581,9 +2518,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.78" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7803e0eea25835f8abdc585cd3021b3deb11543c6fe226dcd30b228857c5c5ab" +checksum = "99ec0dc7a4756fffc231aab1b9f2f578d23cd391390ab27f952ae0c9b3ece20b" dependencies = [ "proc-macro2", "quote", @@ -2594,15 +2531,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = 
"0.2.78" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0237232789cf037d5480773fe568aac745bfe2afbc11a863e97901780a6b47cc" +checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744" [[package]] name = "web-sys" -version = "0.3.55" +version = "0.3.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38eb105f1c59d9eaa6b5cdc92b859d85b926e82cb2e0945cd0c9259faa6fe9fb" +checksum = "7b17e741662c70c8bd24ac5c5b18de314a2c26c32bf8346ee1e6f53de919c283" dependencies = [ "js-sys", "wasm-bindgen", @@ -2658,6 +2595,49 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows-sys" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5acdd78cb4ba54c0045ac14f62d8f94a03d10047904ae2a40afa1e99d8f70825" +dependencies = [ + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_msvc" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" + +[[package]] +name = "windows_i686_gnu" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" + +[[package]] +name = "windows_i686_msvc" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" + +[[package]] +name = "windows_x86_64_msvc" 
+version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" + [[package]] name = "wyz" version = "0.4.0" diff --git a/src/tracker.rs b/src/tracker.rs index 4aaa767be..053119b4a 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -34,13 +34,15 @@ pub enum TrackerMode { PrivateListedMode, } + pub struct TorrentTracker { pub config: Arc, torrents: tokio::sync::RwLock>, updates: tokio::sync::RwLock>, shadow: tokio::sync::RwLock>, database: Box, - pub stats_tracker: StatsTracker + pub stats_tracker: StatsTracker, + pub guard: pprof } impl TorrentTracker { @@ -56,7 +58,8 @@ impl TorrentTracker { updates: RwLock::new(std::collections::HashMap::new()), shadow: RwLock::new(std::collections::HashMap::new()), database, - stats_tracker + stats_tracker, + guard: pprof::ProfilerGuard::new(100).unwrap() }) } @@ -228,15 +231,15 @@ impl TorrentTracker { pub async fn post_log(&self) { let torrents = self.torrents.read().await; - let torrents_size = mem::size_of_val(&*torrents); + let torrents_size = torrents.len(); drop(torrents); let updates = self.updates.read().await; - let updates_size = mem::size_of_val(&*updates); + let updates_size = updates.len(); drop(updates); let shadow = self.shadow.read().await; - let shadow_size = mem::size_of_val(&*shadow); + let shadow_size = shadow.len(); drop(shadow); - info!("Stats [::] Torrents: {} byte(s) | Updates: {} byte(s) | Shadow: {} byte(s)", torrents_size, updates_size, shadow_size); + info!("-=[ Stats ]=- | Torrents: {} | Updates: {} | Shadow: {}", torrents_size, updates_size, shadow_size); } // remove torrents without peers if enabled, and defragment memory From 97c17d4a5bb784c43188def982a58c3ad63c5b60 Mon Sep 17 00:00:00 2001 From: Power2All Date: Wed, 13 Apr 2022 19:46:06 +0200 Subject: [PATCH 014/435] Oops :) --- src/tracker.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/tracker.rs b/src/tracker.rs 
index 053119b4a..defdecc1f 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -41,8 +41,7 @@ pub struct TorrentTracker { updates: tokio::sync::RwLock>, shadow: tokio::sync::RwLock>, database: Box, - pub stats_tracker: StatsTracker, - pub guard: pprof + pub stats_tracker: StatsTracker } impl TorrentTracker { @@ -58,8 +57,7 @@ impl TorrentTracker { updates: RwLock::new(std::collections::HashMap::new()), shadow: RwLock::new(std::collections::HashMap::new()), database, - stats_tracker, - guard: pprof::ProfilerGuard::new(100).unwrap() + stats_tracker }) } From 145780a6b770431eec99d3eeeb7dfce247747e3b Mon Sep 17 00:00:00 2001 From: Power2All Date: Fri, 15 Apr 2022 08:57:25 +0200 Subject: [PATCH 015/435] Adding profiling and updates and cleanups --- Cargo.lock | 91 ++++++++++++++++++++++++++++++++++++++++++++++---- Cargo.toml | 5 +++ src/main.rs | 7 ++++ src/tracker.rs | 2 -- 4 files changed, 96 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7bc7233c8..eb04ba651 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,15 @@ # It is not intended for manual editing. 
version = 3 +[[package]] +name = "addr2line" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" +dependencies = [ + "gimli", +] + [[package]] name = "adler" version = "1.0.2" @@ -86,6 +95,21 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "backtrace" +version = "0.3.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e121dee8023ce33ab248d9ce1493df03c3b38a659b240096fcbd7048ff9c31f" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide 0.4.4", + "object", + "rustc-demangle", +] + [[package]] name = "base-x" version = "0.2.8" @@ -375,6 +399,21 @@ dependencies = [ "syn", ] +[[package]] +name = "dhat" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47003dc9f6368a88e85956c3b2573a7e6872746a3e5d762a8885da3a136a0381" +dependencies = [ + "backtrace", + "lazy_static", + "parking_lot 0.11.2", + "rustc-hash", + "serde 1.0.136", + "serde_json", + "thousands", +] + [[package]] name = "digest" version = "0.9.0" @@ -459,7 +498,7 @@ dependencies = [ "crc32fast", "libc", "libz-sys", - "miniz_oxide", + "miniz_oxide 0.5.1", ] [[package]] @@ -673,6 +712,12 @@ dependencies = [ "wasi 0.10.0+wasi-snapshot-preview1", ] +[[package]] +name = "gimli" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" + [[package]] name = "glob" version = "0.3.0" @@ -1020,6 +1065,16 @@ dependencies = [ "unicase", ] +[[package]] +name = "miniz_oxide" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +dependencies = [ + "adler", + 
"autocfg", +] + [[package]] name = "miniz_oxide" version = "0.5.1" @@ -1253,6 +1308,15 @@ dependencies = [ "libc", ] +[[package]] +name = "object" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" +dependencies = [ + "memchr", +] + [[package]] name = "once_cell" version = "1.10.0" @@ -1593,6 +1657,12 @@ dependencies = [ "serde 1.0.136", ] +[[package]] +name = "rustc-demangle" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" + [[package]] name = "rustc-hash" version = "1.1.0" @@ -2057,6 +2127,12 @@ dependencies = [ "syn", ] +[[package]] +name = "thousands" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bf63baf9f5039dadc247375c29eb13706706cfde997d0330d05aa63a77d8820" + [[package]] name = "time" version = "0.1.44" @@ -2217,9 +2293,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" dependencies = [ "serde 1.0.136", ] @@ -2235,6 +2311,7 @@ dependencies = [ "chrono", "config", "derive_more", + "dhat", "fern", "futures", "hex", @@ -2262,9 +2339,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80b9fa4360528139bc96100c160b7ae879f5567f49f1782b0b02035b0358ebf3" +checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" dependencies = [ "cfg-if", "log", @@ -2286,9 +2363,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.25" 
+version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dfce9f3241b150f36e8e54bb561a742d5daa1a47b5dd9a5ce369fd4a4db2210" +checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f" dependencies = [ "lazy_static", ] diff --git a/Cargo.toml b/Cargo.toml index 81f76abe9..9871f20a4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,8 +7,12 @@ description = "A feature rich BitTorrent tracker." edition = "2018" [profile.release] +debug = 1 lto = "fat" +[features] +dhat-heap = [] # if you are doing heap profiling + [dependencies] serde = {version = "1.0", features = ["derive"]} serde_bencode = "^0.2.3" @@ -34,3 +38,4 @@ thiserror = "1.0" aquatic_udp_protocol = { git = "https://github.com/greatest-ape/aquatic" } futures = "0.3.21" async-trait = "0.1.52" +dhat = "0.3.0" diff --git a/src/main.rs b/src/main.rs index 9ba45427e..63eb2ec0a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -5,8 +5,15 @@ use tokio::task::JoinHandle; use torrust_tracker::{Configuration, http_api_server, HttpApiConfig, HttpTrackerConfig, logging, TorrentTracker, UdpServer, UdpTrackerConfig}; use torrust_tracker::torrust_http_tracker::server::HttpServer; +#[cfg(feature = "dhat-heap")] +#[global_allocator] +static ALLOC: dhat::Alloc = dhat::Alloc; + #[tokio::main] async fn main() { + #[cfg(feature = "dhat-heap")] + let _profiler = dhat::Profiler::new_heap(); + // torrust config let config = match Configuration::load_from_file() { Ok(config) => Arc::new(config), diff --git a/src/tracker.rs b/src/tracker.rs index defdecc1f..3a69e3e0b 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -6,7 +6,6 @@ use crate::common::{InfoHash}; use std::net::{SocketAddr}; use crate::{Configuration, database, key_manager}; use std::collections::btree_map::Entry; -use std::mem; use std::sync::Arc; use log::info; use crate::key_manager::AuthKey; @@ -34,7 +33,6 @@ pub enum TrackerMode { PrivateListedMode, } - pub struct TorrentTracker { pub config: Arc, torrents: 
tokio::sync::RwLock>, From 9159187f312bbbc529df662f05d5b2f5bce6fa50 Mon Sep 17 00:00:00 2001 From: Power2All Date: Fri, 15 Apr 2022 14:07:42 +0200 Subject: [PATCH 016/435] First implementation and iteration of using crossbeam to eventually handle the torrent data, should improve and minimize the memory usage drastically, so to keep the memory object at a single place --- Cargo.lock | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++++ Cargo.toml | 2 ++ src/main.rs | 55 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 128 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index eb04ba651..00e23092c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -365,6 +365,75 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "crossbeam" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ae5588f6b3c3cb05239e90bd110f257254aecd01e4635400391aeae07497845" +dependencies = [ + "cfg-if", + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +dependencies = [ + "cfg-if", + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" +dependencies = [ + "autocfg", + "cfg-if", + "crossbeam-utils", + "lazy_static", + "memoffset", + "scopeguard", +] + +[[package]] +name = "crossbeam-queue" +version = "0.3.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f25d8400f4a7a5778f0e4e52384a48cbd9b5c495d110786187fc750075277a2" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" +dependencies = [ + "cfg-if", + "lazy_static", +] + [[package]] name = "crypto-common" version = "0.1.3" @@ -2310,6 +2379,8 @@ dependencies = [ "byteorder", "chrono", "config", + "crossbeam", + "crossbeam-channel", "derive_more", "dhat", "fern", diff --git a/Cargo.toml b/Cargo.toml index 9871f20a4..408a07b85 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,3 +39,5 @@ aquatic_udp_protocol = { git = "https://github.com/greatest-ape/aquatic" } futures = "0.3.21" async-trait = "0.1.52" dhat = "0.3.0" +crossbeam = "0.8.1" +crossbeam-channel = "0.5.4" diff --git a/src/main.rs b/src/main.rs index 63eb2ec0a..2943e4ca7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,5 +1,6 @@ use std::net::SocketAddr; use std::sync::Arc; +use crossbeam_channel::bounded; use log::{info}; use tokio::task::JoinHandle; use torrust_tracker::{Configuration, http_api_server, HttpApiConfig, HttpTrackerConfig, logging, TorrentTracker, UdpServer, UdpTrackerConfig}; @@ -9,11 +10,28 @@ use torrust_tracker::torrust_http_tracker::server::HttpServer; #[global_allocator] static ALLOC: dhat::Alloc = dhat::Alloc; +pub struct DataStream { + action: u8, + data: Vec<()> +} + #[tokio::main] async fn main() { #[cfg(feature = "dhat-heap")] let _profiler = dhat::Profiler::new_heap(); + // Loading configuration + let config = match Configuration::load_from_file() { + Ok(config) => config, + Err(error) => { + panic!("{}", error) + } + }; + + // Start the thread where data is being exchanged for usaga + let (sender, receiver): (crossbeam_channel::Sender, crossbeam_channel::Receiver) = bounded(1); + let _torrents_memory_handler = 
start_torrents_memory_handler(&config, sender.clone(), receiver.clone()); + // torrust config let config = match Configuration::load_from_file() { Ok(config) => Arc::new(config), @@ -97,10 +115,47 @@ async fn main() { let _ = tracker.periodic_saving().await; info!("Torrents saved"); } + + // Closing down channel + sender.clone().send(DataStream{ + action: ACTION_CLOSE_CHANNEL, + data: Vec::new() + }); } } } + +const ACTION_CLOSE_CHANNEL: u8 = 0; +const ACTION_READ_TORRENTS: u8 = 1; +const ACTION_WRITE_TORRENTS: u8 = 2; +const ACTION_UPDATE_TORRENTS: u8 = 3; +const ACTION_READ_PEERS: u8 = 4; +const ACTION_WRITE_PEERS: u8 = 5; +const ACTION_UPDATE_PEERS: u8 = 6; +fn start_torrents_memory_handler(config: &Configuration, sender: crossbeam_channel::Sender, receiver: crossbeam_channel::Receiver) -> Option> { + // This is our main memory handler, everything will be received, handled and send back. + return Some(tokio::spawn(async move { + loop { + // Wait for incoming data. + let data: DataStream = receiver.recv().unwrap(); + + // Lets check what action is given. 
+ match data.action { + ACTION_CLOSE_CHANNEL => { + info!("Ending the memory handler thread..."); + sender.send(DataStream{ + action: ACTION_CLOSE_CHANNEL, + data: Vec::new() + }); + break; + } + _ => {} + } + } + })); +} + fn start_torrent_periodic_job(config: Arc, tracker: Arc) -> Option> { let weak_tracker = std::sync::Arc::downgrade(&tracker); let interval = config.persistence_interval.unwrap_or(900); From 03eade11ee9d33dc5e786d9630464dc3c5394ccc Mon Sep 17 00:00:00 2001 From: Power2All Date: Fri, 15 Apr 2022 14:25:54 +0200 Subject: [PATCH 017/435] Moving around, and preparing a refactoring to channel based handling --- src/main.rs | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/src/main.rs b/src/main.rs index 2943e4ca7..b46fc4c71 100644 --- a/src/main.rs +++ b/src/main.rs @@ -28,8 +28,11 @@ async fn main() { } }; + // Enable logging handling + logging::setup_logging(&config); + // Start the thread where data is being exchanged for usaga - let (sender, receiver): (crossbeam_channel::Sender, crossbeam_channel::Receiver) = bounded(1); + let (sender, receiver): (crossbeam_channel::Sender, crossbeam_channel::Receiver) = bounded(0); let _torrents_memory_handler = start_torrents_memory_handler(&config, sender.clone(), receiver.clone()); // torrust config @@ -45,8 +48,6 @@ async fn main() { panic!("{}", e) })); - logging::setup_logging(&config); - // load persistent torrents if enabled if config.persistence { info!("Loading persistent torrents into memory..."); @@ -117,7 +118,7 @@ async fn main() { } // Closing down channel - sender.clone().send(DataStream{ + let _ = sender.clone().send(DataStream{ action: ACTION_CLOSE_CHANNEL, data: Vec::new() }); @@ -135,6 +136,8 @@ const ACTION_WRITE_PEERS: u8 = 5; const ACTION_UPDATE_PEERS: u8 = 6; fn start_torrents_memory_handler(config: &Configuration, sender: crossbeam_channel::Sender, receiver: crossbeam_channel::Receiver) -> Option> { // This is our main memory handler, 
everything will be received, handled and send back. + info!("Starting memory handler thread..."); + return Some(tokio::spawn(async move { loop { // Wait for incoming data. @@ -144,11 +147,29 @@ fn start_torrents_memory_handler(config: &Configuration, sender: crossbeam_chann match data.action { ACTION_CLOSE_CHANNEL => { info!("Ending the memory handler thread..."); - sender.send(DataStream{ + let _ = sender.send(DataStream{ action: ACTION_CLOSE_CHANNEL, data: Vec::new() }); break; + } + ACTION_READ_TORRENTS => { + + } + ACTION_WRITE_TORRENTS => { + + } + ACTION_UPDATE_TORRENTS => { + + } + ACTION_READ_PEERS => { + + } + ACTION_WRITE_PEERS => { + + } + ACTION_UPDATE_PEERS => { + } _ => {} } From 0a858dc067d6b1f4fbf48e1b8a373655df89d7b9 Mon Sep 17 00:00:00 2001 From: Power2All Date: Sat, 16 Apr 2022 12:48:55 +0200 Subject: [PATCH 018/435] Revert "Moving around, and preparing a refactoring to channel based handling" This reverts commit 03eade11ee9d33dc5e786d9630464dc3c5394ccc. --- src/main.rs | 31 +++++-------------------------- 1 file changed, 5 insertions(+), 26 deletions(-) diff --git a/src/main.rs b/src/main.rs index b46fc4c71..2943e4ca7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -28,11 +28,8 @@ async fn main() { } }; - // Enable logging handling - logging::setup_logging(&config); - // Start the thread where data is being exchanged for usaga - let (sender, receiver): (crossbeam_channel::Sender, crossbeam_channel::Receiver) = bounded(0); + let (sender, receiver): (crossbeam_channel::Sender, crossbeam_channel::Receiver) = bounded(1); let _torrents_memory_handler = start_torrents_memory_handler(&config, sender.clone(), receiver.clone()); // torrust config @@ -48,6 +45,8 @@ async fn main() { panic!("{}", e) })); + logging::setup_logging(&config); + // load persistent torrents if enabled if config.persistence { info!("Loading persistent torrents into memory..."); @@ -118,7 +117,7 @@ async fn main() { } // Closing down channel - let _ = sender.clone().send(DataStream{ 
+ sender.clone().send(DataStream{ action: ACTION_CLOSE_CHANNEL, data: Vec::new() }); @@ -136,8 +135,6 @@ const ACTION_WRITE_PEERS: u8 = 5; const ACTION_UPDATE_PEERS: u8 = 6; fn start_torrents_memory_handler(config: &Configuration, sender: crossbeam_channel::Sender, receiver: crossbeam_channel::Receiver) -> Option> { // This is our main memory handler, everything will be received, handled and send back. - info!("Starting memory handler thread..."); - return Some(tokio::spawn(async move { loop { // Wait for incoming data. @@ -147,29 +144,11 @@ fn start_torrents_memory_handler(config: &Configuration, sender: crossbeam_chann match data.action { ACTION_CLOSE_CHANNEL => { info!("Ending the memory handler thread..."); - let _ = sender.send(DataStream{ + sender.send(DataStream{ action: ACTION_CLOSE_CHANNEL, data: Vec::new() }); break; - } - ACTION_READ_TORRENTS => { - - } - ACTION_WRITE_TORRENTS => { - - } - ACTION_UPDATE_TORRENTS => { - - } - ACTION_READ_PEERS => { - - } - ACTION_WRITE_PEERS => { - - } - ACTION_UPDATE_PEERS => { - } _ => {} } From bd680c4e10ff3c2c672145a2bed59a98bf7f5d3b Mon Sep 17 00:00:00 2001 From: Power2All Date: Sat, 16 Apr 2022 12:49:00 +0200 Subject: [PATCH 019/435] Revert "First implementation and iteration of using crossbeam to eventually handle the torrent data, should improve and minimize the memory usage drastically, so to keep the memory object at a single place" This reverts commit 9159187f312bbbc529df662f05d5b2f5bce6fa50. 
--- Cargo.lock | 71 ----------------------------------------------------- Cargo.toml | 2 -- src/main.rs | 55 ----------------------------------------- 3 files changed, 128 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 00e23092c..eb04ba651 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -365,75 +365,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "crossbeam" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae5588f6b3c3cb05239e90bd110f257254aecd01e4635400391aeae07497845" -dependencies = [ - "cfg-if", - "crossbeam-channel", - "crossbeam-deque", - "crossbeam-epoch", - "crossbeam-queue", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-channel" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" -dependencies = [ - "cfg-if", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-deque" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" -dependencies = [ - "cfg-if", - "crossbeam-epoch", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" -dependencies = [ - "autocfg", - "cfg-if", - "crossbeam-utils", - "lazy_static", - "memoffset", - "scopeguard", -] - -[[package]] -name = "crossbeam-queue" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f25d8400f4a7a5778f0e4e52384a48cbd9b5c495d110786187fc750075277a2" -dependencies = [ - "cfg-if", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" -dependencies = [ - "cfg-if", - "lazy_static", -] - [[package]] name = "crypto-common" version = "0.1.3" @@ -2379,8 +2310,6 @@ dependencies = [ "byteorder", "chrono", "config", - "crossbeam", - "crossbeam-channel", "derive_more", "dhat", "fern", diff --git a/Cargo.toml b/Cargo.toml index 408a07b85..9871f20a4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,5 +39,3 @@ aquatic_udp_protocol = { git = "https://github.com/greatest-ape/aquatic" } futures = "0.3.21" async-trait = "0.1.52" dhat = "0.3.0" -crossbeam = "0.8.1" -crossbeam-channel = "0.5.4" diff --git a/src/main.rs b/src/main.rs index 2943e4ca7..63eb2ec0a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,6 +1,5 @@ use std::net::SocketAddr; use std::sync::Arc; -use crossbeam_channel::bounded; use log::{info}; use tokio::task::JoinHandle; use torrust_tracker::{Configuration, http_api_server, HttpApiConfig, HttpTrackerConfig, logging, TorrentTracker, UdpServer, UdpTrackerConfig}; @@ -10,28 +9,11 @@ use torrust_tracker::torrust_http_tracker::server::HttpServer; #[global_allocator] static ALLOC: dhat::Alloc = dhat::Alloc; -pub struct DataStream { - action: u8, - data: Vec<()> -} - #[tokio::main] async fn main() { #[cfg(feature = "dhat-heap")] let _profiler = dhat::Profiler::new_heap(); - // Loading configuration - let config = match Configuration::load_from_file() { - Ok(config) => config, - Err(error) => { - panic!("{}", error) - } - }; - - // Start the thread where data is being exchanged for usaga - let (sender, receiver): (crossbeam_channel::Sender, crossbeam_channel::Receiver) = bounded(1); - let _torrents_memory_handler = start_torrents_memory_handler(&config, sender.clone(), receiver.clone()); - // torrust config let config = match Configuration::load_from_file() { Ok(config) => Arc::new(config), @@ -115,47 +97,10 @@ async fn main() { let _ = tracker.periodic_saving().await; info!("Torrents saved"); } - - // Closing down channel - 
sender.clone().send(DataStream{ - action: ACTION_CLOSE_CHANNEL, - data: Vec::new() - }); } } } - -const ACTION_CLOSE_CHANNEL: u8 = 0; -const ACTION_READ_TORRENTS: u8 = 1; -const ACTION_WRITE_TORRENTS: u8 = 2; -const ACTION_UPDATE_TORRENTS: u8 = 3; -const ACTION_READ_PEERS: u8 = 4; -const ACTION_WRITE_PEERS: u8 = 5; -const ACTION_UPDATE_PEERS: u8 = 6; -fn start_torrents_memory_handler(config: &Configuration, sender: crossbeam_channel::Sender, receiver: crossbeam_channel::Receiver) -> Option> { - // This is our main memory handler, everything will be received, handled and send back. - return Some(tokio::spawn(async move { - loop { - // Wait for incoming data. - let data: DataStream = receiver.recv().unwrap(); - - // Lets check what action is given. - match data.action { - ACTION_CLOSE_CHANNEL => { - info!("Ending the memory handler thread..."); - sender.send(DataStream{ - action: ACTION_CLOSE_CHANNEL, - data: Vec::new() - }); - break; - } - _ => {} - } - } - })); -} - fn start_torrent_periodic_job(config: Arc, tracker: Arc) -> Option> { let weak_tracker = std::sync::Arc::downgrade(&tracker); let interval = config.persistence_interval.unwrap_or(900); From 509d487f8040a5471d7fa835c39df2aeb476e0de Mon Sep 17 00:00:00 2001 From: Power2All Date: Sat, 16 Apr 2022 13:02:20 +0200 Subject: [PATCH 020/435] Code cleanup and fixing MySQL --- Cargo.toml | 8 +++--- src/common.rs | 7 +++-- src/config.rs | 32 +++++++++++---------- src/database.rs | 12 ++++---- src/http_api_server.rs | 29 ++++++++++--------- src/key_manager.rs | 18 ++++++------ src/lib.rs | 14 ++++++---- src/logging.rs | 1 + src/main.rs | 6 ++-- src/mysql_database.rs | 42 +++++++++++++++------------- src/sqlite_database.rs | 42 +++++++++++++++------------- src/torrent.rs | 10 ++++--- src/torrust_http_tracker/errors.rs | 2 +- src/torrust_http_tracker/filters.rs | 26 +++++++++-------- src/torrust_http_tracker/handlers.rs | 12 ++++---- src/torrust_http_tracker/mod.rs | 16 +++++------ src/torrust_http_tracker/request.rs 
| 6 ++-- src/torrust_http_tracker/response.rs | 9 +++--- src/torrust_http_tracker/routes.rs | 12 ++++---- src/torrust_http_tracker/server.rs | 1 + src/torrust_udp_tracker/handlers.rs | 18 ++++++------ src/torrust_udp_tracker/mod.rs | 10 +++---- src/torrust_udp_tracker/request.rs | 7 +++-- src/torrust_udp_tracker/server.rs | 8 ++++-- src/tracker.rs | 42 +++++++++++++++------------- src/tracker_stats.rs | 9 +++--- src/utils.rs | 5 ++-- 27 files changed, 222 insertions(+), 182 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 9871f20a4..a10d548c2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,17 +14,17 @@ lto = "fat" dhat-heap = [] # if you are doing heap profiling [dependencies] -serde = {version = "1.0", features = ["derive"]} +serde = { version = "1.0", features = ["derive"] } serde_bencode = "^0.2.3" serde_bytes = "0.11" serde_json = "1.0.72" hex = "0.4.3" percent-encoding = "2.1.0" -warp = {version = "0.3", features = ["tls"]} -tokio = {version = "1.7", features = ["full"]} +warp = { version = "0.3", features = ["tls"] } +tokio = { version = "1.7", features = ["full"] } binascii = "0.1" toml = "0.5" -log = {version = "0.4", features = ["release_max_level_info"]} +log = { version = "0.4", features = ["release_max_level_info"] } fern = "0.6" chrono = "0.4" byteorder = "1" diff --git a/src/common.rs b/src/common.rs index 4d2f5ec71..5d69ed0e1 100644 --- a/src/common.rs +++ b/src/common.rs @@ -1,5 +1,5 @@ -use serde::{Deserialize, Serialize}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use serde::{Deserialize, Serialize}; pub const MAX_SCRAPE_TORRENTS: u8 = 74; pub const AUTH_KEY_LENGTH: usize = 32; @@ -19,7 +19,7 @@ pub enum AnnounceEventDef { Started, Stopped, Completed, - None + None, } #[derive(Serialize, Deserialize)] @@ -135,7 +135,7 @@ impl PeerId { String::from(std::str::from_utf8(bytes_out).unwrap()) } else { "".to_string() - } + }; } } @@ -218,6 +218,7 @@ impl PeerId { } } } + impl Serialize for PeerId { fn serialize(&self, serializer: 
S) -> Result where diff --git a/src/config.rs b/src/config.rs index 67078d608..7130d8c92 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,19 +1,21 @@ -pub use crate::tracker::TrackerMode; -use serde::{Serialize, Deserialize, Serializer}; use std; use std::collections::HashMap; use std::fs; -use toml; -use std::net::{IpAddr}; +use std::net::IpAddr; use std::path::Path; use std::str::FromStr; -use config::{ConfigError, Config, File}; + +use config::{Config, ConfigError, File}; +use serde::{Deserialize, Serialize, Serializer}; +use toml; + use crate::database::DatabaseDrivers; +pub use crate::tracker::TrackerMode; #[derive(Serialize, Deserialize, PartialEq)] pub enum TrackerServer { UDP, - HTTP + HTTP, } #[derive(Serialize, Deserialize, Debug)] @@ -30,7 +32,7 @@ pub struct HttpTrackerConfig { #[serde(serialize_with = "none_as_empty_string")] pub ssl_cert_path: Option, #[serde(serialize_with = "none_as_empty_string")] - pub ssl_key_path: Option + pub ssl_key_path: Option, } impl HttpTrackerConfig { @@ -113,7 +115,7 @@ impl Configuration { match Self::load(data.as_slice()) { Ok(cfg) => { Ok(cfg) - }, + } Err(e) => Err(ConfigurationError::ParseError(e)), } } @@ -156,21 +158,21 @@ impl Configuration { enabled: true, bind_address: String::from("127.0.0.1:1212"), access_tokens: [(String::from("admin"), String::from("MyAccessToken"))].iter().cloned().collect(), - } + }, }; configuration.udp_trackers.push( - UdpTrackerConfig{ + UdpTrackerConfig { enabled: false, - bind_address: String::from("0.0.0.0:6969") + bind_address: String::from("0.0.0.0:6969"), } ); configuration.http_trackers.push( - HttpTrackerConfig{ + HttpTrackerConfig { enabled: false, bind_address: String::from("0.0.0.0:6969"), ssl_enabled: false, ssl_cert_path: None, - ssl_key_path: None + ssl_key_path: None, } ); configuration @@ -188,7 +190,7 @@ impl Configuration { eprintln!("Creating config file.."); let config = Configuration::default(); let _ = config.save_to_file(); - return 
Err(ConfigError::Message(format!("Please edit the config.TOML in the root folder and restart the tracker."))) + return Err(ConfigError::Message(format!("Please edit the config.TOML in the root folder and restart the tracker."))); } let torrust_config: Configuration = config.try_into().map_err(|e| ConfigError::Message(format!("Errors while processing config: {}.", e)))?; @@ -196,7 +198,7 @@ impl Configuration { Ok(torrust_config) } - pub fn save_to_file(&self) -> Result<(), ()>{ + pub fn save_to_file(&self) -> Result<(), ()> { let toml_string = toml::to_string(self).expect("Could not encode TOML value"); fs::write("config.toml", toml_string).expect("Could not write to file!"); Ok(()) diff --git a/src/database.rs b/src/database.rs index 18bf41994..a90161e91 100644 --- a/src/database.rs +++ b/src/database.rs @@ -1,18 +1,20 @@ use std::collections::BTreeMap; -use crate::{InfoHash}; -use crate::key_manager::AuthKey; -use crate::sqlite_database::SqliteDatabase; + use async_trait::async_trait; use derive_more::{Display, Error}; use log::debug; +use serde::{Deserialize, Serialize}; + +use crate::InfoHash; +use crate::key_manager::AuthKey; use crate::mysql_database::MysqlDatabase; -use serde::{Serialize, Deserialize}; +use crate::sqlite_database::SqliteDatabase; use crate::torrent::TorrentEntry; #[derive(Serialize, Deserialize, Debug)] pub enum DatabaseDrivers { Sqlite3, - MySQL + MySQL, } pub fn connect_database(db_driver: &DatabaseDrivers, db_path: &str) -> Result, r2d2::Error> { diff --git a/src/http_api_server.rs b/src/http_api_server.rs index eff45fc33..89505cb09 100644 --- a/src/http_api_server.rs +++ b/src/http_api_server.rs @@ -1,10 +1,13 @@ -use crate::tracker::{TorrentTracker}; -use serde::{Deserialize, Serialize}; use std::cmp::min; use std::collections::{HashMap, HashSet}; use std::sync::Arc; -use warp::{filters, reply, reply::Reply, serve, Filter, Server}; + +use serde::{Deserialize, Serialize}; +use warp::{Filter, filters, reply, reply::Reply, serve, Server}; 
+ use crate::torrent::TorrentPeer; +use crate::tracker::TorrentTracker; + use super::common::*; #[derive(Deserialize, Debug)] @@ -52,7 +55,7 @@ enum ActionStatus<'a> { impl warp::reject::Reject for ActionStatus<'static> {} -fn authenticate(tokens: HashMap) -> impl Filter + Clone { +fn authenticate(tokens: HashMap) -> impl Filter + Clone { #[derive(Deserialize)] struct AuthToken { token: Option, @@ -69,7 +72,7 @@ fn authenticate(tokens: HashMap) -> impl Filter { if !tokens.contains(&token) { - return Err(warp::reject::custom(ActionStatus::Err { reason: "token not valid".into() })) + return Err(warp::reject::custom(ActionStatus::Err { reason: "token not valid".into() })); } Ok(()) @@ -81,7 +84,7 @@ fn authenticate(tokens: HashMap) -> impl Filter) -> Server + Clone + Send + Sync + 'static> { +pub fn build_server(tracker: Arc) -> Server + Clone + Send + Sync + 'static> { // GET /api/torrents?offset=:u32&limit=:u32 // View torrent list let api_torrents = tracker.clone(); @@ -131,7 +134,7 @@ pub fn build_server(tracker: Arc) -> Server| { async move { - let mut results = Stats{ + let mut results = Stats { torrents: 0, seeders: 0, completed: 0, @@ -147,7 +150,7 @@ pub fn build_server(tracker: Arc) -> Server = db @@ -195,7 +198,7 @@ pub fn build_server(tracker: Arc) -> Server) -> Server)| { async move { - match tracker.remove_torrent_from_whitelist(&info_hash).await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to remove torrent from whitelist".into() })) - } + match tracker.remove_torrent_from_whitelist(&info_hash).await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to remove torrent from whitelist".into() })) + } } }); diff --git a/src/key_manager.rs b/src/key_manager.rs index b1f16f1dc..507402358 100644 --- a/src/key_manager.rs +++ b/src/key_manager.rs @@ -1,10 +1,12 @@ -use super::common::AUTH_KEY_LENGTH; 
-use crate::utils::current_time; -use rand::{thread_rng, Rng}; +use derive_more::{Display, Error}; +use log::debug; +use rand::{Rng, thread_rng}; use rand::distributions::Alphanumeric; use serde::Serialize; -use log::debug; -use derive_more::{Display, Error}; + +use crate::utils::current_time; + +use super::common::AUTH_KEY_LENGTH; pub fn generate_auth_key(seconds_valid: u64) -> AuthKey { let key: String = thread_rng() @@ -23,8 +25,8 @@ pub fn generate_auth_key(seconds_valid: u64) -> AuthKey { pub fn verify_auth_key(auth_key: &AuthKey) -> Result<(), Error> { let current_time = current_time(); - if auth_key.valid_until.is_none() { return Err(Error::KeyInvalid) } - if auth_key.valid_until.unwrap() < current_time { return Err(Error::KeyExpired) } + if auth_key.valid_until.is_none() { return Err(Error::KeyInvalid); } + if auth_key.valid_until.unwrap() < current_time { return Err(Error::KeyExpired); } Ok(()) } @@ -67,7 +69,7 @@ pub enum Error { #[display(fmt = "Key is invalid.")] KeyInvalid, #[display(fmt = "Key has expired.")] - KeyExpired + KeyExpired, } impl From for Error { diff --git a/src/lib.rs b/src/lib.rs index 3d928aff4..b6cebfc5e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,3 +1,11 @@ +pub use torrust_http_tracker::server::*; +pub use torrust_udp_tracker::server::*; + +pub use self::common::*; +pub use self::config::*; +pub use self::http_api_server::*; +pub use self::tracker::*; + pub mod config; pub mod tracker; pub mod http_api_server; @@ -13,9 +21,3 @@ pub mod mysql_database; pub mod torrent; pub mod tracker_stats; -pub use self::config::*; -pub use torrust_udp_tracker::server::*; -pub use torrust_http_tracker::server::*; -pub use self::tracker::*; -pub use self::http_api_server::*; -pub use self::common::*; diff --git a/src/logging.rs b/src/logging.rs index 580e35094..c2e77551f 100644 --- a/src/logging.rs +++ b/src/logging.rs @@ -1,4 +1,5 @@ use log::info; + use crate::Configuration; pub fn setup_logging(cfg: &Configuration) { diff --git a/src/main.rs 
b/src/main.rs index 63eb2ec0a..b17ef14fe 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,7 +1,9 @@ use std::net::SocketAddr; use std::sync::Arc; -use log::{info}; + +use log::info; use tokio::task::JoinHandle; + use torrust_tracker::{Configuration, http_api_server, HttpApiConfig, HttpTrackerConfig, logging, TorrentTracker, UdpServer, UdpTrackerConfig}; use torrust_tracker::torrust_http_tracker::server::HttpServer; @@ -12,7 +14,7 @@ static ALLOC: dhat::Alloc = dhat::Alloc; #[tokio::main] async fn main() { #[cfg(feature = "dhat-heap")] - let _profiler = dhat::Profiler::new_heap(); + let _profiler = dhat::Profiler::new_heap(); // torrust config let config = match Configuration::load_from_file() { diff --git a/src/mysql_database.rs b/src/mysql_database.rs index 0597d46aa..be1fe649b 100644 --- a/src/mysql_database.rs +++ b/src/mysql_database.rs @@ -1,18 +1,20 @@ use std::collections::BTreeMap; -use crate::{InfoHash, AUTH_KEY_LENGTH, database}; -use log::debug; -use r2d2::{Pool}; -use crate::key_manager::AuthKey; use std::str::FromStr; -use crate::database::Database; + use async_trait::async_trait; +use log::debug; +use r2d2::Pool; use r2d2_mysql::mysql::{Opts, OptsBuilder, params, TxOpts}; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::MysqlConnectionManager; + +use crate::{AUTH_KEY_LENGTH, database, InfoHash}; +use crate::database::Database; +use crate::key_manager::AuthKey; use crate::torrent::TorrentEntry; pub struct MysqlDatabase { - pool: Pool + pool: Pool, } impl MysqlDatabase { @@ -79,8 +81,8 @@ impl Database for MysqlDatabase { let mut db_transaction = conn.start_transaction(TxOpts::default()).map_err(|_| database::Error::DatabaseError)?; for (info_hash, torrent_entry) in torrents { - let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - let _ = db_transaction.exec_drop("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", (info_hash.to_string(), completed.to_string())); + let (_seeders, completed, _leechers) = 
torrent_entry.get_stats(); + let _ = db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (?, ?) ON DUPLICATE KEY UPDATE completed = completed", (info_hash.to_string(), completed.to_string())); } let _ = db_transaction.commit(); @@ -93,13 +95,13 @@ impl Database for MysqlDatabase { match conn.exec_first::("SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", params! { info_hash => info_hash }) .map_err(|_| database::Error::QueryReturnedNoRows)? { - Some(info_hash) => { - Ok(InfoHash::from_str(&info_hash).unwrap()) - }, - None => { - Err(database::Error::InvalidQuery) - } + Some(info_hash) => { + Ok(InfoHash::from_str(&info_hash).unwrap()) } + None => { + Err(database::Error::InvalidQuery) + } + } } async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { @@ -110,7 +112,7 @@ impl Database for MysqlDatabase { match conn.exec_drop("INSERT INTO whitelist (info_hash) VALUES (:info_hash_str)", params! { info_hash_str }) { Ok(_) => { Ok(1) - }, + } Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) @@ -126,7 +128,7 @@ impl Database for MysqlDatabase { match conn.exec_drop("DELETE FROM whitelist WHERE info_hash = :info_hash", params! { info_hash }) { Ok(_) => { Ok(1) - }, + } Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) @@ -142,9 +144,9 @@ impl Database for MysqlDatabase { Some((key, valid_until)) => { Ok(AuthKey { key, - valid_until: Some(valid_until as u64) + valid_until: Some(valid_until as u64), }) - }, + } None => { Err(database::Error::InvalidQuery) } @@ -160,7 +162,7 @@ impl Database for MysqlDatabase { match conn.exec_drop("INSERT INTO `keys` (`key`, valid_until) VALUES (:key, :valid_until)", params! { key, valid_until }) { Ok(_) => { Ok(1) - }, + } Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) @@ -174,7 +176,7 @@ impl Database for MysqlDatabase { match conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! 
{ key }) { Ok(_) => { Ok(1) - }, + } Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) diff --git a/src/sqlite_database.rs b/src/sqlite_database.rs index 5facd99d8..fa519ffd0 100644 --- a/src/sqlite_database.rs +++ b/src/sqlite_database.rs @@ -1,17 +1,19 @@ use std::collections::BTreeMap; -use crate::{InfoHash, AUTH_KEY_LENGTH, database}; +use std::str::FromStr; + +use async_trait::async_trait; use log::debug; -use r2d2_sqlite::{SqliteConnectionManager}; -use r2d2::{Pool}; +use r2d2::Pool; +use r2d2_sqlite::SqliteConnectionManager; use r2d2_sqlite::rusqlite::NO_PARAMS; -use crate::key_manager::AuthKey; -use std::str::FromStr; + +use crate::{AUTH_KEY_LENGTH, database, InfoHash}; use crate::database::Database; -use async_trait::async_trait; +use crate::key_manager::AuthKey; use crate::torrent::TorrentEntry; pub struct SqliteDatabase { - pool: Pool + pool: Pool, } impl SqliteDatabase { @@ -68,7 +70,7 @@ impl Database for SqliteDatabase { Ok((info_hash, completed)) })?; - let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(|x| x.ok() ).collect(); + let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(|x| x.ok()).collect(); Ok(torrents) } @@ -79,8 +81,8 @@ impl Database for SqliteDatabase { let db_transaction = conn.transaction()?; for (info_hash, torrent_entry) in torrents { - let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - let _ = db_transaction.execute("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); + let (_seeders, completed, _leechers) = torrent_entry.get_stats(); + let _ = db_transaction.execute("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); } let _ = db_transaction.commit(); @@ -109,9 +111,9 @@ impl Database for SqliteDatabase { match conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", &[info_hash.to_string()]) { Ok(updated) => { - if updated > 0 { return 
Ok(updated) } + if updated > 0 { return Ok(updated); } Err(database::Error::QueryReturnedNoRows) - }, + } Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) @@ -124,9 +126,9 @@ impl Database for SqliteDatabase { match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", &[info_hash.to_string()]) { Ok(updated) => { - if updated > 0 { return Ok(updated) } + if updated > 0 { return Ok(updated); } Err(database::Error::QueryReturnedNoRows) - }, + } Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) @@ -146,7 +148,7 @@ impl Database for SqliteDatabase { Ok(AuthKey { key, - valid_until: Some(valid_until_i64 as u64) + valid_until: Some(valid_until_i64 as u64), }) } else { Err(database::Error::QueryReturnedNoRows) @@ -157,12 +159,12 @@ impl Database for SqliteDatabase { let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; match conn.execute("INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", - &[auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()] + &[auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()], ) { Ok(updated) => { - if updated > 0 { return Ok(updated) } + if updated > 0 { return Ok(updated); } Err(database::Error::QueryReturnedNoRows) - }, + } Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) @@ -175,9 +177,9 @@ impl Database for SqliteDatabase { match conn.execute("DELETE FROM keys WHERE key = ?", &[key]) { Ok(updated) => { - if updated > 0 { return Ok(updated) } + if updated > 0 { return Ok(updated); } Err(database::Error::QueryReturnedNoRows) - }, + } Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) diff --git a/src/torrent.rs b/src/torrent.rs index ef933d224..e2984a490 100644 --- a/src/torrent.rs +++ b/src/torrent.rs @@ -1,10 +1,12 @@ use std::borrow::Cow; use std::net::{IpAddr, SocketAddr}; + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; + use 
crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId}; -use crate::torrust_http_tracker::AnnounceRequest; use crate::common::{AnnounceEventDef, NumberOfBytesDef}; +use crate::torrust_http_tracker::AnnounceRequest; #[derive(PartialEq, Eq, Debug, Clone, Serialize)] pub struct TorrentPeer { @@ -33,7 +35,7 @@ impl TorrentPeer { uploaded: announce_request.bytes_uploaded, downloaded: announce_request.bytes_downloaded, left: announce_request.bytes_left, - event: announce_request.event + event: announce_request.event, } } @@ -58,7 +60,7 @@ impl TorrentPeer { uploaded: NumberOfBytes(announce_request.uploaded as i64), downloaded: NumberOfBytes(announce_request.downloaded as i64), left: NumberOfBytes(announce_request.left as i64), - event + event, } } diff --git a/src/torrust_http_tracker/errors.rs b/src/torrust_http_tracker/errors.rs index d8d6c7623..fe0cf26e6 100644 --- a/src/torrust_http_tracker/errors.rs +++ b/src/torrust_http_tracker/errors.rs @@ -1,5 +1,5 @@ -use warp::reject::Reject; use thiserror::Error; +use warp::reject::Reject; #[derive(Error, Debug)] pub enum ServerError { diff --git a/src/torrust_http_tracker/filters.rs b/src/torrust_http_tracker/filters.rs index 61fa20a45..5c4fc9743 100644 --- a/src/torrust_http_tracker/filters.rs +++ b/src/torrust_http_tracker/filters.rs @@ -2,43 +2,45 @@ use std::convert::Infallible; use std::net::{IpAddr, SocketAddr}; use std::str::FromStr; use std::sync::Arc; + use log::debug; use warp::{Filter, reject, Rejection}; + use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId, TorrentTracker}; use crate::key_manager::AuthKey; use crate::torrust_http_tracker::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError, WebResult}; /// Pass Arc along -pub fn with_tracker(tracker: Arc) -> impl Filter,), Error = Infallible> + Clone { +pub fn with_tracker(tracker: Arc) -> impl Filter, ), Error=Infallible> + Clone { warp::any() .map(move || tracker.clone()) } /// Check for infoHash -pub fn with_info_hash() -> impl Filter,), Error = 
Rejection> + Clone { +pub fn with_info_hash() -> impl Filter, ), Error=Rejection> + Clone { warp::filters::query::raw() .and_then(info_hashes) } /// Check for PeerId -pub fn with_peer_id() -> impl Filter + Clone { +pub fn with_peer_id() -> impl Filter + Clone { warp::filters::query::raw() .and_then(peer_id) } /// Pass Arc along -pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { +pub fn with_auth_key() -> impl Filter, ), Error=Infallible> + Clone { warp::path::param::() .map(|key: String| { AuthKey::from_string(&key) }) .or_else(|_| async { - Ok::<(Option,), Infallible>((None,)) + Ok::<(Option, ), Infallible>((None, )) }) } /// Check for PeerAddress -pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter + Clone { +pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter + Clone { warp::addr::remote() .and(warp::header::optional::("X-Forwarded-For")) .map(move |remote_addr: Option, x_forwarded_for: Option| { @@ -48,7 +50,7 @@ pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter impl Filter + Clone { +pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::filters::query::query::() .and(with_info_hash()) .and(with_peer_id()) @@ -57,7 +59,7 @@ pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter impl Filter + Clone { +pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::any() .and(with_info_hash()) .and(with_peer_addr(on_reverse_proxy)) @@ -129,11 +131,11 @@ async fn peer_id(raw_query: String) -> WebResult { /// Get PeerAddress from RemoteAddress or Forwarded async fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option)) -> WebResult { if !on_reverse_proxy && remote_addr.is_none() { - return Err(reject::custom(ServerError::AddressNotFound)) + return Err(reject::custom(ServerError::AddressNotFound)); } if on_reverse_proxy && x_forwarded_for.is_none() { - return Err(reject::custom(ServerError::AddressNotFound)) + return 
Err(reject::custom(ServerError::AddressNotFound)); } match on_reverse_proxy { @@ -150,7 +152,7 @@ async fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Opti debug!("{}", e); Err(reject::custom(ServerError::AddressNotFound)) }) - }, + } false => Ok(remote_addr.unwrap().ip()) } } @@ -166,7 +168,7 @@ async fn announce_request(announce_request_query: AnnounceRequestQuery, info_has port: announce_request_query.port, left: announce_request_query.left.unwrap_or(0), event: announce_request_query.event, - compact: announce_request_query.compact + compact: announce_request_query.compact, }) } diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index 8762faeaf..994b7b765 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/torrust_http_tracker/handlers.rs @@ -2,9 +2,11 @@ use std::collections::HashMap; use std::convert::Infallible; use std::net::IpAddr; use std::sync::Arc; + use log::debug; use warp::{reject, Rejection, Reply}; -use warp::http::{Response}; +use warp::http::Response; + use crate::{InfoHash, TorrentTracker}; use crate::key_manager::AuthKey; use crate::torrent::{TorrentError, TorrentPeer, TorrentStats}; @@ -34,7 +36,7 @@ pub async fn authenticate(info_hash: &InfoHash, auth_key: &Option, trac /// Handle announce request pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option, tracker: Arc) -> WebResult { if let Err(e) = authenticate(&announce_request.info_hash, &auth_key, tracker.clone()).await { - return Err(reject::custom(e)) + return Err(reject::custom(e)); } debug!("{:?}", announce_request); @@ -63,7 +65,7 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option { @@ -94,7 +96,7 @@ fn send_announce_response(announce_request: &AnnounceRequest, torrent_stats: Tor let http_peers: Vec = peers.iter().map(|peer| Peer { peer_id: peer.peer_id.to_string(), ip: peer.peer_addr.ip(), - port: peer.peer_addr.port() + port: peer.peer_addr.port(), }).collect(); let 
res = AnnounceResponse { @@ -102,7 +104,7 @@ fn send_announce_response(announce_request: &AnnounceRequest, torrent_stats: Tor interval_min, complete: torrent_stats.seeders, incomplete: torrent_stats.leechers, - peers: http_peers + peers: http_peers, }; // check for compact response request diff --git a/src/torrust_http_tracker/mod.rs b/src/torrust_http_tracker/mod.rs index ea6675dce..07d077577 100644 --- a/src/torrust_http_tracker/mod.rs +++ b/src/torrust_http_tracker/mod.rs @@ -1,3 +1,11 @@ +pub use self::errors::*; +pub use self::filters::*; +pub use self::handlers::*; +pub use self::request::*; +pub use self::response::*; +pub use self::routes::*; +pub use self::server::*; + pub mod server; pub mod request; pub mod response; @@ -6,13 +14,5 @@ pub mod routes; pub mod handlers; pub mod filters; -pub use self::server::*; -pub use self::request::*; -pub use self::response::*; -pub use self::errors::*; -pub use self::routes::*; -pub use self::handlers::*; -pub use self::filters::*; - pub type Bytes = u64; pub type WebResult = std::result::Result; diff --git a/src/torrust_http_tracker/request.rs b/src/torrust_http_tracker/request.rs index 0fb316671..487e53a13 100644 --- a/src/torrust_http_tracker/request.rs +++ b/src/torrust_http_tracker/request.rs @@ -1,5 +1,7 @@ -use std::net::{IpAddr}; -use serde::{Deserialize}; +use std::net::IpAddr; + +use serde::Deserialize; + use crate::{InfoHash, PeerId}; use crate::torrust_http_tracker::Bytes; diff --git a/src/torrust_http_tracker/response.rs b/src/torrust_http_tracker/response.rs index af27bc5e9..f57129cde 100644 --- a/src/torrust_http_tracker/response.rs +++ b/src/torrust_http_tracker/response.rs @@ -2,7 +2,8 @@ use std::collections::HashMap; use std::error::Error; use std::io::Write; use std::net::IpAddr; -use serde::{Serialize}; + +use serde::Serialize; #[derive(Serialize)] pub struct Peer { @@ -18,7 +19,7 @@ pub struct AnnounceResponse { //pub tracker_id: String, pub complete: u32, pub incomplete: u32, - pub peers: Vec + 
pub peers: Vec, } impl AnnounceResponse { @@ -75,7 +76,7 @@ pub struct ScrapeResponseEntry { #[derive(Serialize)] pub struct ScrapeResponse { - pub files: HashMap + pub files: HashMap, } impl ScrapeResponse { @@ -87,7 +88,7 @@ impl ScrapeResponse { #[derive(Serialize)] pub struct ErrorResponse { #[serde(rename = "failure reason")] - pub failure_reason: String + pub failure_reason: String, } impl ErrorResponse { diff --git a/src/torrust_http_tracker/routes.rs b/src/torrust_http_tracker/routes.rs index 4b4de722f..fb6bf5c16 100644 --- a/src/torrust_http_tracker/routes.rs +++ b/src/torrust_http_tracker/routes.rs @@ -1,11 +1,13 @@ use std::convert::Infallible; use std::sync::Arc; + use warp::{Filter, Rejection}; + use crate::TorrentTracker; -use crate::torrust_http_tracker::{handle_announce, send_error, handle_scrape, with_announce_request, with_auth_key, with_scrape_request, with_tracker}; +use crate::torrust_http_tracker::{handle_announce, handle_scrape, send_error, with_announce_request, with_auth_key, with_scrape_request, with_tracker}; /// All routes -pub fn routes(tracker: Arc,) -> impl Filter + Clone { +pub fn routes(tracker: Arc) -> impl Filter + Clone { root(tracker.clone()) .or(announce(tracker.clone())) .or(scrape(tracker.clone())) @@ -13,7 +15,7 @@ pub fn routes(tracker: Arc,) -> impl Filter -fn root(tracker: Arc,) -> impl Filter + Clone { +fn root(tracker: Arc) -> impl Filter + Clone { warp::any() .and(warp::filters::method::get()) .and(with_announce_request(tracker.config.on_reverse_proxy)) @@ -23,7 +25,7 @@ fn root(tracker: Arc,) -> impl Filter -fn announce(tracker: Arc,) -> impl Filter + Clone { +fn announce(tracker: Arc) -> impl Filter + Clone { warp::path::path("announce") .and(warp::filters::method::get()) .and(with_announce_request(tracker.config.on_reverse_proxy)) @@ -33,7 +35,7 @@ fn announce(tracker: Arc,) -> impl Filter -fn scrape(tracker: Arc,) -> impl Filter + Clone { +fn scrape(tracker: Arc) -> impl Filter + Clone { warp::path::path("scrape") 
.and(warp::filters::method::get()) .and(with_scrape_request(tracker.config.on_reverse_proxy)) diff --git a/src/torrust_http_tracker/server.rs b/src/torrust_http_tracker/server.rs index 69811b3d9..336670030 100644 --- a/src/torrust_http_tracker/server.rs +++ b/src/torrust_http_tracker/server.rs @@ -1,5 +1,6 @@ use std::net::SocketAddr; use std::sync::Arc; + use crate::TorrentTracker; use crate::torrust_http_tracker::routes; diff --git a/src/torrust_udp_tracker/handlers.rs b/src/torrust_udp_tracker/handlers.rs index df1a15451..3b8ece647 100644 --- a/src/torrust_udp_tracker/handlers.rs +++ b/src/torrust_udp_tracker/handlers.rs @@ -1,6 +1,8 @@ use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; + use aquatic_udp_protocol::{AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId}; + use crate::{InfoHash, MAX_SCRAPE_TORRENTS, TorrentTracker}; use crate::torrent::{TorrentError, TorrentPeer}; use crate::torrust_udp_tracker::errors::ServerError; @@ -103,15 +105,15 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc leechers: NumberOfPeers(torrent_stats.leechers as i32), seeders: NumberOfPeers(torrent_stats.seeders as i32), peers: peers.iter() - .filter_map(|peer| if let IpAddr::V4(ip) = peer.peer_addr.ip() { + .filter_map(|peer| if let IpAddr::V4(ip) = peer.peer_addr.ip() { Some(ResponsePeer:: { ip_address: ip, - port: Port(peer.peer_addr.port()) + port: Port(peer.peer_addr.port()), }) } else { None } - ).collect() + ).collect(), }) } else { Response::from(AnnounceResponse { @@ -120,15 +122,15 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc leechers: NumberOfPeers(torrent_stats.leechers as i32), seeders: NumberOfPeers(torrent_stats.seeders as i32), peers: peers.iter() - .filter_map(|peer| if let 
IpAddr::V6(ip) = peer.peer_addr.ip() { + .filter_map(|peer| if let IpAddr::V6(ip) = peer.peer_addr.ip() { Some(ResponsePeer:: { ip_address: ip, - port: Port(peer.peer_addr.port()) + port: Port(peer.peer_addr.port()), }) } else { None } - ).collect() + ).collect(), }) }; @@ -150,7 +152,7 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra for info_hash in request.info_hashes.iter() { let info_hash = InfoHash(info_hash.0); - if authenticate(&info_hash, tracker.clone()).await.is_err() { continue } + if authenticate(&info_hash, tracker.clone()).await.is_err() { continue; } let scrape_entry = match db.get(&info_hash) { Some(torrent_info) => { @@ -182,7 +184,7 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra Ok(Response::from(ScrapeResponse { transaction_id: request.transaction_id, - torrent_stats + torrent_stats, })) } diff --git a/src/torrust_udp_tracker/mod.rs b/src/torrust_udp_tracker/mod.rs index 6aa5fbce0..25780ba93 100644 --- a/src/torrust_udp_tracker/mod.rs +++ b/src/torrust_udp_tracker/mod.rs @@ -1,13 +1,13 @@ +pub use self::errors::*; +pub use self::handlers::*; +pub use self::request::*; +pub use self::server::*; + pub mod errors; pub mod request; pub mod server; pub mod handlers; -pub use self::errors::*; -pub use self::request::*; -pub use self::server::*; -pub use self::handlers::*; - pub type Bytes = u64; pub type Port = u16; pub type TransactionId = i64; diff --git a/src/torrust_udp_tracker/request.rs b/src/torrust_udp_tracker/request.rs index f3f67fdc1..6531f54b9 100644 --- a/src/torrust_udp_tracker/request.rs +++ b/src/torrust_udp_tracker/request.rs @@ -1,5 +1,6 @@ -use aquatic_udp_protocol::{AnnounceRequest}; -use crate::{InfoHash}; +use aquatic_udp_protocol::AnnounceRequest; + +use crate::InfoHash; // struct AnnounceRequest { // pub connection_id: i64, @@ -25,7 +26,7 @@ impl AnnounceRequestWrapper { pub fn new(announce_request: AnnounceRequest) -> Self { AnnounceRequestWrapper { 
announce_request: announce_request.clone(), - info_hash: InfoHash(announce_request.info_hash.0) + info_hash: InfoHash(announce_request.info_hash.0), } } } diff --git a/src/torrust_udp_tracker/server.rs b/src/torrust_udp_tracker/server.rs index cae1e5b94..8dc34d85d 100644 --- a/src/torrust_udp_tracker/server.rs +++ b/src/torrust_udp_tracker/server.rs @@ -1,10 +1,12 @@ use std::io::Cursor; -use std::net::{SocketAddr}; +use std::net::SocketAddr; use std::sync::Arc; -use aquatic_udp_protocol::{Response}; + +use aquatic_udp_protocol::Response; use log::{debug, info}; use tokio::net::UdpSocket; -use crate::{TorrentTracker}; + +use crate::TorrentTracker; use crate::torrust_udp_tracker::{handle_packet, MAX_PACKET_SIZE}; pub struct UdpServer { diff --git a/src/tracker.rs b/src/tracker.rs index 3a69e3e0b..4f31256ea 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -1,15 +1,17 @@ +use std::collections::btree_map::Entry; +use std::collections::BTreeMap; +use std::net::SocketAddr; +use std::sync::Arc; + +use log::info; use serde::{Deserialize, Serialize}; use serde; -use std::collections::BTreeMap; use tokio::sync::{RwLock, RwLockReadGuard}; -use crate::common::{InfoHash}; -use std::net::{SocketAddr}; + use crate::{Configuration, database, key_manager}; -use std::collections::btree_map::Entry; -use std::sync::Arc; -use log::info; +use crate::common::InfoHash; +use crate::database::Database; use crate::key_manager::AuthKey; -use crate::database::{Database}; use crate::key_manager::Error::KeyInvalid; use crate::torrent::{TorrentEntry, TorrentError, TorrentPeer, TorrentStats}; use crate::tracker_stats::{StatsTracker, TrackerStats}; @@ -39,7 +41,7 @@ pub struct TorrentTracker { updates: tokio::sync::RwLock>, shadow: tokio::sync::RwLock>, database: Box, - pub stats_tracker: StatsTracker + pub stats_tracker: StatsTracker, } impl TorrentTracker { @@ -55,7 +57,7 @@ impl TorrentTracker { updates: RwLock::new(std::collections::HashMap::new()), shadow: 
RwLock::new(std::collections::HashMap::new()), database, - stats_tracker + stats_tracker, }) } @@ -75,7 +77,7 @@ impl TorrentTracker { let auth_key = key_manager::generate_auth_key(seconds_valid); // add key to database - if let Err(error) = self.database.add_key_to_keys(&auth_key).await { return Err(error) } + if let Err(error) = self.database.add_key_to_keys(&auth_key).await { return Err(error); } Ok(auth_key) } @@ -91,18 +93,18 @@ impl TorrentTracker { pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { // no authentication needed in public mode - if self.is_public() { return Ok(()) } + if self.is_public() { return Ok(()); } // check if auth_key is set and valid if self.is_private() { match key { Some(key) => { if self.verify_auth_key(key).await.is_err() { - return Err(TorrentError::PeerKeyNotValid) + return Err(TorrentError::PeerKeyNotValid); } } None => { - return Err(TorrentError::PeerNotAuthenticated) + return Err(TorrentError::PeerNotAuthenticated); } } } @@ -110,7 +112,7 @@ impl TorrentTracker { // check if info_hash is whitelisted if self.is_whitelisted() { if self.is_info_hash_whitelisted(info_hash).await == false { - return Err(TorrentError::TorrentNotWhitelisted) + return Err(TorrentError::TorrentNotWhitelisted); } } @@ -155,7 +157,7 @@ impl TorrentTracker { pub async fn get_torrent_peers( &self, info_hash: &InfoHash, - peer_addr: &SocketAddr + peer_addr: &SocketAddr, ) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { @@ -205,7 +207,7 @@ impl TorrentTracker { let torrent_entry = TorrentEntry { peers: Default::default(), completed, - seeders + seeders, }; torrents.insert(info_hash.clone(), torrent_entry); } @@ -254,10 +256,10 @@ impl TorrentTracker { // Let's iterate through all torrents, and parse. 
for hash in torrent_hashes.iter() { - let mut torrent = TorrentEntry{ + let mut torrent = TorrentEntry { peers: BTreeMap::new(), completed: 0, - seeders: 0 + seeders: 0, }; let lock = self.torrents.write().await; @@ -307,10 +309,10 @@ impl TorrentTracker { // We get shadow data into local array to be handled. let mut shadow_copy: BTreeMap = BTreeMap::new(); for (infohash, completed) in shadow.iter() { - shadow_copy.insert(*infohash, TorrentEntry{ + shadow_copy.insert(*infohash, TorrentEntry { peers: Default::default(), completed: *completed, - seeders: 0 + seeders: 0, }); } diff --git a/src/tracker_stats.rs b/src/tracker_stats.rs index 1a6a71c2b..0bcd781ba 100644 --- a/src/tracker_stats.rs +++ b/src/tracker_stats.rs @@ -1,6 +1,7 @@ use std::sync::Arc; + use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; -use tokio::sync::mpsc::{Sender}; +use tokio::sync::mpsc::Sender; use tokio::sync::mpsc::error::SendError; const CHANNEL_BUFFER_SIZE: usize = 65_535; @@ -16,7 +17,7 @@ pub enum TrackerStatsEvent { Udp4Scrape, Udp6Connect, Udp6Announce, - Udp6Scrape + Udp6Scrape, } #[derive(Debug)] @@ -56,14 +57,14 @@ impl TrackerStats { pub struct StatsTracker { channel_sender: Option>, - pub stats: Arc> + pub stats: Arc>, } impl StatsTracker { pub fn new() -> Self { Self { channel_sender: None, - stats: Arc::new(RwLock::new(TrackerStats::new())) + stats: Arc::new(RwLock::new(TrackerStats::new())), } } diff --git a/src/utils.rs b/src/utils.rs index e3a8302df..fb2a94513 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,8 +1,9 @@ -use std::net::SocketAddr; -use std::time::SystemTime; use std::error::Error; use std::fmt::Write; use std::io::Cursor; +use std::net::SocketAddr; +use std::time::SystemTime; + use aquatic_udp_protocol::ConnectionId; use byteorder::{BigEndian, ReadBytesExt}; From 0604cb9ad5318adc57e366011a5da32ad8071f80 Mon Sep 17 00:00:00 2001 From: Power2All Date: Sat, 16 Apr 2022 14:07:12 +0200 Subject: [PATCH 021/435] FIxing MySQL support --- src/mysql_database.rs | 17 
+++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/mysql_database.rs b/src/mysql_database.rs index be1fe649b..523a54cf3 100644 --- a/src/mysql_database.rs +++ b/src/mysql_database.rs @@ -36,20 +36,20 @@ impl Database for MysqlDatabase { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( id integer PRIMARY KEY AUTO_INCREMENT, - info_hash VARCHAR(20) NOT NULL UNIQUE + info_hash BINARY(20) NOT NULL UNIQUE );".to_string(); let create_torrents_table = " CREATE TABLE IF NOT EXISTS torrents ( id integer PRIMARY KEY AUTO_INCREMENT, - info_hash VARCHAR(20) NOT NULL UNIQUE, + info_hash BINARY(20) NOT NULL UNIQUE, completed INTEGER DEFAULT 0 NOT NULL );".to_string(); let create_keys_table = format!(" CREATE TABLE IF NOT EXISTS `keys` ( `id` INT NOT NULL AUTO_INCREMENT, - `key` VARCHAR({}) NOT NULL, + `key` BINARY({}) NOT NULL, `valid_until` INT(10) NOT NULL, PRIMARY KEY (`id`), UNIQUE (`key`) @@ -67,7 +67,7 @@ impl Database for MysqlDatabase { async fn load_persistent_torrent_data(&self) -> Result, database::Error> { let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; - let torrents: Vec<(InfoHash, u32)> = conn.query_map("SELECT info_hash, completed FROM torrents", |(info_hash_string, completed): (String, u32)| { + let torrents: Vec<(InfoHash, u32)> = conn.query_map("SELECT HEX(info_hash), completed FROM torrents", |(info_hash_string, completed): (String, u32)| { let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); (info_hash, completed) }).map_err(|_| database::Error::QueryReturnedNoRows)?; @@ -82,7 +82,8 @@ impl Database for MysqlDatabase { for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - let _ = db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (?, ?) 
ON DUPLICATE KEY UPDATE completed = completed", (info_hash.to_string(), completed.to_string())); + let _ = db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX(?), ?) ON DUPLICATE KEY UPDATE completed = completed", (info_hash.to_string(), completed.to_string())); + debug!("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX('{}'), {}) ON DUPLICATE KEY UPDATE completed = completed", info_hash.to_string(), completed.to_string()); } let _ = db_transaction.commit(); @@ -93,7 +94,7 @@ impl Database for MysqlDatabase { async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; - match conn.exec_first::("SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", params! { info_hash => info_hash }) + match conn.exec_first::("SELECT HEX(info_hash) FROM whitelist WHERE info_hash = UNHEX(:info_hash)", params! { info_hash => info_hash }) .map_err(|_| database::Error::QueryReturnedNoRows)? { Some(info_hash) => { Ok(InfoHash::from_str(&info_hash).unwrap()) @@ -109,7 +110,7 @@ impl Database for MysqlDatabase { let info_hash_str = info_hash.to_string(); - match conn.exec_drop("INSERT INTO whitelist (info_hash) VALUES (:info_hash_str)", params! { info_hash_str }) { + match conn.exec_drop("INSERT INTO whitelist (info_hash) VALUES (UNHEX(:info_hash_str))", params! { info_hash_str }) { Ok(_) => { Ok(1) } @@ -125,7 +126,7 @@ impl Database for MysqlDatabase { let info_hash = info_hash.to_string(); - match conn.exec_drop("DELETE FROM whitelist WHERE info_hash = :info_hash", params! { info_hash }) { + match conn.exec_drop("DELETE FROM whitelist WHERE info_hash = UNHEX(:info_hash)", params! 
{ info_hash }) { Ok(_) => { Ok(1) } From 3a839cddb7962a433bba11838cba0e158357ac35 Mon Sep 17 00:00:00 2001 From: Power2All Date: Sun, 17 Apr 2022 13:24:31 +0200 Subject: [PATCH 022/435] Fixing SQL issue and locking problem --- src/mysql_database.rs | 10 +++++++--- src/tracker.rs | 28 ++++++++++++++++++---------- 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/src/mysql_database.rs b/src/mysql_database.rs index 523a54cf3..7ecae214a 100644 --- a/src/mysql_database.rs +++ b/src/mysql_database.rs @@ -9,7 +9,7 @@ use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::MysqlConnectionManager; use crate::{AUTH_KEY_LENGTH, database, InfoHash}; -use crate::database::Database; +use crate::database::{Database, Error}; use crate::key_manager::AuthKey; use crate::torrent::TorrentEntry; @@ -82,11 +82,15 @@ impl Database for MysqlDatabase { for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - let _ = db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX(?), ?) ON DUPLICATE KEY UPDATE completed = completed", (info_hash.to_string(), completed.to_string())); + if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX(?), ?) 
ON DUPLICATE KEY UPDATE completed = completed", (info_hash.to_string(), completed.to_string())).is_err() { + return Err(Error::InvalidQuery); + } debug!("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX('{}'), {}) ON DUPLICATE KEY UPDATE completed = completed", info_hash.to_string(), completed.to_string()); } - let _ = db_transaction.commit(); + if db_transaction.commit().is_err() { + return Err(Error::DatabaseError); + }; Ok(()) } diff --git a/src/tracker.rs b/src/tracker.rs index 4f31256ea..a95c35c24 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -243,6 +243,7 @@ impl TorrentTracker { // remove torrents without peers if enabled, and defragment memory pub async fn cleanup_torrents(&self) { info!("Cleaning torrents..."); + let lock = self.torrents.write().await; // First we create a mapping of all the torrent hashes in a vector, and we use this to iterate through the btreemap. @@ -292,23 +293,30 @@ impl TorrentTracker { pub async fn periodic_saving(&self) { // Get a lock for writing - let mut shadow = self.shadow.write().await; + // let mut shadow = self.shadow.write().await; // We will get the data and insert it into the shadow, while clearing updates. let mut updates = self.updates.write().await; - - for (infohash, completed) in updates.iter() { - if shadow.contains_key(infohash) { - shadow.remove(infohash); - } - shadow.insert(*infohash, *completed); + let mut updates_cloned: std::collections::HashMap = std::collections::HashMap::new(); + // let mut torrent_hashes: Vec = Vec::new(); + for (k, completed) in updates.iter() { + updates_cloned.insert(*k, *completed); } updates.clear(); drop(updates); - // We get shadow data into local array to be handled. 
+ let mut shadows = self.shadow.write().await; + for (k, completed) in updates_cloned.iter() { + if shadows.contains_key(k) { + shadows.remove(k); + } + shadows.insert(*k, *completed); + } + drop(updates_cloned); + + // We updated the shadow data from the updates data, let's handle shadow data as expected. let mut shadow_copy: BTreeMap = BTreeMap::new(); - for (infohash, completed) in shadow.iter() { + for (infohash, completed) in shadows.iter() { shadow_copy.insert(*infohash, TorrentEntry { peers: Default::default(), completed: *completed, @@ -317,7 +325,7 @@ impl TorrentTracker { } // Drop the lock - drop(shadow); + drop(shadows); // We will now save the data from the shadow into the database. // This should not put any strain on the server itself, other then the harddisk/ssd. From 93b053793af0a6e59fb25a921d675505885cc6ee Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sun, 17 Apr 2022 13:31:40 +0200 Subject: [PATCH 023/435] feat: added statistics config option --- src/config.rs | 2 ++ src/torrust_http_tracker/handlers.rs | 8 +++--- src/torrust_udp_tracker/handlers.rs | 12 ++++----- src/tracker.rs | 37 +++++++++++++++++----------- 4 files changed, 34 insertions(+), 25 deletions(-) diff --git a/src/config.rs b/src/config.rs index 67078d608..b46f29d69 100644 --- a/src/config.rs +++ b/src/config.rs @@ -56,6 +56,7 @@ pub struct Configuration { pub mode: TrackerMode, pub db_driver: DatabaseDrivers, pub db_path: String, + pub statistics: bool, pub persistence: bool, pub persistence_interval: Option, pub cleanup_interval: Option, @@ -141,6 +142,7 @@ impl Configuration { mode: TrackerMode::PublicMode, db_driver: DatabaseDrivers::Sqlite3, db_path: String::from("data.db"), + statistics: true, persistence: false, persistence_interval: Some(900), cleanup_interval: Some(600), diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs index 8762faeaf..8e8f2576f 100644 --- a/src/torrust_http_tracker/handlers.rs +++ 
b/src/torrust_http_tracker/handlers.rs @@ -49,8 +49,8 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option // send stats event match announce_request.peer_addr { - IpAddr::V4(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Tcp4Announce).await; } - IpAddr::V6(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Tcp6Announce).await; } + IpAddr::V4(_) => { tracker.send_stats_event(TrackerStatsEvent::Tcp4Announce).await; } + IpAddr::V6(_) => { tracker.send_stats_event(TrackerStatsEvent::Tcp6Announce).await; } } send_announce_response(&announce_request, torrent_stats, peers, announce_interval, tracker.config.announce_interval_min) @@ -82,8 +82,8 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option { tracker.stats_tracker.send_event(TrackerStatsEvent::Tcp4Scrape).await; } - IpAddr::V6(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Tcp6Scrape).await; } + IpAddr::V4(_) => { tracker.send_stats_event(TrackerStatsEvent::Tcp4Scrape).await; } + IpAddr::V6(_) => { tracker.send_stats_event(TrackerStatsEvent::Tcp6Scrape).await; } } send_scrape_response(files) diff --git a/src/torrust_udp_tracker/handlers.rs b/src/torrust_udp_tracker/handlers.rs index df1a15451..c94e2e917 100644 --- a/src/torrust_udp_tracker/handlers.rs +++ b/src/torrust_udp_tracker/handlers.rs @@ -75,8 +75,8 @@ pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, t // send stats event match remote_addr { - SocketAddr::V4(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp4Connect).await; } - SocketAddr::V6(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp6Connect).await; } + SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp4Connect).await; } + SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp6Connect).await; } } Ok(response) @@ -134,8 +134,8 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc // send 
stats event match remote_addr { - SocketAddr::V4(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp4Announce).await; } - SocketAddr::V6(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp6Announce).await; } + SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp4Announce).await; } + SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp6Announce).await; } } Ok(announce_response) @@ -176,8 +176,8 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra // send stats event match remote_addr { - SocketAddr::V4(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp4Scrape).await; } - SocketAddr::V6(_) => { tracker.stats_tracker.send_event(TrackerStatsEvent::Udp6Scrape).await; } + SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp4Scrape).await; } + SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp6Scrape).await; } } Ok(Response::from(ScrapeResponse { diff --git a/src/tracker.rs b/src/tracker.rs index defdecc1f..7a036c1af 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -6,16 +6,16 @@ use crate::common::{InfoHash}; use std::net::{SocketAddr}; use crate::{Configuration, database, key_manager}; use std::collections::btree_map::Entry; -use std::mem; use std::sync::Arc; use log::info; +use tokio::sync::mpsc::error::SendError; use crate::key_manager::AuthKey; use crate::database::{Database}; use crate::key_manager::Error::KeyInvalid; use crate::torrent::{TorrentEntry, TorrentError, TorrentPeer, TorrentStats}; -use crate::tracker_stats::{StatsTracker, TrackerStats}; +use crate::tracker_stats::{StatsTracker, TrackerStats, TrackerStatsEvent}; -#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)] +#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Debug)] pub enum TrackerMode { // Will track every new info hash and serve every peer. 
#[serde(rename = "public")] @@ -36,12 +36,13 @@ pub enum TrackerMode { pub struct TorrentTracker { + mode: TrackerMode, pub config: Arc, - torrents: tokio::sync::RwLock>, - updates: tokio::sync::RwLock>, - shadow: tokio::sync::RwLock>, - database: Box, - pub stats_tracker: StatsTracker + torrents: RwLock>, + updates: RwLock>, + shadow: RwLock>, + stats_tracker: StatsTracker, + database: Box } impl TorrentTracker { @@ -49,28 +50,30 @@ impl TorrentTracker { let database = database::connect_database(&config.db_driver, &config.db_path)?; let mut stats_tracker = StatsTracker::new(); - stats_tracker.run_worker(); + // starts a thread for updating tracker stats + if config.statistics { stats_tracker.run_worker(); } Ok(TorrentTracker { - config, + mode: config.mode, + config: config.clone(), torrents: RwLock::new(std::collections::BTreeMap::new()), updates: RwLock::new(std::collections::HashMap::new()), shadow: RwLock::new(std::collections::HashMap::new()), - database, - stats_tracker + stats_tracker, + database }) } pub fn is_public(&self) -> bool { - self.config.mode == TrackerMode::PublicMode + self.mode == TrackerMode::PublicMode } pub fn is_private(&self) -> bool { - self.config.mode == TrackerMode::PrivateMode || self.config.mode == TrackerMode::PrivateListedMode + self.mode == TrackerMode::PrivateMode || self.mode == TrackerMode::PrivateListedMode } pub fn is_whitelisted(&self) -> bool { - self.config.mode == TrackerMode::ListedMode || self.config.mode == TrackerMode::PrivateListedMode + self.mode == TrackerMode::ListedMode || self.mode == TrackerMode::PrivateListedMode } pub async fn generate_auth_key(&self, seconds_valid: u64) -> Result { @@ -227,6 +230,10 @@ impl TorrentTracker { self.stats_tracker.get_stats().await } + pub async fn send_stats_event(&self, event: TrackerStatsEvent) -> Option>> { + self.stats_tracker.send_event(event).await + } + pub async fn post_log(&self) { let torrents = self.torrents.read().await; let torrents_size = torrents.len(); From 
9d0714db67db0b17610f525516f2ff5d9570c256 Mon Sep 17 00:00:00 2001 From: Power2All Date: Sun, 17 Apr 2022 14:04:15 +0200 Subject: [PATCH 024/435] Updating README --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 929585c11..bb4649271 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,8 @@ Torrust Tracker is a lightweight but incredibly powerful and feature-rich BitTor * [X] Peer authentication using time-bound keys * [X] newTrackon check supported for both HTTP, UDP, where IPv4 and IPv6 is properly handled * [X] SQLite3 Persistent loading and saving of the torrent hashes and completed count +* [X] MySQL support added as engine option +* [X] Periodically saving added, interval can be configured ### Implemented BEPs * [BEP 3](https://www.bittorrent.org/beps/bep_0003.html): The BitTorrent Protocol From 9bc1ebb6057e94b7c70e4571f6e4ab67da6c142c Mon Sep 17 00:00:00 2001 From: Power2All Date: Sun, 17 Apr 2022 15:50:59 +0200 Subject: [PATCH 025/435] Fixing possible bug --- src/tracker.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/tracker.rs b/src/tracker.rs index 0e42f69e1..a67cb62e6 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -3,7 +3,7 @@ use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; -use log::info; +use log::{debug, info}; use serde::{Deserialize, Serialize}; use serde; use tokio::sync::{RwLock, RwLockReadGuard}; @@ -128,6 +128,7 @@ impl TorrentTracker { let torrents = self.database.load_persistent_torrent_data().await?; for torrent in torrents { + debug!("{:#?}", torrent); let _ = self.add_torrent(torrent.0, 0, torrent.1, 0).await; } @@ -308,7 +309,7 @@ impl TorrentTracker { let mut updates_cloned: std::collections::HashMap = std::collections::HashMap::new(); // let mut torrent_hashes: Vec = Vec::new(); for (k, completed) in updates.iter() { - updates_cloned.insert(*k, *completed); + updates_cloned.insert(k.clone(), completed.clone()); } 
updates.clear(); drop(updates); @@ -318,16 +319,16 @@ impl TorrentTracker { if shadows.contains_key(k) { shadows.remove(k); } - shadows.insert(*k, *completed); + shadows.insert(k.clone(), completed.clone()); } drop(updates_cloned); // We updated the shadow data from the updates data, let's handle shadow data as expected. let mut shadow_copy: BTreeMap = BTreeMap::new(); for (infohash, completed) in shadows.iter() { - shadow_copy.insert(*infohash, TorrentEntry { + shadow_copy.insert(infohash.clone(), TorrentEntry { peers: Default::default(), - completed: *completed, + completed: completed.clone(), seeders: 0, }); } From 0c332cf382cbc26c325cd79e557b894e3aa57d9c Mon Sep 17 00:00:00 2001 From: Power2All Date: Sun, 17 Apr 2022 16:24:04 +0200 Subject: [PATCH 026/435] Fixing a buggy SQLite3 and MySQL --- src/mysql_database.rs | 4 ++-- src/sqlite_database.rs | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/mysql_database.rs b/src/mysql_database.rs index 7ecae214a..eea2190a3 100644 --- a/src/mysql_database.rs +++ b/src/mysql_database.rs @@ -82,10 +82,10 @@ impl Database for MysqlDatabase { for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX(?), ?) ON DUPLICATE KEY UPDATE completed = completed", (info_hash.to_string(), completed.to_string())).is_err() { + if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX(?), ?) 
ON DUPLICATE KEY UPDATE completed = VALUES(completed)", (info_hash.to_string(), completed.to_string())).is_err() { return Err(Error::InvalidQuery); } - debug!("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX('{}'), {}) ON DUPLICATE KEY UPDATE completed = completed", info_hash.to_string(), completed.to_string()); + debug!("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX('{}'), {}) ON DUPLICATE KEY UPDATE completed = VALUES(completed)", info_hash.to_string(), completed.to_string()); } if db_transaction.commit().is_err() { diff --git a/src/sqlite_database.rs b/src/sqlite_database.rs index fa519ffd0..82bb9d4fc 100644 --- a/src/sqlite_database.rs +++ b/src/sqlite_database.rs @@ -82,7 +82,8 @@ impl Database for SqliteDatabase { for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - let _ = db_transaction.execute("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); + let _ = db_transaction.execute("INSERT OR IGNORE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); + let _ = db_transaction.execute("UPDATE torrents SET completed = ? 
WHERE info_hash = ?", &[completed.to_string(), info_hash.to_string()]); } let _ = db_transaction.commit(); From 84c6ca460c0f8d6a6bb1c63e1203bfe1530439a0 Mon Sep 17 00:00:00 2001 From: Power2All Date: Sun, 17 Apr 2022 17:04:52 +0200 Subject: [PATCH 027/435] Speeding up inserting --- src/mysql_database.rs | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/src/mysql_database.rs b/src/mysql_database.rs index eea2190a3..ac8bbb54a 100644 --- a/src/mysql_database.rs +++ b/src/mysql_database.rs @@ -80,12 +80,24 @@ impl Database for MysqlDatabase { let mut db_transaction = conn.start_transaction(TxOpts::default()).map_err(|_| database::Error::DatabaseError)?; + let mut insert_vector= vec![]; + for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX(?), ?) ON DUPLICATE KEY UPDATE completed = VALUES(completed)", (info_hash.to_string(), completed.to_string())).is_err() { + insert_vector.push("(UNHEX('" + info_hash.to_string() + "'), " + completed.to_string() + ")"); + if insert_vector.len() == 1000 { + if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES " + insert_vector.join(",") + "ON DUPLICATE KEY UPDATE completed = VALUES(completed)").is_err() { + return Err(Error::InvalidQuery); + } + insert_vector.clear(); + } + } + + if insert_vector.len() != 0 { + if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES " + insert_vector.join(",") + "ON DUPLICATE KEY UPDATE completed = VALUES(completed)").is_err() { return Err(Error::InvalidQuery); } - debug!("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX('{}'), {}) ON DUPLICATE KEY UPDATE completed = VALUES(completed)", info_hash.to_string(), completed.to_string()); + insert_vector.clear(); } if db_transaction.commit().is_err() { From 8ebd1e5f1164dff111049efcda15a43bd1277a1b Mon Sep 17 00:00:00 2001 
From: Power2All Date: Sun, 17 Apr 2022 17:13:19 +0200 Subject: [PATCH 028/435] Typo's --- src/mysql_database.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/mysql_database.rs b/src/mysql_database.rs index ac8bbb54a..15e2de633 100644 --- a/src/mysql_database.rs +++ b/src/mysql_database.rs @@ -84,9 +84,10 @@ impl Database for MysqlDatabase { for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - insert_vector.push("(UNHEX('" + info_hash.to_string() + "'), " + completed.to_string() + ")"); + insert_vector.push(format!("(UNHEX('{}'), {})", info_hash.to_string(), completed.to_string())); if insert_vector.len() == 1000 { - if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES " + insert_vector.join(",") + "ON DUPLICATE KEY UPDATE completed = VALUES(completed)").is_err() { + let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(",")); + if db_transaction.query_drop(query).is_err() { return Err(Error::InvalidQuery); } insert_vector.clear(); @@ -94,7 +95,8 @@ impl Database for MysqlDatabase { } if insert_vector.len() != 0 { - if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES " + insert_vector.join(",") + "ON DUPLICATE KEY UPDATE completed = VALUES(completed)").is_err() { + let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(",")); + if db_transaction.query_drop(query).is_err() { return Err(Error::InvalidQuery); } insert_vector.clear(); From 1b4decac685b374cf1b4abab9cae64fb98a21ebc Mon Sep 17 00:00:00 2001 From: Power2All Date: Sun, 17 Apr 2022 22:06:33 +0200 Subject: [PATCH 029/435] Looking for reason of timeout --- src/tracker.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/tracker.rs b/src/tracker.rs index 
a67cb62e6..c523cefba 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -308,6 +308,7 @@ impl TorrentTracker { let mut updates = self.updates.write().await; let mut updates_cloned: std::collections::HashMap = std::collections::HashMap::new(); // let mut torrent_hashes: Vec = Vec::new(); + info!("Copying updates to updates_cloned..."); for (k, completed) in updates.iter() { updates_cloned.insert(k.clone(), completed.clone()); } @@ -315,6 +316,7 @@ impl TorrentTracker { drop(updates); let mut shadows = self.shadow.write().await; + info!("Copying updates_cloned into the shadow to overwrite..."); for (k, completed) in updates_cloned.iter() { if shadows.contains_key(k) { shadows.remove(k); @@ -324,6 +326,7 @@ impl TorrentTracker { drop(updates_cloned); // We updated the shadow data from the updates data, let's handle shadow data as expected. + info!("Handle shadow_copy to be updated into SQL..."); let mut shadow_copy: BTreeMap = BTreeMap::new(); for (infohash, completed) in shadows.iter() { shadow_copy.insert(infohash.clone(), TorrentEntry { @@ -338,11 +341,15 @@ impl TorrentTracker { // We will now save the data from the shadow into the database. // This should not put any strain on the server itself, other then the harddisk/ssd. 
+ info!("Start saving shadow data into SQL..."); let result = self.database.save_persistent_torrent_data(&shadow_copy).await; if result.is_ok() { + info!("Done saving data to SQL and succeeded, emptying shadow..."); let mut shadow = self.shadow.write().await; shadow.clear(); drop(shadow); + } else { + info!("Done saving data to SQL and failed, not emptying shadow..."); } } } From 035c28d9cc6d52c83e08eda87b99b1562db60b75 Mon Sep 17 00:00:00 2001 From: Power2All Date: Sun, 17 Apr 2022 22:10:16 +0200 Subject: [PATCH 030/435] Improving locking --- src/tracker.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/tracker.rs b/src/tracker.rs index c523cefba..8ad1faf2f 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -315,19 +315,21 @@ impl TorrentTracker { updates.clear(); drop(updates); - let mut shadows = self.shadow.write().await; info!("Copying updates_cloned into the shadow to overwrite..."); for (k, completed) in updates_cloned.iter() { + let mut shadows = self.shadow.write().await; if shadows.contains_key(k) { shadows.remove(k); } shadows.insert(k.clone(), completed.clone()); + drop(shadows); } drop(updates_cloned); // We updated the shadow data from the updates data, let's handle shadow data as expected. info!("Handle shadow_copy to be updated into SQL..."); let mut shadow_copy: BTreeMap = BTreeMap::new(); + let mut shadows = self.shadow.write().await; for (infohash, completed) in shadows.iter() { shadow_copy.insert(infohash.clone(), TorrentEntry { peers: Default::default(), @@ -335,8 +337,6 @@ impl TorrentTracker { seeders: 0, }); } - - // Drop the lock drop(shadows); // We will now save the data from the shadow into the database. 
From aea2de04851058a58faf940f20cff65b315605f4 Mon Sep 17 00:00:00 2001 From: Power2All Date: Sun, 17 Apr 2022 22:12:11 +0200 Subject: [PATCH 031/435] Typo --- src/tracker.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tracker.rs b/src/tracker.rs index 8ad1faf2f..c0c25bc41 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -329,7 +329,7 @@ impl TorrentTracker { // We updated the shadow data from the updates data, let's handle shadow data as expected. info!("Handle shadow_copy to be updated into SQL..."); let mut shadow_copy: BTreeMap = BTreeMap::new(); - let mut shadows = self.shadow.write().await; + let shadows = self.shadow.read().await; for (infohash, completed) in shadows.iter() { shadow_copy.insert(infohash.clone(), TorrentEntry { peers: Default::default(), From 5d1ad9d959ad3de5cf3bac43142266a8a12ab418 Mon Sep 17 00:00:00 2001 From: WarmBeer Date: Mon, 25 Apr 2022 19:13:48 +0200 Subject: [PATCH 032/435] Revert "Development" --- README.md | 2 -- src/mysql_database.rs | 18 ++---------------- src/sqlite_database.rs | 3 +-- src/tracker.rs | 24 ++++++++---------------- 4 files changed, 11 insertions(+), 36 deletions(-) diff --git a/README.md b/README.md index bb4649271..929585c11 100644 --- a/README.md +++ b/README.md @@ -14,8 +14,6 @@ Torrust Tracker is a lightweight but incredibly powerful and feature-rich BitTor * [X] Peer authentication using time-bound keys * [X] newTrackon check supported for both HTTP, UDP, where IPv4 and IPv6 is properly handled * [X] SQLite3 Persistent loading and saving of the torrent hashes and completed count -* [X] MySQL support added as engine option -* [X] Periodically saving added, interval can be configured ### Implemented BEPs * [BEP 3](https://www.bittorrent.org/beps/bep_0003.html): The BitTorrent Protocol diff --git a/src/mysql_database.rs b/src/mysql_database.rs index 15e2de633..7ecae214a 100644 --- a/src/mysql_database.rs +++ b/src/mysql_database.rs @@ -80,26 +80,12 @@ impl Database for MysqlDatabase 
{ let mut db_transaction = conn.start_transaction(TxOpts::default()).map_err(|_| database::Error::DatabaseError)?; - let mut insert_vector= vec![]; - for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - insert_vector.push(format!("(UNHEX('{}'), {})", info_hash.to_string(), completed.to_string())); - if insert_vector.len() == 1000 { - let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(",")); - if db_transaction.query_drop(query).is_err() { - return Err(Error::InvalidQuery); - } - insert_vector.clear(); - } - } - - if insert_vector.len() != 0 { - let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(",")); - if db_transaction.query_drop(query).is_err() { + if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX(?), ?) ON DUPLICATE KEY UPDATE completed = completed", (info_hash.to_string(), completed.to_string())).is_err() { return Err(Error::InvalidQuery); } - insert_vector.clear(); + debug!("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX('{}'), {}) ON DUPLICATE KEY UPDATE completed = completed", info_hash.to_string(), completed.to_string()); } if db_transaction.commit().is_err() { diff --git a/src/sqlite_database.rs b/src/sqlite_database.rs index 82bb9d4fc..fa519ffd0 100644 --- a/src/sqlite_database.rs +++ b/src/sqlite_database.rs @@ -82,8 +82,7 @@ impl Database for SqliteDatabase { for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - let _ = db_transaction.execute("INSERT OR IGNORE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); - let _ = db_transaction.execute("UPDATE torrents SET completed = ? 
WHERE info_hash = ?", &[completed.to_string(), info_hash.to_string()]); + let _ = db_transaction.execute("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); } let _ = db_transaction.commit(); diff --git a/src/tracker.rs b/src/tracker.rs index c0c25bc41..0e42f69e1 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -3,7 +3,7 @@ use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; -use log::{debug, info}; +use log::info; use serde::{Deserialize, Serialize}; use serde; use tokio::sync::{RwLock, RwLockReadGuard}; @@ -128,7 +128,6 @@ impl TorrentTracker { let torrents = self.database.load_persistent_torrent_data().await?; for torrent in torrents { - debug!("{:#?}", torrent); let _ = self.add_torrent(torrent.0, 0, torrent.1, 0).await; } @@ -308,48 +307,41 @@ impl TorrentTracker { let mut updates = self.updates.write().await; let mut updates_cloned: std::collections::HashMap = std::collections::HashMap::new(); // let mut torrent_hashes: Vec = Vec::new(); - info!("Copying updates to updates_cloned..."); for (k, completed) in updates.iter() { - updates_cloned.insert(k.clone(), completed.clone()); + updates_cloned.insert(*k, *completed); } updates.clear(); drop(updates); - info!("Copying updates_cloned into the shadow to overwrite..."); + let mut shadows = self.shadow.write().await; for (k, completed) in updates_cloned.iter() { - let mut shadows = self.shadow.write().await; if shadows.contains_key(k) { shadows.remove(k); } - shadows.insert(k.clone(), completed.clone()); - drop(shadows); + shadows.insert(*k, *completed); } drop(updates_cloned); // We updated the shadow data from the updates data, let's handle shadow data as expected. 
- info!("Handle shadow_copy to be updated into SQL..."); let mut shadow_copy: BTreeMap = BTreeMap::new(); - let shadows = self.shadow.read().await; for (infohash, completed) in shadows.iter() { - shadow_copy.insert(infohash.clone(), TorrentEntry { + shadow_copy.insert(*infohash, TorrentEntry { peers: Default::default(), - completed: completed.clone(), + completed: *completed, seeders: 0, }); } + + // Drop the lock drop(shadows); // We will now save the data from the shadow into the database. // This should not put any strain on the server itself, other then the harddisk/ssd. - info!("Start saving shadow data into SQL..."); let result = self.database.save_persistent_torrent_data(&shadow_copy).await; if result.is_ok() { - info!("Done saving data to SQL and succeeded, emptying shadow..."); let mut shadow = self.shadow.write().await; shadow.clear(); drop(shadow); - } else { - info!("Done saving data to SQL and failed, not emptying shadow..."); } } } From cef2016c918825694ea51eaa4b4527c62af454f9 Mon Sep 17 00:00:00 2001 From: WarmBeer Date: Mon, 25 Apr 2022 20:06:29 +0200 Subject: [PATCH 033/435] Revert "Revert "Development"" --- README.md | 2 ++ src/mysql_database.rs | 18 ++++++++++++++++-- src/sqlite_database.rs | 3 ++- src/tracker.rs | 24 ++++++++++++++++-------- 4 files changed, 36 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 929585c11..bb4649271 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,8 @@ Torrust Tracker is a lightweight but incredibly powerful and feature-rich BitTor * [X] Peer authentication using time-bound keys * [X] newTrackon check supported for both HTTP, UDP, where IPv4 and IPv6 is properly handled * [X] SQLite3 Persistent loading and saving of the torrent hashes and completed count +* [X] MySQL support added as engine option +* [X] Periodically saving added, interval can be configured ### Implemented BEPs * [BEP 3](https://www.bittorrent.org/beps/bep_0003.html): The BitTorrent Protocol diff --git 
a/src/mysql_database.rs b/src/mysql_database.rs index 7ecae214a..15e2de633 100644 --- a/src/mysql_database.rs +++ b/src/mysql_database.rs @@ -80,12 +80,26 @@ impl Database for MysqlDatabase { let mut db_transaction = conn.start_transaction(TxOpts::default()).map_err(|_| database::Error::DatabaseError)?; + let mut insert_vector= vec![]; + for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX(?), ?) ON DUPLICATE KEY UPDATE completed = completed", (info_hash.to_string(), completed.to_string())).is_err() { + insert_vector.push(format!("(UNHEX('{}'), {})", info_hash.to_string(), completed.to_string())); + if insert_vector.len() == 1000 { + let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(",")); + if db_transaction.query_drop(query).is_err() { + return Err(Error::InvalidQuery); + } + insert_vector.clear(); + } + } + + if insert_vector.len() != 0 { + let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(",")); + if db_transaction.query_drop(query).is_err() { return Err(Error::InvalidQuery); } - debug!("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX('{}'), {}) ON DUPLICATE KEY UPDATE completed = completed", info_hash.to_string(), completed.to_string()); + insert_vector.clear(); } if db_transaction.commit().is_err() { diff --git a/src/sqlite_database.rs b/src/sqlite_database.rs index fa519ffd0..82bb9d4fc 100644 --- a/src/sqlite_database.rs +++ b/src/sqlite_database.rs @@ -82,7 +82,8 @@ impl Database for SqliteDatabase { for (info_hash, torrent_entry) in torrents { let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - let _ = db_transaction.execute("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, 
?)", &[info_hash.to_string(), completed.to_string()]); + let _ = db_transaction.execute("INSERT OR IGNORE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); + let _ = db_transaction.execute("UPDATE torrents SET completed = ? WHERE info_hash = ?", &[completed.to_string(), info_hash.to_string()]); } let _ = db_transaction.commit(); diff --git a/src/tracker.rs b/src/tracker.rs index 0e42f69e1..c0c25bc41 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -3,7 +3,7 @@ use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; -use log::info; +use log::{debug, info}; use serde::{Deserialize, Serialize}; use serde; use tokio::sync::{RwLock, RwLockReadGuard}; @@ -128,6 +128,7 @@ impl TorrentTracker { let torrents = self.database.load_persistent_torrent_data().await?; for torrent in torrents { + debug!("{:#?}", torrent); let _ = self.add_torrent(torrent.0, 0, torrent.1, 0).await; } @@ -307,41 +308,48 @@ impl TorrentTracker { let mut updates = self.updates.write().await; let mut updates_cloned: std::collections::HashMap = std::collections::HashMap::new(); // let mut torrent_hashes: Vec = Vec::new(); + info!("Copying updates to updates_cloned..."); for (k, completed) in updates.iter() { - updates_cloned.insert(*k, *completed); + updates_cloned.insert(k.clone(), completed.clone()); } updates.clear(); drop(updates); - let mut shadows = self.shadow.write().await; + info!("Copying updates_cloned into the shadow to overwrite..."); for (k, completed) in updates_cloned.iter() { + let mut shadows = self.shadow.write().await; if shadows.contains_key(k) { shadows.remove(k); } - shadows.insert(*k, *completed); + shadows.insert(k.clone(), completed.clone()); + drop(shadows); } drop(updates_cloned); // We updated the shadow data from the updates data, let's handle shadow data as expected. 
+ info!("Handle shadow_copy to be updated into SQL..."); let mut shadow_copy: BTreeMap = BTreeMap::new(); + let shadows = self.shadow.read().await; for (infohash, completed) in shadows.iter() { - shadow_copy.insert(*infohash, TorrentEntry { + shadow_copy.insert(infohash.clone(), TorrentEntry { peers: Default::default(), - completed: *completed, + completed: completed.clone(), seeders: 0, }); } - - // Drop the lock drop(shadows); // We will now save the data from the shadow into the database. // This should not put any strain on the server itself, other then the harddisk/ssd. + info!("Start saving shadow data into SQL..."); let result = self.database.save_persistent_torrent_data(&shadow_copy).await; if result.is_ok() { + info!("Done saving data to SQL and succeeded, emptying shadow..."); let mut shadow = self.shadow.write().await; shadow.clear(); drop(shadow); + } else { + info!("Done saving data to SQL and failed, not emptying shadow..."); } } } From 6578c3545a4254c505d92e568a3a5e7cc0b57016 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 2 May 2022 12:42:18 +0200 Subject: [PATCH 034/435] chore: removed dhat --- Cargo.lock | 81 ++--------------------------------------------------- Cargo.toml | 4 --- src/main.rs | 32 +++++++++------------ 3 files changed, 15 insertions(+), 102 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index eb04ba651..56ecd77a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,15 +2,6 @@ # It is not intended for manual editing. 
version = 3 -[[package]] -name = "addr2line" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" -dependencies = [ - "gimli", -] - [[package]] name = "adler" version = "1.0.2" @@ -95,21 +86,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" -[[package]] -name = "backtrace" -version = "0.3.64" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e121dee8023ce33ab248d9ce1493df03c3b38a659b240096fcbd7048ff9c31f" -dependencies = [ - "addr2line", - "cc", - "cfg-if", - "libc", - "miniz_oxide 0.4.4", - "object", - "rustc-demangle", -] - [[package]] name = "base-x" version = "0.2.8" @@ -399,21 +375,6 @@ dependencies = [ "syn", ] -[[package]] -name = "dhat" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47003dc9f6368a88e85956c3b2573a7e6872746a3e5d762a8885da3a136a0381" -dependencies = [ - "backtrace", - "lazy_static", - "parking_lot 0.11.2", - "rustc-hash", - "serde 1.0.136", - "serde_json", - "thousands", -] - [[package]] name = "digest" version = "0.9.0" @@ -498,7 +459,7 @@ dependencies = [ "crc32fast", "libc", "libz-sys", - "miniz_oxide 0.5.1", + "miniz_oxide", ] [[package]] @@ -712,12 +673,6 @@ dependencies = [ "wasi 0.10.0+wasi-snapshot-preview1", ] -[[package]] -name = "gimli" -version = "0.26.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" - [[package]] name = "glob" version = "0.3.0" @@ -1065,16 +1020,6 @@ dependencies = [ "unicase", ] -[[package]] -name = "miniz_oxide" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" -dependencies = [ - "adler", - 
"autocfg", -] - [[package]] name = "miniz_oxide" version = "0.5.1" @@ -1308,15 +1253,6 @@ dependencies = [ "libc", ] -[[package]] -name = "object" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" -dependencies = [ - "memchr", -] - [[package]] name = "once_cell" version = "1.10.0" @@ -1657,12 +1593,6 @@ dependencies = [ "serde 1.0.136", ] -[[package]] -name = "rustc-demangle" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" - [[package]] name = "rustc-hash" version = "1.1.0" @@ -2127,12 +2057,6 @@ dependencies = [ "syn", ] -[[package]] -name = "thousands" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bf63baf9f5039dadc247375c29eb13706706cfde997d0330d05aa63a77d8820" - [[package]] name = "time" version = "0.1.44" @@ -2302,7 +2226,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "2.2.1" +version = "2.3.0" dependencies = [ "aquatic_udp_protocol", "async-trait", @@ -2311,7 +2235,6 @@ dependencies = [ "chrono", "config", "derive_more", - "dhat", "fern", "futures", "hex", diff --git a/Cargo.toml b/Cargo.toml index a10d548c2..7be32ce89 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,9 +10,6 @@ edition = "2018" debug = 1 lto = "fat" -[features] -dhat-heap = [] # if you are doing heap profiling - [dependencies] serde = { version = "1.0", features = ["derive"] } serde_bencode = "^0.2.3" @@ -38,4 +35,3 @@ thiserror = "1.0" aquatic_udp_protocol = { git = "https://github.com/greatest-ape/aquatic" } futures = "0.3.21" async-trait = "0.1.52" -dhat = "0.3.0" diff --git a/src/main.rs b/src/main.rs index b17ef14fe..5e16f2e0c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,15 +7,8 @@ use tokio::task::JoinHandle; use torrust_tracker::{Configuration, http_api_server, HttpApiConfig, 
HttpTrackerConfig, logging, TorrentTracker, UdpServer, UdpTrackerConfig}; use torrust_tracker::torrust_http_tracker::server::HttpServer; -#[cfg(feature = "dhat-heap")] -#[global_allocator] -static ALLOC: dhat::Alloc = dhat::Alloc; - #[tokio::main] async fn main() { - #[cfg(feature = "dhat-heap")] - let _profiler = dhat::Profiler::new_heap(); - // torrust config let config = match Configuration::load_from_file() { Ok(config) => Arc::new(config), @@ -29,6 +22,7 @@ async fn main() { panic!("{}", e) })); + // initialize logging logging::setup_logging(&config); // load persistent torrents if enabled @@ -39,11 +33,11 @@ async fn main() { }; info!("Persistent torrents loaded."); - let _torrent_periodic_job = start_torrent_periodic_job(config.clone(), tracker.clone()).unwrap(); + let _torrent_periodic_job = start_torrent_periodic_job(config.clone(), tracker.clone()); } // start torrent cleanup job (periodically removes old peers) - let _torrent_cleanup_job = start_torrent_cleanup_job(config.clone(), tracker.clone()).unwrap(); + let _torrent_cleanup_job = start_torrent_cleanup_job(config.clone(), tracker.clone()); // start HTTP API server if config.http_api.enabled { @@ -80,7 +74,7 @@ async fn main() { } // start a thread to post statistics - let _ = start_statistics_job(config.clone(), tracker.clone()).unwrap(); + let _ = start_statistics_job(config.clone(), tracker.clone()); // handle the signals here tokio::select! { @@ -103,11 +97,11 @@ async fn main() { } } -fn start_torrent_periodic_job(config: Arc, tracker: Arc) -> Option> { +fn start_torrent_periodic_job(config: Arc, tracker: Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(&tracker); let interval = config.persistence_interval.unwrap_or(900); - return Some(tokio::spawn(async move { + tokio::spawn(async move { let interval = std::time::Duration::from_secs(interval); let mut interval = tokio::time::interval(interval); interval.tick().await; // first tick is immediate... 
@@ -122,14 +116,14 @@ fn start_torrent_periodic_job(config: Arc, tracker: Arc, tracker: Arc) -> Option> { +fn start_torrent_cleanup_job(config: Arc, tracker: Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(&tracker); let interval = config.cleanup_interval.unwrap_or(600); - return Some(tokio::spawn(async move { + tokio::spawn(async move { let interval = std::time::Duration::from_secs(interval); let mut interval = tokio::time::interval(interval); interval.tick().await; // first tick is immediate... @@ -142,14 +136,14 @@ fn start_torrent_cleanup_job(config: Arc, tracker: Arc, tracker: Arc) -> Option> { +fn start_statistics_job(config: Arc, tracker: Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(&tracker); let interval = config.log_interval.unwrap_or(60); - return Some(tokio::spawn(async move { + tokio::spawn(async move { let interval = std::time::Duration::from_secs(interval); let mut interval = tokio::time::interval(interval); interval.tick().await; // first tick is immediate... 
@@ -162,7 +156,7 @@ fn start_statistics_job(config: Arc, tracker: Arc break; } } - })); + }) } fn start_api_server(config: &HttpApiConfig, tracker: Arc) -> JoinHandle<()> { From 52785a0b58097151eb85c663269f9210fb51cea5 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 2 May 2022 23:04:37 +0200 Subject: [PATCH 035/435] refactor: renamed http and udp modules --- Cargo.toml | 2 +- src/{torrust_http_tracker => http}/errors.rs | 0 src/{torrust_http_tracker => http}/filters.rs | 2 +- src/{torrust_http_tracker => http}/handlers.rs | 2 +- src/{torrust_http_tracker => http}/mod.rs | 0 src/{torrust_http_tracker => http}/request.rs | 2 +- src/{torrust_http_tracker => http}/response.rs | 0 src/{torrust_http_tracker => http}/routes.rs | 2 +- src/{torrust_http_tracker => http}/server.rs | 2 +- src/lib.rs | 8 ++++---- src/main.rs | 2 +- src/torrent.rs | 2 +- src/{torrust_udp_tracker => udp}/errors.rs | 0 src/{torrust_udp_tracker => udp}/handlers.rs | 4 ++-- src/{torrust_udp_tracker => udp}/mod.rs | 0 src/{torrust_udp_tracker => udp}/request.rs | 0 src/{torrust_udp_tracker => udp}/server.rs | 2 +- 17 files changed, 15 insertions(+), 15 deletions(-) rename src/{torrust_http_tracker => http}/errors.rs (100%) rename src/{torrust_http_tracker => http}/filters.rs (98%) rename src/{torrust_http_tracker => http}/handlers.rs (96%) rename src/{torrust_http_tracker => http}/mod.rs (100%) rename src/{torrust_http_tracker => http}/request.rs (94%) rename src/{torrust_http_tracker => http}/response.rs (100%) rename src/{torrust_http_tracker => http}/routes.rs (90%) rename src/{torrust_http_tracker => http}/server.rs (97%) rename src/{torrust_udp_tracker => udp}/errors.rs (100%) rename src/{torrust_udp_tracker => udp}/handlers.rs (98%) rename src/{torrust_udp_tracker => udp}/mod.rs (100%) rename src/{torrust_udp_tracker => udp}/request.rs (100%) rename src/{torrust_udp_tracker => udp}/server.rs (97%) diff --git a/Cargo.toml b/Cargo.toml index 7be32ce89..cc97072a5 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "torrust-tracker" -version = "2.2.1" +version = "2.3.0" license = "AGPL-3.0" authors = ["Mick van Dijke "] description = "A feature rich BitTorrent tracker." diff --git a/src/torrust_http_tracker/errors.rs b/src/http/errors.rs similarity index 100% rename from src/torrust_http_tracker/errors.rs rename to src/http/errors.rs diff --git a/src/torrust_http_tracker/filters.rs b/src/http/filters.rs similarity index 98% rename from src/torrust_http_tracker/filters.rs rename to src/http/filters.rs index 5c4fc9743..5e0c3e068 100644 --- a/src/torrust_http_tracker/filters.rs +++ b/src/http/filters.rs @@ -8,7 +8,7 @@ use warp::{Filter, reject, Rejection}; use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId, TorrentTracker}; use crate::key_manager::AuthKey; -use crate::torrust_http_tracker::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError, WebResult}; +use crate::http::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError, WebResult}; /// Pass Arc along pub fn with_tracker(tracker: Arc) -> impl Filter, ), Error=Infallible> + Clone { diff --git a/src/torrust_http_tracker/handlers.rs b/src/http/handlers.rs similarity index 96% rename from src/torrust_http_tracker/handlers.rs rename to src/http/handlers.rs index 9021e8858..c81c93d9b 100644 --- a/src/torrust_http_tracker/handlers.rs +++ b/src/http/handlers.rs @@ -10,7 +10,7 @@ use warp::http::Response; use crate::{InfoHash, TorrentTracker}; use crate::key_manager::AuthKey; use crate::torrent::{TorrentError, TorrentPeer, TorrentStats}; -use crate::torrust_http_tracker::{AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, WebResult}; +use crate::http::{AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, WebResult}; use crate::tracker_stats::TrackerStatsEvent; use crate::utils::url_encode_bytes; diff --git 
a/src/torrust_http_tracker/mod.rs b/src/http/mod.rs similarity index 100% rename from src/torrust_http_tracker/mod.rs rename to src/http/mod.rs diff --git a/src/torrust_http_tracker/request.rs b/src/http/request.rs similarity index 94% rename from src/torrust_http_tracker/request.rs rename to src/http/request.rs index 487e53a13..28cd4750e 100644 --- a/src/torrust_http_tracker/request.rs +++ b/src/http/request.rs @@ -3,7 +3,7 @@ use std::net::IpAddr; use serde::Deserialize; use crate::{InfoHash, PeerId}; -use crate::torrust_http_tracker::Bytes; +use crate::http::Bytes; #[derive(Deserialize)] pub struct AnnounceRequestQuery { diff --git a/src/torrust_http_tracker/response.rs b/src/http/response.rs similarity index 100% rename from src/torrust_http_tracker/response.rs rename to src/http/response.rs diff --git a/src/torrust_http_tracker/routes.rs b/src/http/routes.rs similarity index 90% rename from src/torrust_http_tracker/routes.rs rename to src/http/routes.rs index fb6bf5c16..775d9ce79 100644 --- a/src/torrust_http_tracker/routes.rs +++ b/src/http/routes.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use warp::{Filter, Rejection}; use crate::TorrentTracker; -use crate::torrust_http_tracker::{handle_announce, handle_scrape, send_error, with_announce_request, with_auth_key, with_scrape_request, with_tracker}; +use crate::http::{handle_announce, handle_scrape, send_error, with_announce_request, with_auth_key, with_scrape_request, with_tracker}; /// All routes pub fn routes(tracker: Arc) -> impl Filter + Clone { diff --git a/src/torrust_http_tracker/server.rs b/src/http/server.rs similarity index 97% rename from src/torrust_http_tracker/server.rs rename to src/http/server.rs index 336670030..31a8e4664 100644 --- a/src/torrust_http_tracker/server.rs +++ b/src/http/server.rs @@ -2,7 +2,7 @@ use std::net::SocketAddr; use std::sync::Arc; use crate::TorrentTracker; -use crate::torrust_http_tracker::routes; +use crate::http::routes; /// Server that listens on HTTP, needs a 
TorrentTracker #[derive(Clone)] diff --git a/src/lib.rs b/src/lib.rs index b6cebfc5e..4f8e9a241 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,5 +1,5 @@ -pub use torrust_http_tracker::server::*; -pub use torrust_udp_tracker::server::*; +pub use http::server::*; +pub use udp::server::*; pub use self::common::*; pub use self::config::*; @@ -14,8 +14,8 @@ pub mod utils; pub mod sqlite_database; pub mod key_manager; pub mod logging; -pub mod torrust_udp_tracker; -pub mod torrust_http_tracker; +pub mod udp; +pub mod http; pub mod database; pub mod mysql_database; pub mod torrent; diff --git a/src/main.rs b/src/main.rs index 5e16f2e0c..b2bcb31d1 100644 --- a/src/main.rs +++ b/src/main.rs @@ -5,7 +5,7 @@ use log::info; use tokio::task::JoinHandle; use torrust_tracker::{Configuration, http_api_server, HttpApiConfig, HttpTrackerConfig, logging, TorrentTracker, UdpServer, UdpTrackerConfig}; -use torrust_tracker::torrust_http_tracker::server::HttpServer; +use torrust_tracker::http::server::HttpServer; #[tokio::main] async fn main() { diff --git a/src/torrent.rs b/src/torrent.rs index e2984a490..4e44a995a 100644 --- a/src/torrent.rs +++ b/src/torrent.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId}; use crate::common::{AnnounceEventDef, NumberOfBytesDef}; -use crate::torrust_http_tracker::AnnounceRequest; +use crate::http::AnnounceRequest; #[derive(PartialEq, Eq, Debug, Clone, Serialize)] pub struct TorrentPeer { diff --git a/src/torrust_udp_tracker/errors.rs b/src/udp/errors.rs similarity index 100% rename from src/torrust_udp_tracker/errors.rs rename to src/udp/errors.rs diff --git a/src/torrust_udp_tracker/handlers.rs b/src/udp/handlers.rs similarity index 98% rename from src/torrust_udp_tracker/handlers.rs rename to src/udp/handlers.rs index ff6e8981b..23fac0405 100644 --- a/src/torrust_udp_tracker/handlers.rs +++ b/src/udp/handlers.rs @@ -5,8 +5,8 @@ use aquatic_udp_protocol::{AnnounceInterval, AnnounceRequest, 
AnnounceResponse, use crate::{InfoHash, MAX_SCRAPE_TORRENTS, TorrentTracker}; use crate::torrent::{TorrentError, TorrentPeer}; -use crate::torrust_udp_tracker::errors::ServerError; -use crate::torrust_udp_tracker::request::AnnounceRequestWrapper; +use crate::udp::errors::ServerError; +use crate::udp::request::AnnounceRequestWrapper; use crate::tracker_stats::TrackerStatsEvent; use crate::utils::get_connection_id; diff --git a/src/torrust_udp_tracker/mod.rs b/src/udp/mod.rs similarity index 100% rename from src/torrust_udp_tracker/mod.rs rename to src/udp/mod.rs diff --git a/src/torrust_udp_tracker/request.rs b/src/udp/request.rs similarity index 100% rename from src/torrust_udp_tracker/request.rs rename to src/udp/request.rs diff --git a/src/torrust_udp_tracker/server.rs b/src/udp/server.rs similarity index 97% rename from src/torrust_udp_tracker/server.rs rename to src/udp/server.rs index 8dc34d85d..03745192b 100644 --- a/src/torrust_udp_tracker/server.rs +++ b/src/udp/server.rs @@ -7,7 +7,7 @@ use log::{debug, info}; use tokio::net::UdpSocket; use crate::TorrentTracker; -use crate::torrust_udp_tracker::{handle_packet, MAX_PACKET_SIZE}; +use crate::udp::{handle_packet, MAX_PACKET_SIZE}; pub struct UdpServer { socket: Arc, From 5a6eaa41a319bff79e4ab2d291fb5b9d99dfef75 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 5 May 2022 23:58:10 +0200 Subject: [PATCH 036/435] refactor: moved databases to own module --- src/config.rs | 2 +- src/{ => databases}/database.rs | 4 ++-- src/databases/mod.rs | 3 +++ src/{mysql_database.rs => databases/mysql.rs} | 5 +++-- src/{sqlite_database.rs => databases/sqlite.rs} | 5 +++-- src/lib.rs | 7 +++---- src/tracker.rs | 7 ++++--- 7 files changed, 19 insertions(+), 14 deletions(-) rename src/{ => databases}/database.rs (96%) create mode 100644 src/databases/mod.rs rename src/{mysql_database.rs => databases/mysql.rs} (98%) rename src/{sqlite_database.rs => databases/sqlite.rs} (98%) diff --git a/src/config.rs b/src/config.rs index 
ce3f59760..c6901564f 100644 --- a/src/config.rs +++ b/src/config.rs @@ -9,7 +9,7 @@ use config::{Config, ConfigError, File}; use serde::{Deserialize, Serialize, Serializer}; use toml; -use crate::database::DatabaseDrivers; +use crate::databases::database::DatabaseDrivers; pub use crate::tracker::TrackerMode; #[derive(Serialize, Deserialize, PartialEq)] diff --git a/src/database.rs b/src/databases/database.rs similarity index 96% rename from src/database.rs rename to src/databases/database.rs index a90161e91..b39a0ada1 100644 --- a/src/database.rs +++ b/src/databases/database.rs @@ -7,8 +7,8 @@ use serde::{Deserialize, Serialize}; use crate::InfoHash; use crate::key_manager::AuthKey; -use crate::mysql_database::MysqlDatabase; -use crate::sqlite_database::SqliteDatabase; +use crate::databases::mysql::MysqlDatabase; +use crate::databases::sqlite::SqliteDatabase; use crate::torrent::TorrentEntry; #[derive(Serialize, Deserialize, Debug)] diff --git a/src/databases/mod.rs b/src/databases/mod.rs new file mode 100644 index 000000000..119e34816 --- /dev/null +++ b/src/databases/mod.rs @@ -0,0 +1,3 @@ +pub mod mysql; +pub mod sqlite; +pub mod database; diff --git a/src/mysql_database.rs b/src/databases/mysql.rs similarity index 98% rename from src/mysql_database.rs rename to src/databases/mysql.rs index 15e2de633..df85402b3 100644 --- a/src/mysql_database.rs +++ b/src/databases/mysql.rs @@ -8,8 +8,9 @@ use r2d2_mysql::mysql::{Opts, OptsBuilder, params, TxOpts}; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::MysqlConnectionManager; -use crate::{AUTH_KEY_LENGTH, database, InfoHash}; -use crate::database::{Database, Error}; +use crate::{AUTH_KEY_LENGTH, InfoHash}; +use crate::databases::database::{Database, Error}; +use crate::databases::database; use crate::key_manager::AuthKey; use crate::torrent::TorrentEntry; diff --git a/src/sqlite_database.rs b/src/databases/sqlite.rs similarity index 98% rename from src/sqlite_database.rs rename to src/databases/sqlite.rs 
index 82bb9d4fc..b51fd6c51 100644 --- a/src/sqlite_database.rs +++ b/src/databases/sqlite.rs @@ -7,8 +7,9 @@ use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; use r2d2_sqlite::rusqlite::NO_PARAMS; -use crate::{AUTH_KEY_LENGTH, database, InfoHash}; -use crate::database::Database; +use crate::{AUTH_KEY_LENGTH, InfoHash}; +use crate::databases::database::Database; +use crate::databases::database; use crate::key_manager::AuthKey; use crate::torrent::TorrentEntry; diff --git a/src/lib.rs b/src/lib.rs index 4f8e9a241..e965fa88d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -11,13 +11,12 @@ pub mod tracker; pub mod http_api_server; pub mod common; pub mod utils; -pub mod sqlite_database; pub mod key_manager; pub mod logging; pub mod udp; pub mod http; -pub mod database; -pub mod mysql_database; pub mod torrent; pub mod tracker_stats; - +pub mod setup; +pub mod persistent_torrent_statistics; +pub mod databases; diff --git a/src/tracker.rs b/src/tracker.rs index c0c25bc41..b8c2b9931 100644 --- a/src/tracker.rs +++ b/src/tracker.rs @@ -8,10 +8,11 @@ use serde::{Deserialize, Serialize}; use serde; use tokio::sync::{RwLock, RwLockReadGuard}; -use crate::{Configuration, database, key_manager}; +use crate::{Configuration, key_manager}; use crate::common::InfoHash; -use crate::database::Database; +use crate::databases::database::Database; use tokio::sync::mpsc::error::SendError; +use crate::databases::database; use crate::key_manager::AuthKey; use crate::key_manager::Error::KeyInvalid; use crate::torrent::{TorrentEntry, TorrentError, TorrentPeer, TorrentStats}; @@ -124,7 +125,7 @@ impl TorrentTracker { } // Loading the torrents into memory - pub async fn load_torrents(&self) -> Result<(), database::Error> { + pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> { let torrents = self.database.load_persistent_torrent_data().await?; for torrent in torrents { From 4bf0c8b1b35b6df3faba79ed022dc468611022ae Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 
6 May 2022 23:21:51 +0200 Subject: [PATCH 037/435] refactor: major refactor of all code --- src/api/mod.rs | 1 + src/{http_api_server.rs => api/server.rs} | 20 +- src/config.rs | 6 +- src/databases/database.rs | 6 +- src/databases/mysql.rs | 6 +- src/databases/sqlite.rs | 6 +- src/http/filters.rs | 5 +- src/http/handlers.rs | 20 +- src/http/routes.rs | 10 +- src/http/server.rs | 16 +- src/jobs/http_tracker.rs | 27 +++ src/jobs/log_statistics.rs | 32 +++ src/jobs/mod.rs | 6 + src/jobs/persistent_torrent_statistics.rs | 38 ++++ src/jobs/torrent_cleanup.rs | 36 ++++ src/jobs/tracker_api.rs | 15 ++ src/jobs/udp_tracker.rs | 22 ++ src/lib.rs | 14 +- src/main.rs | 194 ++---------------- src/{ => protocol}/common.rs | 0 src/protocol/mod.rs | 2 + src/{ => protocol}/utils.rs | 0 src/setup.rs | 54 +++++ src/{key_manager.rs => tracker/key.rs} | 18 +- src/tracker/mod.rs | 5 + src/tracker/peer.rs | 83 ++++++++ .../statistics.rs} | 38 ++-- src/{ => tracker}/torrent.rs | 83 +------- src/{ => tracker}/tracker.rs | 67 +++--- src/udp/handlers.rs | 22 +- src/udp/server.rs | 9 +- 31 files changed, 475 insertions(+), 386 deletions(-) create mode 100644 src/api/mod.rs rename src/{http_api_server.rs => api/server.rs} (95%) create mode 100644 src/jobs/http_tracker.rs create mode 100644 src/jobs/log_statistics.rs create mode 100644 src/jobs/mod.rs create mode 100644 src/jobs/persistent_torrent_statistics.rs create mode 100644 src/jobs/torrent_cleanup.rs create mode 100644 src/jobs/tracker_api.rs create mode 100644 src/jobs/udp_tracker.rs rename src/{ => protocol}/common.rs (100%) create mode 100644 src/protocol/mod.rs rename src/{ => protocol}/utils.rs (100%) create mode 100644 src/setup.rs rename src/{key_manager.rs => tracker/key.rs} (85%) create mode 100644 src/tracker/mod.rs create mode 100644 src/tracker/peer.rs rename src/{tracker_stats.rs => tracker/statistics.rs} (67%) rename src/{ => tracker}/torrent.rs (57%) rename src/{ => tracker}/tracker.rs (90%) diff --git a/src/api/mod.rs 
b/src/api/mod.rs new file mode 100644 index 000000000..74f47ad34 --- /dev/null +++ b/src/api/mod.rs @@ -0,0 +1 @@ +pub mod server; diff --git a/src/http_api_server.rs b/src/api/server.rs similarity index 95% rename from src/http_api_server.rs rename to src/api/server.rs index 89505cb09..77264162c 100644 --- a/src/http_api_server.rs +++ b/src/api/server.rs @@ -1,14 +1,14 @@ use std::cmp::min; use std::collections::{HashMap, HashSet}; +use std::net::SocketAddr; use std::sync::Arc; use serde::{Deserialize, Serialize}; -use warp::{Filter, filters, reply, reply::Reply, serve, Server}; +use warp::{Filter, filters, reply, serve}; -use crate::torrent::TorrentPeer; -use crate::tracker::TorrentTracker; - -use super::common::*; +use crate::protocol::common::*; +use crate::peer::TorrentPeer; +use crate::tracker::tracker::TorrentTracker; #[derive(Deserialize, Debug)] struct TorrentInfoQuery { @@ -84,7 +84,7 @@ fn authenticate(tokens: HashMap) -> impl Filter) -> Server + Clone + Send + Sync + 'static> { +pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp::Future { // GET /api/torrents?offset=:u32&limit=:u32 // View torrent list let api_torrents = tracker.clone(); @@ -309,5 +309,11 @@ pub fn build_server(tracker: Arc) -> Server, - pub cleanup_interval: Option, + pub cleanup_interval: u64, pub cleanup_peerless: bool, pub external_ip: Option, pub announce_interval: u32, @@ -147,7 +147,7 @@ impl Configuration { statistics: true, persistence: false, persistence_interval: Some(900), - cleanup_interval: Some(600), + cleanup_interval: 600, cleanup_peerless: true, external_ip: Some(String::from("0.0.0.0")), announce_interval: 120, diff --git a/src/databases/database.rs b/src/databases/database.rs index b39a0ada1..fd9f2a19d 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -6,10 +6,10 @@ use log::debug; use serde::{Deserialize, Serialize}; use crate::InfoHash; -use crate::key_manager::AuthKey; +use crate::tracker::key::AuthKey; use 
crate::databases::mysql::MysqlDatabase; use crate::databases::sqlite::SqliteDatabase; -use crate::torrent::TorrentEntry; +use crate::tracker::torrent::TorrentEntry; #[derive(Serialize, Deserialize, Debug)] pub enum DatabaseDrivers { @@ -40,7 +40,7 @@ pub fn connect_database(db_driver: &DatabaseDrivers, db_path: &str) -> Result Result<(), Error>; - async fn load_persistent_torrent_data(&self) -> Result, Error>; + async fn load_persistent_torrents(&self) -> Result, Error>; async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), Error>; diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index df85402b3..e7f57a7a4 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -11,8 +11,8 @@ use r2d2_mysql::MysqlConnectionManager; use crate::{AUTH_KEY_LENGTH, InfoHash}; use crate::databases::database::{Database, Error}; use crate::databases::database; -use crate::key_manager::AuthKey; -use crate::torrent::TorrentEntry; +use crate::tracker::key::AuthKey; +use crate::tracker::torrent::TorrentEntry; pub struct MysqlDatabase { pool: Pool, @@ -65,7 +65,7 @@ impl Database for MysqlDatabase { Ok(()) } - async fn load_persistent_torrent_data(&self) -> Result, database::Error> { + async fn load_persistent_torrents(&self) -> Result, database::Error> { let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; let torrents: Vec<(InfoHash, u32)> = conn.query_map("SELECT HEX(info_hash), completed FROM torrents", |(info_hash_string, completed): (String, u32)| { diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index b51fd6c51..18a1d5a28 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -10,8 +10,8 @@ use r2d2_sqlite::rusqlite::NO_PARAMS; use crate::{AUTH_KEY_LENGTH, InfoHash}; use crate::databases::database::Database; use crate::databases::database; -use crate::key_manager::AuthKey; -use crate::torrent::TorrentEntry; +use crate::tracker::key::AuthKey; +use crate::tracker::torrent::TorrentEntry; 
pub struct SqliteDatabase { pool: Pool, @@ -59,7 +59,7 @@ impl Database for SqliteDatabase { .map(|_| ()) } - async fn load_persistent_torrent_data(&self) -> Result, database::Error> { + async fn load_persistent_torrents(&self) -> Result, database::Error> { let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; diff --git a/src/http/filters.rs b/src/http/filters.rs index 5e0c3e068..8f3ee04c0 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -6,9 +6,10 @@ use std::sync::Arc; use log::debug; use warp::{Filter, reject, Rejection}; -use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId, TorrentTracker}; -use crate::key_manager::AuthKey; +use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId}; +use crate::tracker::key::AuthKey; use crate::http::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError, WebResult}; +use crate::tracker::tracker::TorrentTracker; /// Pass Arc along pub fn with_tracker(tracker: Arc) -> impl Filter, ), Error=Infallible> + Clone { diff --git a/src/http/handlers.rs b/src/http/handlers.rs index c81c93d9b..d7e4859d9 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -7,12 +7,14 @@ use log::debug; use warp::{reject, Rejection, Reply}; use warp::http::Response; -use crate::{InfoHash, TorrentTracker}; -use crate::key_manager::AuthKey; -use crate::torrent::{TorrentError, TorrentPeer, TorrentStats}; +use crate::{InfoHash}; +use crate::tracker::key::AuthKey; +use crate::tracker::torrent::{TorrentError, TorrentStats}; use crate::http::{AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, WebResult}; -use crate::tracker_stats::TrackerStatsEvent; -use crate::utils::url_encode_bytes; +use crate::peer::TorrentPeer; +use crate::tracker::statistics::TrackerStatisticsEvent; +use crate::protocol::utils::url_encode_bytes; +use crate::tracker::tracker::TorrentTracker; /// 
Authenticate InfoHash using optional AuthKey pub async fn authenticate(info_hash: &InfoHash, auth_key: &Option, tracker: Arc) -> Result<(), ServerError> { @@ -51,8 +53,8 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option // send stats event match announce_request.peer_addr { - IpAddr::V4(_) => { tracker.send_stats_event(TrackerStatsEvent::Tcp4Announce).await; } - IpAddr::V6(_) => { tracker.send_stats_event(TrackerStatsEvent::Tcp6Announce).await; } + IpAddr::V4(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Tcp4Announce).await; } + IpAddr::V6(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Tcp6Announce).await; } } send_announce_response(&announce_request, torrent_stats, peers, announce_interval, tracker.config.announce_interval_min) @@ -84,8 +86,8 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option { tracker.send_stats_event(TrackerStatsEvent::Tcp4Scrape).await; } - IpAddr::V6(_) => { tracker.send_stats_event(TrackerStatsEvent::Tcp6Scrape).await; } + IpAddr::V4(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Tcp4Scrape).await; } + IpAddr::V6(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Tcp6Scrape).await; } } send_scrape_response(files) diff --git a/src/http/routes.rs b/src/http/routes.rs index 775d9ce79..a0b197f44 100644 --- a/src/http/routes.rs +++ b/src/http/routes.rs @@ -3,8 +3,14 @@ use std::sync::Arc; use warp::{Filter, Rejection}; -use crate::TorrentTracker; -use crate::http::{handle_announce, handle_scrape, send_error, with_announce_request, with_auth_key, with_scrape_request, with_tracker}; +use crate::http::handle_announce; +use crate::http::handle_scrape; +use crate::http::send_error; +use crate::http::with_announce_request; +use crate::http::with_auth_key; +use crate::http::with_scrape_request; +use crate::http::with_tracker; +use crate::tracker::tracker::TorrentTracker; /// All routes pub fn routes(tracker: Arc) -> impl Filter + Clone { diff --git 
a/src/http/server.rs b/src/http/server.rs index 31a8e4664..5a5b5f735 100644 --- a/src/http/server.rs +++ b/src/http/server.rs @@ -1,8 +1,8 @@ use std::net::SocketAddr; use std::sync::Arc; -use crate::TorrentTracker; use crate::http::routes; +use crate::tracker::tracker::TorrentTracker; /// Server that listens on HTTP, needs a TorrentTracker #[derive(Clone)] @@ -18,18 +18,19 @@ impl HttpServer { } /// Start the HttpServer - pub async fn start(&self, socket_addr: SocketAddr) { + pub fn start(&self, socket_addr: SocketAddr) -> impl warp::Future { let (_addr, server) = warp::serve(routes(self.tracker.clone())) .bind_with_graceful_shutdown(socket_addr, async move { tokio::signal::ctrl_c() .await - .expect("failed to listen to shutdown signal"); + .expect("Failed to listen to shutdown signal."); }); - tokio::task::spawn(server); + + server } /// Start the HttpServer in TLS mode - pub async fn start_tls(&self, socket_addr: SocketAddr, ssl_cert_path: &str, ssl_key_path: &str) { + pub fn start_tls(&self, socket_addr: SocketAddr, ssl_cert_path: String, ssl_key_path: String) -> impl warp::Future { let (_addr, server) = warp::serve(routes(self.tracker.clone())) .tls() .cert_path(ssl_cert_path) @@ -37,8 +38,9 @@ impl HttpServer { .bind_with_graceful_shutdown(socket_addr, async move { tokio::signal::ctrl_c() .await - .expect("failed to listen to shutdown signal"); + .expect("Failed to listen to shutdown signal."); }); - tokio::task::spawn(server); + + server } } diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs new file mode 100644 index 000000000..85f64200f --- /dev/null +++ b/src/jobs/http_tracker.rs @@ -0,0 +1,27 @@ +use std::net::SocketAddr; +use std::sync::Arc; +use log::{info, warn}; +use tokio::task::JoinHandle; +use crate::{HttpServer, HttpTrackerConfig}; +use crate::tracker::tracker::TorrentTracker; + +pub fn start_job(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { + let bind_addr = config.bind_address.parse::().unwrap(); + let ssl_enabled 
= config.ssl_enabled; + let ssl_cert_path = config.ssl_cert_path.clone(); + let ssl_key_path = config.ssl_key_path.clone(); + + tokio::spawn(async move { + let http_tracker = HttpServer::new(tracker); + + if !ssl_enabled { + info!("Starting HTTP server on: {}", bind_addr); + http_tracker.start(bind_addr).await; + } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { + info!("Starting HTTPS server on: {} (TLS)", bind_addr); + http_tracker.start_tls(bind_addr, ssl_cert_path.unwrap(), ssl_key_path.unwrap()).await; + } else { + warn!("Could not start HTTP tracker on: {}, missing SSL Cert or Key!", bind_addr); + } + }) +} diff --git a/src/jobs/log_statistics.rs b/src/jobs/log_statistics.rs new file mode 100644 index 000000000..f62399a47 --- /dev/null +++ b/src/jobs/log_statistics.rs @@ -0,0 +1,32 @@ +use std::sync::Arc; +use log::info; +use tokio::task::JoinHandle; +use crate::{Configuration}; +use crate::tracker::tracker::TorrentTracker; + +pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { + let weak_tracker = std::sync::Arc::downgrade(&tracker); + let interval = config.log_interval.unwrap_or(60); + + tokio::spawn(async move { + let interval = std::time::Duration::from_secs(interval); + let mut interval = tokio::time::interval(interval); + interval.tick().await; + + loop { + tokio::select! 
{ + _ = tokio::signal::ctrl_c() => { + info!("Stopping statistics logging job.."); + break; + } + _ = interval.tick() => { + if let Some(tracker) = weak_tracker.upgrade() { + tracker.post_log().await; + } else { + break; + } + } + } + } + }) +} diff --git a/src/jobs/mod.rs b/src/jobs/mod.rs new file mode 100644 index 000000000..a71fcb210 --- /dev/null +++ b/src/jobs/mod.rs @@ -0,0 +1,6 @@ +pub mod persistent_torrent_statistics; +pub mod torrent_cleanup; +pub mod tracker_api; +pub mod log_statistics; +pub mod http_tracker; +pub mod udp_tracker; diff --git a/src/jobs/persistent_torrent_statistics.rs b/src/jobs/persistent_torrent_statistics.rs new file mode 100644 index 000000000..7ebc80bdb --- /dev/null +++ b/src/jobs/persistent_torrent_statistics.rs @@ -0,0 +1,38 @@ +use std::sync::Arc; +use log::info; +use tokio::task::JoinHandle; +use crate::{Configuration}; +use crate::tracker::tracker::TorrentTracker; + +pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { + let weak_tracker = std::sync::Arc::downgrade(&tracker); + let interval = config.persistence_interval.unwrap_or(900); + + tokio::spawn(async move { + let interval = std::time::Duration::from_secs(interval); + let mut interval = tokio::time::interval(interval); + interval.tick().await; + + // periodically save torrents to database + loop { + tokio::select! 
{ + _ = tokio::signal::ctrl_c() => { + // Save before shutting down + tracker.periodic_saving().await; + info!("Stopping periodic torrent saving job.."); + break; + } + _ = interval.tick() => { + if let Some(tracker) = weak_tracker.upgrade() { + info!("Saving torrents to database..."); + tracker.periodic_saving().await; + info!("Periodic saving done."); + } else { + // If tracker no longer exists, stop job + break; + } + } + } + } + }) +} diff --git a/src/jobs/torrent_cleanup.rs b/src/jobs/torrent_cleanup.rs new file mode 100644 index 000000000..05e639728 --- /dev/null +++ b/src/jobs/torrent_cleanup.rs @@ -0,0 +1,36 @@ +use std::sync::Arc; +use chrono::Utc; +use log::info; +use tokio::task::JoinHandle; +use crate::{Configuration}; +use crate::tracker::tracker::TorrentTracker; + +pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { + let weak_tracker = std::sync::Arc::downgrade(&tracker); + let interval = config.cleanup_interval; + + tokio::spawn(async move { + let interval = std::time::Duration::from_secs(interval); + let mut interval = tokio::time::interval(interval); + interval.tick().await; + + loop { + tokio::select! 
{ + _ = tokio::signal::ctrl_c() => { + info!("Stopping torrent cleanup job.."); + break; + } + _ = interval.tick() => { + if let Some(tracker) = weak_tracker.upgrade() { + let start_time = Utc::now().time(); + info!("Cleaning up torrents.."); + tracker.cleanup_torrents().await; + info!("Cleaned up torrents in: {}ms", (Utc::now().time() - start_time).num_milliseconds()) + } else { + break; + } + } + } + } + }) +} diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs new file mode 100644 index 000000000..476a87a6a --- /dev/null +++ b/src/jobs/tracker_api.rs @@ -0,0 +1,15 @@ +use std::sync::Arc; +use log::info; +use tokio::task::JoinHandle; +use crate::{Configuration}; +use crate::api::server; +use crate::tracker::tracker::TorrentTracker; + +pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { + let bind_addr = config.http_api.bind_address.parse::().expect("Tracker API bind_address invalid."); + info!("Starting Torrust API server on: {}", bind_addr); + + tokio::spawn(async move { + server::start(bind_addr, tracker).await; + }) +} diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs new file mode 100644 index 000000000..32ef76ef4 --- /dev/null +++ b/src/jobs/udp_tracker.rs @@ -0,0 +1,22 @@ +use std::sync::Arc; +use log::{error, info, warn}; +use tokio::task::JoinHandle; +use crate::{UdpServer, UdpTrackerConfig}; +use crate::tracker::tracker::TorrentTracker; + +pub fn start_job(config: &UdpTrackerConfig, tracker: Arc) -> JoinHandle<()> { + let bind_addr = config.bind_address.clone(); + + tokio::spawn(async move { + match UdpServer::new(tracker, &bind_addr).await { + Ok(udp_server) => { + info!("Starting UDP server on: {}", bind_addr); + udp_server.start().await; + } + Err(e) => { + warn!("Could not start UDP tracker on: {}", bind_addr); + error!("{}", e); + } + } + }) +} diff --git a/src/lib.rs b/src/lib.rs index e965fa88d..245f4686c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,22 +1,18 @@ pub use http::server::*; pub use 
udp::server::*; -pub use self::common::*; +pub use protocol::common::*; pub use self::config::*; -pub use self::http_api_server::*; +pub use api::server::*; pub use self::tracker::*; pub mod config; pub mod tracker; -pub mod http_api_server; -pub mod common; -pub mod utils; -pub mod key_manager; pub mod logging; pub mod udp; pub mod http; -pub mod torrent; -pub mod tracker_stats; pub mod setup; -pub mod persistent_torrent_statistics; pub mod databases; +pub mod jobs; +pub mod api; +pub mod protocol; diff --git a/src/main.rs b/src/main.rs index b2bcb31d1..794cda4bb 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,15 +1,13 @@ -use std::net::SocketAddr; use std::sync::Arc; - use log::info; -use tokio::task::JoinHandle; - -use torrust_tracker::{Configuration, http_api_server, HttpApiConfig, HttpTrackerConfig, logging, TorrentTracker, UdpServer, UdpTrackerConfig}; -use torrust_tracker::http::server::HttpServer; +use torrust_tracker::Configuration; +use torrust_tracker::logging; +use torrust_tracker::setup; +use torrust_tracker::tracker::tracker::TorrentTracker; #[tokio::main] async fn main() { - // torrust config + // Initialize Torrust config let config = match Configuration::load_from_file() { Ok(config) => Arc::new(config), Err(error) => { @@ -17,184 +15,28 @@ async fn main() { } }; - // the singleton torrent tracker that gets passed to the HTTP and UDP server - let tracker = Arc::new(TorrentTracker::new(config.clone()).unwrap_or_else(|e| { - panic!("{}", e) - })); - - // initialize logging - logging::setup_logging(&config); - - // load persistent torrents if enabled - if config.persistence { - info!("Loading persistent torrents into memory..."); - if tracker.load_torrents().await.is_err() { - panic!("Could not load persistent torrents.") - }; - info!("Persistent torrents loaded."); - - let _torrent_periodic_job = start_torrent_periodic_job(config.clone(), tracker.clone()); - } - - // start torrent cleanup job (periodically removes old peers) - let 
_torrent_cleanup_job = start_torrent_cleanup_job(config.clone(), tracker.clone()); - - // start HTTP API server - if config.http_api.enabled { - let _api_server = start_api_server(&config.http_api, tracker.clone()); - } - - // used to send graceful shutdown signal to udp listeners - let (tx, rx) = tokio::sync::watch::channel(false); - let mut udp_server_handles = Vec::new(); - - // start the udp blocks - for udp_tracker in &config.udp_trackers { - if !udp_tracker.enabled { continue; } - - if tracker.is_private() { - panic!("Could not start UDP tracker on: {} while in {:?}. UDP is not safe for private trackers!", udp_tracker.bind_address, config.mode); - } - - udp_server_handles.push( - start_udp_tracker_server(&udp_tracker, tracker.clone(), rx.clone()).await - ) - } - - // start the http blocks - for http_tracker in &config.http_trackers { - if !http_tracker.enabled { continue; } - - // SSL requires a cert and a key - if http_tracker.ssl_enabled && !http_tracker.verify_ssl_cert_and_key_set() { - panic!("Could not start HTTP tracker on: {}, missing SSL Cert or Key!", http_tracker.bind_address); + // Initialize Torrust tracker + let tracker = match TorrentTracker::new(config.clone()) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) } + }; - let _ = start_http_tracker_server(&http_tracker, tracker.clone()); - } + // Initialize logging + logging::setup_logging(&config); - // start a thread to post statistics - let _ = start_statistics_job(config.clone(), tracker.clone()); + // Run jobs + let jobs = setup::setup(&config, tracker.clone()).await; // handle the signals here tokio::select! 
{ _ = tokio::signal::ctrl_c() => { info!("Torrust shutting down.."); - // send kill signal - let _ = tx.send(true); - - // await for all udp servers to shutdown - futures::future::join_all(udp_server_handles).await; - - // Save torrents if enabled - if config.persistence { - info!("Saving torrents into SQL from memory..."); - let _ = tracker.periodic_saving().await; - info!("Torrents saved"); - } + // Await for all jobs to shutdown + futures::future::join_all(jobs).await; + info!("Torrust successfully shutdown."); } } } - -fn start_torrent_periodic_job(config: Arc, tracker: Arc) -> JoinHandle<()> { - let weak_tracker = std::sync::Arc::downgrade(&tracker); - let interval = config.persistence_interval.unwrap_or(900); - - tokio::spawn(async move { - let interval = std::time::Duration::from_secs(interval); - let mut interval = tokio::time::interval(interval); - interval.tick().await; // first tick is immediate... - // periodically call tracker.cleanup_torrents() - loop { - interval.tick().await; - if let Some(tracker) = weak_tracker.upgrade() { - info!("Executing periodic saving..."); - tracker.periodic_saving().await; - info!("Periodic saving done."); - } else { - break; - } - } - }) -} - -fn start_torrent_cleanup_job(config: Arc, tracker: Arc) -> JoinHandle<()> { - let weak_tracker = std::sync::Arc::downgrade(&tracker); - let interval = config.cleanup_interval.unwrap_or(600); - - tokio::spawn(async move { - let interval = std::time::Duration::from_secs(interval); - let mut interval = tokio::time::interval(interval); - interval.tick().await; // first tick is immediate... 
- // periodically call tracker.cleanup_torrents() - loop { - interval.tick().await; - if let Some(tracker) = weak_tracker.upgrade() { - tracker.cleanup_torrents().await; - } else { - break; - } - } - }) -} - -fn start_statistics_job(config: Arc, tracker: Arc) -> JoinHandle<()> { - let weak_tracker = std::sync::Arc::downgrade(&tracker); - let interval = config.log_interval.unwrap_or(60); - - tokio::spawn(async move { - let interval = std::time::Duration::from_secs(interval); - let mut interval = tokio::time::interval(interval); - interval.tick().await; // first tick is immediate... - // periodically call tracker.cleanup_torrents() - loop { - interval.tick().await; - if let Some(tracker) = weak_tracker.upgrade() { - tracker.post_log().await; - } else { - break; - } - } - }) -} - -fn start_api_server(config: &HttpApiConfig, tracker: Arc) -> JoinHandle<()> { - info!("Starting HTTP API server on: {}", config.bind_address); - let bind_addr = config.bind_address.parse::().unwrap(); - - tokio::spawn(async move { - let server = http_api_server::build_server(tracker); - let _ = server.bind(bind_addr).await; - }) -} - -fn start_http_tracker_server(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { - let http_tracker = HttpServer::new(tracker); - let bind_addr = config.bind_address.parse::().unwrap(); - let ssl_enabled = config.ssl_enabled; - let ssl_cert_path = config.ssl_cert_path.clone(); - let ssl_key_path = config.ssl_key_path.clone(); - - tokio::spawn(async move { - // run with tls if ssl_enabled and cert and key path are set - if ssl_enabled { - info!("Starting HTTPS server on: {} (TLS)", bind_addr); - http_tracker.start_tls(bind_addr, ssl_cert_path.as_ref().unwrap(), ssl_key_path.as_ref().unwrap()).await; - } else { - info!("Starting HTTP server on: {}", bind_addr); - http_tracker.start(bind_addr).await; - } - }) -} - -async fn start_udp_tracker_server(config: &UdpTrackerConfig, tracker: Arc, rx: tokio::sync::watch::Receiver) -> JoinHandle<()> { - let 
udp_server = UdpServer::new(tracker, &config.bind_address).await.unwrap_or_else(|e| { - panic!("Could not start UDP server: {}", e); - }); - - info!("Starting UDP server on: {}", config.bind_address); - tokio::spawn(async move { - udp_server.start(rx).await; - }) -} diff --git a/src/common.rs b/src/protocol/common.rs similarity index 100% rename from src/common.rs rename to src/protocol/common.rs diff --git a/src/protocol/mod.rs b/src/protocol/mod.rs new file mode 100644 index 000000000..99cfd91e4 --- /dev/null +++ b/src/protocol/mod.rs @@ -0,0 +1,2 @@ +pub mod common; +pub mod utils; diff --git a/src/utils.rs b/src/protocol/utils.rs similarity index 100% rename from src/utils.rs rename to src/protocol/utils.rs diff --git a/src/setup.rs b/src/setup.rs new file mode 100644 index 000000000..b8d49614d --- /dev/null +++ b/src/setup.rs @@ -0,0 +1,54 @@ +use std::sync::Arc; +use log::{info, warn}; +use tokio::task::JoinHandle; +use crate::{Configuration}; +use crate::jobs::{http_tracker, log_statistics, persistent_torrent_statistics, torrent_cleanup, tracker_api, udp_tracker}; +use crate::tracker::tracker::TorrentTracker; + +pub async fn setup(config: &Configuration, tracker: Arc) -> Vec>{ + let mut jobs: Vec> = Vec::new(); + + // Load persistent torrents + if config.persistence { + info!("Loading persistent torrents into memory.."); + tracker.load_persistent_torrents().await.expect("Could not load persistent torrents."); + info!("Persistent torrents loaded."); + jobs.push(persistent_torrent_statistics::start_job(&config, tracker.clone())); + } + + // Start the UDP blocks + for udp_tracker_config in &config.udp_trackers { + if !udp_tracker_config.enabled { continue; } + + if tracker.is_private() { + warn!("Could not start UDP tracker on: {} while in {:?}. 
UDP is not safe for private trackers!", udp_tracker_config.bind_address, config.mode); + } else { + jobs.push(udp_tracker::start_job(&udp_tracker_config, tracker.clone())) + } + } + + // Start the HTTP blocks + for http_tracker_config in &config.http_trackers { + if !http_tracker_config.enabled { continue; } + jobs.push(http_tracker::start_job(&http_tracker_config, tracker.clone())); + } + + // Start HTTP API server + if config.http_api.enabled { + jobs.push(tracker_api::start_job(&config, tracker.clone())); + } + + // Remove torrents without peers, every interval + if config.cleanup_interval > 0 { + jobs.push(torrent_cleanup::start_job(&config, tracker.clone())); + } + + // Log detailed torrent stats + if let Some(log_interval) = config.log_interval { + if log_interval > 0 { + jobs.push(log_statistics::start_job(&config, tracker.clone())); + } + } + + jobs +} diff --git a/src/key_manager.rs b/src/tracker/key.rs similarity index 85% rename from src/key_manager.rs rename to src/tracker/key.rs index 507402358..2e2ca81f7 100644 --- a/src/key_manager.rs +++ b/src/tracker/key.rs @@ -4,9 +4,9 @@ use rand::{Rng, thread_rng}; use rand::distributions::Alphanumeric; use serde::Serialize; -use crate::utils::current_time; +use crate::protocol::utils::current_time; -use super::common::AUTH_KEY_LENGTH; +use crate::AUTH_KEY_LENGTH; pub fn generate_auth_key(seconds_valid: u64) -> AuthKey { let key: String = thread_rng() @@ -81,11 +81,11 @@ impl From for Error { #[cfg(test)] mod tests { - use crate::key_manager; + use crate::tracker::key; #[test] fn auth_key_from_buffer() { - let auth_key = key_manager::AuthKey::from_buffer( + let auth_key = key::AuthKey::from_buffer( [ 89, 90, 83, 108, 52, 108, 77, 90, @@ -104,7 +104,7 @@ mod tests { #[test] fn auth_key_from_string() { let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key = key_manager::AuthKey::from_string(key_string); + let auth_key = key::AuthKey::from_string(key_string); assert!(auth_key.is_some()); 
assert_eq!(auth_key.unwrap().key, key_string); @@ -112,16 +112,16 @@ mod tests { #[test] fn generate_valid_auth_key() { - let auth_key = key_manager::generate_auth_key(9999); + let auth_key = key::generate_auth_key(9999); - assert!(key_manager::verify_auth_key(&auth_key).is_ok()); + assert!(key::verify_auth_key(&auth_key).is_ok()); } #[test] fn generate_expired_auth_key() { - let mut auth_key = key_manager::generate_auth_key(0); + let mut auth_key = key::generate_auth_key(0); auth_key.valid_until = Some(0); - assert!(key_manager::verify_auth_key(&auth_key).is_err()); + assert!(key::verify_auth_key(&auth_key).is_err()); } } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs new file mode 100644 index 000000000..6115bc23e --- /dev/null +++ b/src/tracker/mod.rs @@ -0,0 +1,5 @@ +pub mod tracker; +pub mod statistics; +pub mod peer; +pub mod torrent; +pub mod key; diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs new file mode 100644 index 000000000..2a54e2fd2 --- /dev/null +++ b/src/tracker/peer.rs @@ -0,0 +1,83 @@ +use std::net::{IpAddr, SocketAddr}; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use serde; +use serde::{Serialize}; +use crate::protocol::common::{NumberOfBytesDef, AnnounceEventDef}; +use crate::http::AnnounceRequest; +use crate::PeerId; + +#[derive(PartialEq, Eq, Debug, Clone, Serialize)] +pub struct TorrentPeer { + pub peer_id: PeerId, + pub peer_addr: SocketAddr, + #[serde(serialize_with = "ser_instant")] + pub updated: std::time::Instant, + #[serde(with = "NumberOfBytesDef")] + pub uploaded: NumberOfBytes, + #[serde(with = "NumberOfBytesDef")] + pub downloaded: NumberOfBytes, + #[serde(with = "NumberOfBytesDef")] + pub left: NumberOfBytes, + #[serde(with = "AnnounceEventDef")] + pub event: AnnounceEvent, +} + +impl TorrentPeer { + pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { + let peer_addr = 
TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); + + TorrentPeer { + peer_id: PeerId(announce_request.peer_id.0), + peer_addr, + updated: std::time::Instant::now(), + uploaded: announce_request.bytes_uploaded, + downloaded: announce_request.bytes_downloaded, + left: announce_request.bytes_left, + event: announce_request.event, + } + } + + pub fn from_http_announce_request(announce_request: &AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { + let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); + + let event: AnnounceEvent = if let Some(event) = &announce_request.event { + match event.as_ref() { + "started" => AnnounceEvent::Started, + "stopped" => AnnounceEvent::Stopped, + "completed" => AnnounceEvent::Completed, + _ => AnnounceEvent::None + } + } else { + AnnounceEvent::None + }; + + TorrentPeer { + peer_id: announce_request.peer_id.clone(), + peer_addr, + updated: std::time::Instant::now(), + uploaded: NumberOfBytes(announce_request.uploaded as i64), + downloaded: NumberOfBytes(announce_request.downloaded as i64), + left: NumberOfBytes(announce_request.left as i64), + event, + } + } + + // potentially substitute localhost ip with external ip + pub fn peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip: IpAddr, host_opt_ip: Option, port: u16) -> SocketAddr { + if remote_ip.is_loopback() && host_opt_ip.is_some() { + SocketAddr::new(host_opt_ip.unwrap(), port) + } else { + SocketAddr::new(remote_ip, port) + } + } + + pub(crate) fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } + + pub(crate) fn is_completed(&self) -> bool { + self.event == AnnounceEvent::Completed + } +} + +fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { + ser.serialize_u64(inst.elapsed().as_millis() as u64) +} diff --git a/src/tracker_stats.rs b/src/tracker/statistics.rs similarity index 67% rename from 
src/tracker_stats.rs rename to src/tracker/statistics.rs index 0bcd781ba..5822d2d4e 100644 --- a/src/tracker_stats.rs +++ b/src/tracker/statistics.rs @@ -7,7 +7,7 @@ use tokio::sync::mpsc::error::SendError; const CHANNEL_BUFFER_SIZE: usize = 65_535; #[derive(Debug)] -pub enum TrackerStatsEvent { +pub enum TrackerStatisticsEvent { Tcp4Announce, Tcp4Scrape, Tcp6Announce, @@ -21,7 +21,7 @@ pub enum TrackerStatsEvent { } #[derive(Debug)] -pub struct TrackerStats { +pub struct TrackerStatistics { pub tcp4_connections_handled: u64, pub tcp4_announces_handled: u64, pub tcp4_scrapes_handled: u64, @@ -36,7 +36,7 @@ pub struct TrackerStats { pub udp6_scrapes_handled: u64, } -impl TrackerStats { +impl TrackerStatistics { pub fn new() -> Self { Self { tcp4_connections_handled: 0, @@ -56,23 +56,23 @@ impl TrackerStats { } pub struct StatsTracker { - channel_sender: Option>, - pub stats: Arc>, + channel_sender: Option>, + pub stats: Arc>, } impl StatsTracker { pub fn new() -> Self { Self { channel_sender: None, - stats: Arc::new(RwLock::new(TrackerStats::new())), + stats: Arc::new(RwLock::new(TrackerStatistics::new())), } } - pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStats> { + pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { self.stats.read().await } - pub async fn send_event(&self, event: TrackerStatsEvent) -> Option>> { + pub async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>> { if let Some(tx) = &self.channel_sender { Some(tx.send(event).await) } else { @@ -81,7 +81,7 @@ impl StatsTracker { } pub fn run_worker(&mut self) { - let (tx, mut rx) = mpsc::channel::(CHANNEL_BUFFER_SIZE); + let (tx, mut rx) = mpsc::channel::(CHANNEL_BUFFER_SIZE); // set send channel on stats_tracker self.channel_sender = Some(tx); @@ -93,28 +93,28 @@ impl StatsTracker { let mut stats_lock = stats.write().await; match event { - TrackerStatsEvent::Tcp4Announce => { + TrackerStatisticsEvent::Tcp4Announce => { stats_lock.tcp4_announces_handled 
+= 1; stats_lock.tcp4_connections_handled += 1; } - TrackerStatsEvent::Tcp4Scrape => { + TrackerStatisticsEvent::Tcp4Scrape => { stats_lock.tcp4_scrapes_handled += 1; stats_lock.tcp4_connections_handled += 1; } - TrackerStatsEvent::Tcp6Announce => { + TrackerStatisticsEvent::Tcp6Announce => { stats_lock.tcp6_announces_handled += 1; stats_lock.tcp6_connections_handled += 1; } - TrackerStatsEvent::Tcp6Scrape => { + TrackerStatisticsEvent::Tcp6Scrape => { stats_lock.tcp6_scrapes_handled += 1; stats_lock.tcp6_connections_handled += 1; } - TrackerStatsEvent::Udp4Connect => { stats_lock.udp4_connections_handled += 1; } - TrackerStatsEvent::Udp4Announce => { stats_lock.udp4_announces_handled += 1; } - TrackerStatsEvent::Udp4Scrape => { stats_lock.udp4_scrapes_handled += 1; } - TrackerStatsEvent::Udp6Connect => { stats_lock.udp6_connections_handled += 1; } - TrackerStatsEvent::Udp6Announce => { stats_lock.udp6_announces_handled += 1; } - TrackerStatsEvent::Udp6Scrape => { stats_lock.udp6_scrapes_handled += 1; } + TrackerStatisticsEvent::Udp4Connect => { stats_lock.udp4_connections_handled += 1; } + TrackerStatisticsEvent::Udp4Announce => { stats_lock.udp4_announces_handled += 1; } + TrackerStatisticsEvent::Udp4Scrape => { stats_lock.udp4_scrapes_handled += 1; } + TrackerStatisticsEvent::Udp6Connect => { stats_lock.udp6_connections_handled += 1; } + TrackerStatisticsEvent::Udp6Announce => { stats_lock.udp6_announces_handled += 1; } + TrackerStatisticsEvent::Udp6Scrape => { stats_lock.udp6_scrapes_handled += 1; } } drop(stats_lock); diff --git a/src/torrent.rs b/src/tracker/torrent.rs similarity index 57% rename from src/torrent.rs rename to src/tracker/torrent.rs index 4e44a995a..c5c721a4a 100644 --- a/src/torrent.rs +++ b/src/tracker/torrent.rs @@ -1,88 +1,11 @@ use std::borrow::Cow; -use std::net::{IpAddr, SocketAddr}; +use std::net::{IpAddr}; -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use aquatic_udp_protocol::{AnnounceEvent}; use serde::{Deserialize, 
Serialize}; use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId}; -use crate::common::{AnnounceEventDef, NumberOfBytesDef}; -use crate::http::AnnounceRequest; - -#[derive(PartialEq, Eq, Debug, Clone, Serialize)] -pub struct TorrentPeer { - pub peer_id: PeerId, - pub peer_addr: SocketAddr, - #[serde(serialize_with = "ser_instant")] - pub updated: std::time::Instant, - #[serde(with = "NumberOfBytesDef")] - pub uploaded: NumberOfBytes, - #[serde(with = "NumberOfBytesDef")] - pub downloaded: NumberOfBytes, - #[serde(with = "NumberOfBytesDef")] - pub left: NumberOfBytes, - #[serde(with = "AnnounceEventDef")] - pub event: AnnounceEvent, -} - -impl TorrentPeer { - pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { - let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); - - TorrentPeer { - peer_id: PeerId(announce_request.peer_id.0), - peer_addr, - updated: std::time::Instant::now(), - uploaded: announce_request.bytes_uploaded, - downloaded: announce_request.bytes_downloaded, - left: announce_request.bytes_left, - event: announce_request.event, - } - } - - pub fn from_http_announce_request(announce_request: &AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { - let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); - - let event: AnnounceEvent = if let Some(event) = &announce_request.event { - match event.as_ref() { - "started" => AnnounceEvent::Started, - "stopped" => AnnounceEvent::Stopped, - "completed" => AnnounceEvent::Completed, - _ => AnnounceEvent::None - } - } else { - AnnounceEvent::None - }; - - TorrentPeer { - peer_id: announce_request.peer_id.clone(), - peer_addr, - updated: std::time::Instant::now(), - uploaded: NumberOfBytes(announce_request.uploaded as i64), - downloaded: NumberOfBytes(announce_request.downloaded as i64), - 
left: NumberOfBytes(announce_request.left as i64), - event, - } - } - - // potentially substitute localhost ip with external ip - pub fn peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip: IpAddr, host_opt_ip: Option, port: u16) -> SocketAddr { - if remote_ip.is_loopback() && host_opt_ip.is_some() { - SocketAddr::new(host_opt_ip.unwrap(), port) - } else { - SocketAddr::new(remote_ip, port) - } - } - - pub(crate) fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } - - fn is_completed(&self) -> bool { - self.event == AnnounceEvent::Completed - } -} - -fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { - ser.serialize_u64(inst.elapsed().as_millis() as u64) -} +use crate::peer::TorrentPeer; #[derive(Serialize, Deserialize, Clone)] pub struct TorrentEntry { diff --git a/src/tracker.rs b/src/tracker/tracker.rs similarity index 90% rename from src/tracker.rs rename to src/tracker/tracker.rs index b8c2b9931..4f0d571c6 100644 --- a/src/tracker.rs +++ b/src/tracker/tracker.rs @@ -3,20 +3,22 @@ use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; -use log::{debug, info}; +use log::info; use serde::{Deserialize, Serialize}; use serde; use tokio::sync::{RwLock, RwLockReadGuard}; -use crate::{Configuration, key_manager}; -use crate::common::InfoHash; +use crate::Configuration; +use crate::protocol::common::InfoHash; use crate::databases::database::Database; use tokio::sync::mpsc::error::SendError; use crate::databases::database; -use crate::key_manager::AuthKey; -use crate::key_manager::Error::KeyInvalid; -use crate::torrent::{TorrentEntry, TorrentError, TorrentPeer, TorrentStats}; -use crate::tracker_stats::{StatsTracker, TrackerStats, TrackerStatsEvent}; +use crate::peer::TorrentPeer; +use crate::tracker::key::AuthKey; +use crate::tracker::key::Error::KeyInvalid; +use crate::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsEvent}; +use crate::tracker::key; +use 
crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Debug)] pub enum TrackerMode { @@ -79,7 +81,7 @@ impl TorrentTracker { } pub async fn generate_auth_key(&self, seconds_valid: u64) -> Result { - let auth_key = key_manager::generate_auth_key(seconds_valid); + let auth_key = key::generate_auth_key(seconds_valid); // add key to database if let Err(error) = self.database.add_key_to_keys(&auth_key).await { return Err(error); } @@ -91,9 +93,9 @@ impl TorrentTracker { self.database.remove_key_from_keys(key).await } - pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key_manager::Error> { + pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key::Error> { let db_key = self.database.get_key_from_keys(&auth_key.key).await.map_err(|_| KeyInvalid)?; - key_manager::verify_auth_key(&db_key) + key::verify_auth_key(&db_key) } pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { @@ -124,13 +126,22 @@ impl TorrentTracker { Ok(()) } - // Loading the torrents into memory + // Loading the torrents from database into memory pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> { - let torrents = self.database.load_persistent_torrent_data().await?; + let persistent_torrents = self.database.load_persistent_torrents().await?; + let mut torrents = self.torrents.write().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(&info_hash) { continue; } + + let torrent_entry = TorrentEntry { + peers: Default::default(), + completed, + seeders: Default::default(), + }; - for torrent in torrents { - debug!("{:#?}", torrent); - let _ = self.add_torrent(torrent.0, 0, torrent.1, 0).await; + torrents.insert(info_hash.clone(), torrent_entry); } Ok(()) @@ -206,34 +217,15 @@ impl TorrentTracker { } } - pub async fn add_torrent(&self, 
info_hash: InfoHash, seeders: u32, completed: u32, leechers: u32) -> TorrentStats { - let mut torrents = self.torrents.write().await; - - if !torrents.contains_key(&info_hash) { - let torrent_entry = TorrentEntry { - peers: Default::default(), - completed, - seeders, - }; - torrents.insert(info_hash.clone(), torrent_entry); - } - - TorrentStats { - seeders, - completed, - leechers, - } - } - pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap> { self.torrents.read().await } - pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStats> { + pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { self.stats_tracker.get_stats().await } - pub async fn send_stats_event(&self, event: TrackerStatsEvent) -> Option>> { + pub async fn send_stats_event(&self, event: TrackerStatisticsEvent) -> Option>> { self.stats_tracker.send_event(event).await } @@ -252,8 +244,6 @@ impl TorrentTracker { // remove torrents without peers if enabled, and defragment memory pub async fn cleanup_torrents(&self) { - info!("Cleaning torrents..."); - let lock = self.torrents.write().await; // First we create a mapping of all the torrent hashes in a vector, and we use this to iterate through the btreemap. 
@@ -298,7 +288,6 @@ impl TorrentTracker { } drop(lock); } - info!("Torrents cleaned up."); } pub async fn periodic_saving(&self) { diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 23fac0405..4ea767c0b 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -3,12 +3,14 @@ use std::sync::Arc; use aquatic_udp_protocol::{AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId}; -use crate::{InfoHash, MAX_SCRAPE_TORRENTS, TorrentTracker}; -use crate::torrent::{TorrentError, TorrentPeer}; +use crate::{InfoHash, MAX_SCRAPE_TORRENTS}; +use crate::peer::TorrentPeer; +use crate::tracker::torrent::{TorrentError}; use crate::udp::errors::ServerError; use crate::udp::request::AnnounceRequestWrapper; -use crate::tracker_stats::TrackerStatsEvent; -use crate::utils::get_connection_id; +use crate::tracker::statistics::TrackerStatisticsEvent; +use crate::tracker::tracker::TorrentTracker; +use crate::protocol::utils::get_connection_id; pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), ServerError> { match tracker.authenticate_request(info_hash, &None).await { @@ -77,8 +79,8 @@ pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, t // send stats event match remote_addr { - SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp4Connect).await; } - SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp6Connect).await; } + SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp4Connect).await; } + SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp6Connect).await; } } Ok(response) @@ -136,8 +138,8 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc // send stats event match remote_addr { - SocketAddr::V4(_) => { 
tracker.send_stats_event(TrackerStatsEvent::Udp4Announce).await; } - SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp6Announce).await; } + SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp4Announce).await; } + SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp6Announce).await; } } Ok(announce_response) @@ -178,8 +180,8 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra // send stats event match remote_addr { - SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp4Scrape).await; } - SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatsEvent::Udp6Scrape).await; } + SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp4Scrape).await; } + SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp6Scrape).await; } } Ok(Response::from(ScrapeResponse { diff --git a/src/udp/server.rs b/src/udp/server.rs index 03745192b..bcacc2642 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -6,7 +6,7 @@ use aquatic_udp_protocol::Response; use log::{debug, info}; use tokio::net::UdpSocket; -use crate::TorrentTracker; +use crate::tracker::tracker::TorrentTracker; use crate::udp::{handle_packet, MAX_PACKET_SIZE}; pub struct UdpServer { @@ -24,16 +24,15 @@ impl UdpServer { }) } - pub async fn start(&self, rx: tokio::sync::watch::Receiver) { + pub async fn start(&self) { loop { - let mut rx = rx.clone(); let mut data = [0; MAX_PACKET_SIZE]; let socket = self.socket.clone(); let tracker = self.tracker.clone(); tokio::select! 
{ - _ = rx.changed() => { - info!("Stopping UDP server: {}...", socket.local_addr().unwrap()); + _ = tokio::signal::ctrl_c() => { + info!("Stopping UDP server: {}..", socket.local_addr().unwrap()); break; } Ok((valid_bytes, remote_addr)) = socket.recv_from(&mut data) => { From 047c2db9100ae0d52af57d786c15aa237f1d1e03 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 7 May 2022 01:45:00 +0200 Subject: [PATCH 038/435] refactor: refactored even more code --- src/api/server.rs | 4 +- src/config.rs | 4 +- src/http/response.rs | 2 + src/protocol/utils.rs | 4 ++ src/tracker/mod.rs | 1 + src/tracker/mode.rs | 21 ++++++++ src/tracker/peer.rs | 13 ++--- src/tracker/torrent.rs | 119 ++++++++++------------------------------- src/tracker/tracker.rs | 59 ++++++-------------- 9 files changed, 80 insertions(+), 147 deletions(-) create mode 100644 src/tracker/mode.rs diff --git a/src/api/server.rs b/src/api/server.rs index 77264162c..d33f17dc0 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -23,7 +23,7 @@ struct Torrent<'a> { completed: u32, leechers: u32, #[serde(skip_serializing_if = "Option::is_none")] - peers: Option>, + peers: Option>, } #[derive(Serialize)] @@ -198,7 +198,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let torrent_entry_option = db.get(&info_hash); if torrent_entry_option.is_none() { - return Err(warp::reject::custom(ActionStatus::Err { reason: "torrent does not exist".into() })); + return Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")) } let torrent_entry = torrent_entry_option.unwrap(); diff --git a/src/config.rs b/src/config.rs index 10268ebbb..c24d1c45d 100644 --- a/src/config.rs +++ b/src/config.rs @@ -10,7 +10,7 @@ use serde::{Deserialize, Serialize, Serializer}; use toml; use crate::databases::database::DatabaseDrivers; -use crate::tracker::tracker::TrackerMode; +use crate::mode::TrackerMode; #[derive(Serialize, Deserialize, PartialEq)] pub enum TrackerServer { @@ -141,7 +141,7 @@ impl 
Configuration { let mut configuration = Configuration { log_level: Option::from(String::from("info")), log_interval: Some(60), - mode: TrackerMode::PublicMode, + mode: TrackerMode::Public, db_driver: DatabaseDrivers::Sqlite3, db_path: String::from("data.db"), statistics: true, diff --git a/src/http/response.rs b/src/http/response.rs index f57129cde..3118f7df1 100644 --- a/src/http/response.rs +++ b/src/http/response.rs @@ -3,6 +3,7 @@ use std::error::Error; use std::io::Write; use std::net::IpAddr; +use serde; use serde::Serialize; #[derive(Serialize)] @@ -15,6 +16,7 @@ pub struct Peer { #[derive(Serialize)] pub struct AnnounceResponse { pub interval: u32, + #[serde(rename = "min interval")] pub interval_min: u32, //pub tracker_id: String, pub complete: u32, diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index fb2a94513..5fc694c8e 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -57,3 +57,7 @@ pub async fn convert_bytes_to_int(array: &Vec) -> u64 { let mut rdr = Cursor::new(array_fixed); return rdr.read_u64::().unwrap(); } + +pub fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { + ser.serialize_u64(inst.elapsed().as_millis() as u64) +} diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 6115bc23e..791e2e7d2 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -3,3 +3,4 @@ pub mod statistics; pub mod peer; pub mod torrent; pub mod key; +pub mod mode; diff --git a/src/tracker/mode.rs b/src/tracker/mode.rs new file mode 100644 index 000000000..edcb27f1c --- /dev/null +++ b/src/tracker/mode.rs @@ -0,0 +1,21 @@ +use serde; +use serde::{Serialize, Deserialize}; + +#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Debug)] +pub enum TrackerMode { + // Will track every new info hash and serve every peer. + #[serde(rename = "public")] + Public, + + // Will only track whitelisted info hashes. 
+ #[serde(rename = "listed")] + Listed, + + // Will only serve authenticated peers + #[serde(rename = "private")] + Private, + + // Will only track whitelisted info hashes and serve authenticated peers + #[serde(rename = "private_listed")] + PrivateListed, +} diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 2a54e2fd2..ce4e52022 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -1,8 +1,11 @@ use std::net::{IpAddr, SocketAddr}; + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde; use serde::{Serialize}; + use crate::protocol::common::{NumberOfBytesDef, AnnounceEventDef}; +use crate::protocol::utils::ser_instant; use crate::http::AnnounceRequest; use crate::PeerId; @@ -71,13 +74,5 @@ impl TorrentPeer { } } - pub(crate) fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } - - pub(crate) fn is_completed(&self) -> bool { - self.event == AnnounceEvent::Completed - } -} - -fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { - ser.serialize_u64(inst.elapsed().as_millis() as u64) + pub fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } } diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index c5c721a4a..a01b5ce55 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -1,19 +1,16 @@ -use std::borrow::Cow; -use std::net::{IpAddr}; +use std::net::{IpAddr, SocketAddr}; use aquatic_udp_protocol::{AnnounceEvent}; use serde::{Deserialize, Serialize}; -use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId}; +use crate::{MAX_SCRAPE_TORRENTS, PeerId}; use crate::peer::TorrentPeer; #[derive(Serialize, Deserialize, Clone)] pub struct TorrentEntry { #[serde(skip)] - pub(crate) peers: std::collections::BTreeMap, - pub(crate) completed: u32, - #[serde(skip)] - pub(crate) seeders: u32, + pub peers: std::collections::BTreeMap, + pub completed: u32, } impl TorrentEntry { @@ -21,113 +18,53 @@ impl TorrentEntry { TorrentEntry { peers: 
std::collections::BTreeMap::new(), completed: 0, - seeders: 0, } } pub fn update_peer(&mut self, peer: &TorrentPeer) { match peer.event { AnnounceEvent::Stopped => { - let peer_old = self.peers.remove(&peer.peer_id); - self.update_torrent_stats_with_peer(peer, peer_old); + let _ = self.peers.remove(&peer.peer_id); } - _ => { + AnnounceEvent::Completed => { let peer_old = self.peers.insert(peer.peer_id.clone(), peer.clone()); - self.update_torrent_stats_with_peer(peer, peer_old); + // Don't count if peer was not previously known + if peer_old.is_some() { self.completed += 1; } + } + _ => { + let _ = self.peers.insert(peer.peer_id.clone(), peer.clone()); } } } - pub fn get_peers(&self, remote_addr: Option<&std::net::SocketAddr>) -> Vec { - let mut list = Vec::new(); - for (_, peer) in self - .peers - .iter() - .filter(|e| match remote_addr { - // don't filter on ip_version + pub fn get_peers(&self, client_addr: Option<&SocketAddr>) -> Vec<&TorrentPeer> { + self.peers + .values() + .filter(|peer| match client_addr { + // Don't filter on ip_version None => true, - // filter out different ip_version from remote_addr - Some(remote_address) => { - match e.1.peer_addr.ip() { - IpAddr::V4(_) => { remote_address.is_ipv4() } - IpAddr::V6(_) => { remote_address.is_ipv6() } + // Filter out different ip_version from remote_addr + Some(remote_addr) => { + // Skip ip address of client + if peer.peer_addr.ip() == remote_addr.ip() { return false; } + + match peer.peer_addr.ip() { + IpAddr::V4(_) => { remote_addr.is_ipv4() } + IpAddr::V6(_) => { remote_addr.is_ipv6() } } } }) .take(MAX_SCRAPE_TORRENTS as usize) - { - - // skip ip address of client - if let Some(remote_addr) = remote_addr { - if peer.peer_addr == *remote_addr { - continue; - } - } - - list.push(peer.clone()); - } - list - } - - pub fn update_torrent_stats_with_peer(&mut self, peer: &TorrentPeer, peer_old: Option) { - match peer_old { - None => { - if peer.is_seeder() { - self.seeders += 1; - } - - if 
peer.is_completed() { - self.completed += 1; - } - } - Some(peer_old) => { - match peer.event { - AnnounceEvent::None => { - if peer.is_seeder() && !peer_old.is_seeder() { - self.seeders += 1; - } - } - AnnounceEvent::Completed => { - if peer.is_seeder() && !peer_old.is_seeder() { - self.seeders += 1; - } - - // don't double count completed - if !peer_old.is_completed() { - self.completed += 1; - } - } - AnnounceEvent::Stopped => { - if peer_old.is_seeder() { - if self.seeders != 0 { - self.seeders -= 1; - } - } - } - // impossible, started should be the first time a peer announces itself - AnnounceEvent::Started => {} - } - } - } + .collect() } pub fn get_stats(&self) -> (u32, u32, u32) { - let leechers: u32 = if self.seeders < (self.peers.len() as u32) { - (self.peers.len() as u32) - self.seeders - } else { - 0 - }; - - (self.seeders, self.completed, leechers) + let seeders: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; + let leechers: u32 = self.peers.len() as u32 - seeders; + (seeders, self.completed, leechers) } } -#[derive(Serialize, Deserialize)] -struct DatabaseRow<'a> { - info_hash: InfoHash, - entry: Cow<'a, TorrentEntry>, -} - #[derive(Debug)] pub struct TorrentStats { pub completed: u32, diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index 4f0d571c6..d3e7c8faa 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -4,15 +4,14 @@ use std::net::SocketAddr; use std::sync::Arc; use log::info; -use serde::{Deserialize, Serialize}; -use serde; use tokio::sync::{RwLock, RwLockReadGuard}; +use tokio::sync::mpsc::error::SendError; use crate::Configuration; use crate::protocol::common::InfoHash; use crate::databases::database::Database; -use tokio::sync::mpsc::error::SendError; use crate::databases::database; +use crate::mode::TrackerMode; use crate::peer::TorrentPeer; use crate::tracker::key::AuthKey; use crate::tracker::key::Error::KeyInvalid; @@ -20,28 +19,9 @@ use crate::statistics::{StatsTracker, 
TrackerStatistics, TrackerStatisticsEvent} use crate::tracker::key; use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; -#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Debug)] -pub enum TrackerMode { - // Will track every new info hash and serve every peer. - #[serde(rename = "public")] - PublicMode, - - // Will only track whitelisted info hashes. - #[serde(rename = "listed")] - ListedMode, - - // Will only serve authenticated peers - #[serde(rename = "private")] - PrivateMode, - - // Will only track whitelisted info hashes and serve authenticated peers - #[serde(rename = "private_listed")] - PrivateListedMode, -} - pub struct TorrentTracker { - mode: TrackerMode, pub config: Arc, + mode: TrackerMode, torrents: RwLock>, updates: RwLock>, shadow: RwLock>, @@ -58,8 +38,8 @@ impl TorrentTracker { if config.statistics { stats_tracker.run_worker(); } Ok(TorrentTracker { - mode: config.mode, config: config.clone(), + mode: config.mode, torrents: RwLock::new(std::collections::BTreeMap::new()), updates: RwLock::new(std::collections::HashMap::new()), shadow: RwLock::new(std::collections::HashMap::new()), @@ -69,15 +49,15 @@ impl TorrentTracker { } pub fn is_public(&self) -> bool { - self.mode == TrackerMode::PublicMode + self.mode == TrackerMode::Public } pub fn is_private(&self) -> bool { - self.mode == TrackerMode::PrivateMode || self.mode == TrackerMode::PrivateListedMode + self.mode == TrackerMode::Private || self.mode == TrackerMode::PrivateListed } pub fn is_whitelisted(&self) -> bool { - self.mode == TrackerMode::ListedMode || self.mode == TrackerMode::PrivateListedMode + self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed } pub async fn generate_auth_key(&self, seconds_valid: u64) -> Result { @@ -138,7 +118,6 @@ impl TorrentTracker { let torrent_entry = TorrentEntry { peers: Default::default(), completed, - seeders: Default::default(), }; torrents.insert(info_hash.clone(), torrent_entry); @@ -171,16 +150,13 @@ 
impl TorrentTracker { } - pub async fn get_torrent_peers( - &self, - info_hash: &InfoHash, - peer_addr: &SocketAddr, - ) -> Vec { + pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr, ) -> Vec { let read_lock = self.torrents.read().await; + match read_lock.get(info_hash) { None => vec![], Some(entry) => { - entry.get_peers(Some(peer_addr)) + entry.get_peers(Some(client_addr)).into_iter().cloned().collect() } } } @@ -242,6 +218,7 @@ impl TorrentTracker { info!("-=[ Stats ]=- | Torrents: {} | Updates: {} | Shadow: {}", torrents_size, updates_size, shadow_size); } + // todo: refactor // remove torrents without peers if enabled, and defragment memory pub async fn cleanup_torrents(&self) { let lock = self.torrents.write().await; @@ -260,7 +237,6 @@ impl TorrentTracker { let mut torrent = TorrentEntry { peers: BTreeMap::new(), completed: 0, - seeders: 0, }; let lock = self.torrents.write().await; @@ -273,13 +249,10 @@ impl TorrentTracker { continue; } torrent.peers.insert(peer_id.clone(), peer.clone()); - if peer.is_seeder() { - torrent.seeders += 1; - } } let mut lock = self.torrents.write().await; lock.remove(hash); - if self.config.mode.clone() == TrackerMode::PublicMode && self.config.cleanup_peerless && !self.config.persistence { + if self.config.mode.clone() == TrackerMode::Public && self.config.cleanup_peerless && !self.config.persistence { if torrent.peers.len() != 0 { lock.insert(hash.clone(), torrent); } @@ -290,6 +263,7 @@ impl TorrentTracker { } } + // todo: refactor pub async fn periodic_saving(&self) { // Get a lock for writing // let mut shadow = self.shadow.write().await; @@ -298,14 +272,14 @@ impl TorrentTracker { let mut updates = self.updates.write().await; let mut updates_cloned: std::collections::HashMap = std::collections::HashMap::new(); // let mut torrent_hashes: Vec = Vec::new(); - info!("Copying updates to updates_cloned..."); + // Copying updates to updates_cloned for (k, completed) in updates.iter() { 
updates_cloned.insert(k.clone(), completed.clone()); } updates.clear(); drop(updates); - info!("Copying updates_cloned into the shadow to overwrite..."); + // Copying updates_cloned into the shadow to overwrite for (k, completed) in updates_cloned.iter() { let mut shadows = self.shadow.write().await; if shadows.contains_key(k) { @@ -317,14 +291,13 @@ impl TorrentTracker { drop(updates_cloned); // We updated the shadow data from the updates data, let's handle shadow data as expected. - info!("Handle shadow_copy to be updated into SQL..."); + // Handle shadow_copy to be updated into SQL let mut shadow_copy: BTreeMap = BTreeMap::new(); let shadows = self.shadow.read().await; for (infohash, completed) in shadows.iter() { shadow_copy.insert(infohash.clone(), TorrentEntry { peers: Default::default(), completed: completed.clone(), - seeders: 0, }); } drop(shadows); From 6fcc043c0e38091b55add786f4e9c33f68a0ed73 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 7 May 2022 01:49:16 +0200 Subject: [PATCH 039/435] refactor: removed unused enum in config --- src/config.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/config.rs b/src/config.rs index c24d1c45d..729406163 100644 --- a/src/config.rs +++ b/src/config.rs @@ -12,12 +12,6 @@ use toml; use crate::databases::database::DatabaseDrivers; use crate::mode::TrackerMode; -#[derive(Serialize, Deserialize, PartialEq)] -pub enum TrackerServer { - UDP, - HTTP, -} - #[derive(Serialize, Deserialize, Debug)] pub struct UdpTrackerConfig { pub enabled: bool, From abe2e685444aa0ba2925dfe55668fdca2f519af5 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 7 May 2022 02:02:04 +0200 Subject: [PATCH 040/435] refactor: removed unused byteorder dependency --- Cargo.lock | 206 ++++++++++++++++++++++-------------------- Cargo.toml | 19 ++-- src/protocol/utils.rs | 27 ------ 3 files changed, 120 insertions(+), 132 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 56ecd77a7..4d6b9b17a 100644 --- a/Cargo.lock +++ b/Cargo.lock 
@@ -40,7 +40,7 @@ dependencies = [ [[package]] name = "aquatic_udp_protocol" version = "0.2.0" -source = "git+https://github.com/greatest-ape/aquatic#26e2e874377a2682f52568f8e5e8c080c3366326" +source = "git+https://github.com/greatest-ape/aquatic#99792eefc3a0cfb15dc9bbd351af94b14a44e9fc" dependencies = [ "byteorder", "either", @@ -88,9 +88,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "base-x" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +checksum = "dc19a4937b4fbd3fe3379793130e42060d10627a360f2127802b10b87e7baf74" [[package]] name = "base64" @@ -106,8 +106,8 @@ checksum = "d1e50562e37200edf7c6c43e54a08e64a5553bfb59d9c297d5572512aa517256" dependencies = [ "num-bigint 0.3.3", "num-integer", - "num-traits 0.2.14", - "serde 1.0.136", + "num-traits 0.2.15", + "serde 1.0.137", ] [[package]] @@ -238,8 +238,8 @@ checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" dependencies = [ "libc", "num-integer", - "num-traits 0.2.14", - "serde 1.0.136", + "num-traits 0.2.15", + "serde 1.0.137", "time 0.1.44", "winapi", ] @@ -288,7 +288,7 @@ dependencies = [ "lazy_static", "nom", "rust-ini", - "serde 1.0.136", + "serde 1.0.137", "serde-hjson", "serde_json", "toml", @@ -442,9 +442,9 @@ dependencies = [ [[package]] name = "fern" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9a4820f0ccc8a7afd67c39a0f1a0f4b07ca1725164271a64939d7aeb9af065" +checksum = "3bdd7b0849075e79ee9a1836df22c717d1eba30451796fdc631b04565dd11e2a" dependencies = [ "log", ] @@ -749,9 +749,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "http" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" +checksum = "ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb" dependencies = [ "bytes", "fnv", @@ -771,9 +771,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.7.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6330e8a36bd8c859f3fa6d9382911fbb7147ec39807f63b923933a247240b9ba" +checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c" [[package]] name = "httpdate" @@ -905,9 +905,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.123" +version = "0.2.125" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb691a747a7ab48abc15c5b42066eaafde10dc427e3b6ee2a1cf43db04c763bd" +checksum = "5916d2ae698f6de9bfb891ad7a8d65c09d232dc58cc4ac433c7da3b2fd84bc2b" [[package]] name = "libloading" @@ -931,9 +931,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f35facd4a5673cb5a48822be2be1d4236c1c99cb4113cab7061ac720d5bf859" +checksum = "92e7e15d7610cce1d9752e137625f14e61a28cd45929b6e12e47b50fe154ee2e" dependencies = [ "cc", "pkg-config", @@ -958,9 +958,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.16" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6389c490849ff5bc16be905ae24bc913a9c8892e19b2341dbc175e14c341c2b8" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ "cfg-if", ] @@ -991,9 +991,9 @@ checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "memchr" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +checksum = 
"2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" @@ -1088,7 +1088,7 @@ dependencies = [ "once_cell", "pem", "percent-encoding", - "serde 1.0.136", + "serde 1.0.137", "serde_json", "socket2", "twox-hash", @@ -1117,12 +1117,12 @@ dependencies = [ "lazy_static", "lexical", "num-bigint 0.4.3", - "num-traits 0.2.14", + "num-traits 0.2.15", "rand", "regex", "rust_decimal", "saturating", - "serde 1.0.136", + "serde 1.0.137", "serde_json", "sha1", "sha2", @@ -1201,7 +1201,7 @@ checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" dependencies = [ "autocfg", "num-integer", - "num-traits 0.2.14", + "num-traits 0.2.15", ] [[package]] @@ -1212,17 +1212,17 @@ checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" dependencies = [ "autocfg", "num-integer", - "num-traits 0.2.14", + "num-traits 0.2.15", ] [[package]] name = "num-integer" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg", - "num-traits 0.2.14", + "num-traits 0.2.15", ] [[package]] @@ -1231,14 +1231,14 @@ version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" dependencies = [ - "num-traits 0.2.14", + "num-traits 0.2.15", ] [[package]] name = "num-traits" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", ] @@ -1267,18 +1267,30 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = 
"0.10.38" +version = "0.10.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7ae222234c30df141154f159066c5093ff73b63204dcda7121eb082fc56a95" +checksum = "fb81a6430ac911acb25fe5ac8f1d2af1b4ea8a4fdfda0f1ee4292af2e2d8eb0e" dependencies = [ "bitflags", "cfg-if", "foreign-types", "libc", "once_cell", + "openssl-macros", "openssl-sys", ] +[[package]] +name = "openssl-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "openssl-probe" version = "0.1.5" @@ -1287,9 +1299,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.72" +version = "0.9.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e46109c383602735fa0a2e48dd2b7c892b048e1bf69e5c3b1d804b7d9c203cb" +checksum = "9d5fd19fb3e0a8191c1e34935718976a3e70c112ab9a24af6d7cadccd9d90bc0" dependencies = [ "autocfg", "cc", @@ -1316,7 +1328,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" dependencies = [ "lock_api", - "parking_lot_core 0.9.2", + "parking_lot_core 0.9.3", ] [[package]] @@ -1335,9 +1347,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "995f667a6c822200b0433ac218e05582f0e2efa1b922a3fd2fbaadc5f87bab37" +checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" dependencies = [ "cfg-if", "libc", @@ -1391,9 +1403,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pin-utils" @@ -1421,9 +1433,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec757218438d5fda206afc041538b2f6d889286160d649a86a24d37e1235afd1" +checksum = "9027b48e9d4c9175fa2218adf3557f91c1137021739951d4932f5f8268ac48aa" dependencies = [ "unicode-xid", ] @@ -1589,8 +1601,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22dc69eadbf0ee2110b8d20418c0c6edbaefec2811c4963dc17b6344e11fe0f8" dependencies = [ "arrayvec 0.7.2", - "num-traits 0.2.14", - "serde 1.0.136", + "num-traits 0.2.15", + "serde 1.0.137", ] [[package]] @@ -1614,7 +1626,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.7", + "semver 1.0.9", ] [[package]] @@ -1723,9 +1735,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65bd28f48be7196d222d95b9243287f48d27aca604e08497513019ff0502cc4" +checksum = "8cb243bdfdb5936c8dc3c45762a19d12ab4550cdc753bc247637d4ec35a040fd" [[package]] name = "semver-parser" @@ -1741,9 +1753,9 @@ checksum = "9dad3f759919b92c3068c696c15c3d17238234498bbdcc80f2c469606f948ac8" [[package]] name = "serde" -version = "1.0.136" +version = "1.0.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" +checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" dependencies = [ "serde_derive", ] @@ -1766,24 +1778,24 @@ version = "0.2.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "934d8bdbaa0126dafaea9a8833424a211d9661897717846c6bb782349ca1c30d" dependencies = [ - "serde 1.0.136", + "serde 1.0.137", "serde_bytes", ] [[package]] name = "serde_bytes" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16ae07dd2f88a366f15bd0632ba725227018c69a1c8550a927324f8eb8368bb9" +checksum = "212e73464ebcde48d723aa02eb270ba62eff38a9b732df31f33f1b4e145f3a54" dependencies = [ - "serde 1.0.136", + "serde 1.0.137", ] [[package]] name = "serde_derive" -version = "1.0.136" +version = "1.0.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" +checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" dependencies = [ "proc-macro2", "quote", @@ -1792,13 +1804,13 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.79" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95" +checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" dependencies = [ "itoa", "ryu", - "serde 1.0.136", + "serde 1.0.137", ] [[package]] @@ -1810,7 +1822,7 @@ dependencies = [ "form_urlencoded", "itoa", "ryu", - "serde 1.0.136", + "serde 1.0.137", ] [[package]] @@ -1945,7 +1957,7 @@ checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" dependencies = [ "proc-macro2", "quote", - "serde 1.0.136", + "serde 1.0.137", "serde_derive", "syn", ] @@ -1959,7 +1971,7 @@ dependencies = [ "base-x", "proc-macro2", "quote", - "serde 1.0.136", + "serde 1.0.137", "serde_derive", "serde_json", "sha1", @@ -1990,9 +2002,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b683b2b825c8eef438b77c36a06dc262294da3d5a5813fac20da149241dcd44d" +checksum = "7ff7c592601f11445996a06f8ad0c27f094a58857c2f89e97974ab9235b92c52" dependencies = [ "proc-macro2", "quote", @@ -2039,18 +2051,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.30" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" +checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.30" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" +checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" dependencies = [ "proc-macro2", "quote", @@ -2108,9 +2120,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] @@ -2123,9 +2135,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.17.0" +version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee" +checksum = "dce653fb475565de9f6fb0614b28bca8df2c430c0cf84bcd9c843f15de5414cc" dependencies = [ "bytes", "libc", @@ -2221,7 +2233,7 @@ version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" dependencies = [ - "serde 1.0.136", + "serde 1.0.137", ] [[package]] @@ -2231,7 +2243,6 @@ dependencies = [ 
"aquatic_udp_protocol", "async-trait", "binascii", - "byteorder", "chrono", "config", "derive_more", @@ -2244,9 +2255,8 @@ dependencies = [ "r2d2_mysql", "r2d2_sqlite", "rand", - "serde 1.0.136", + "serde 1.0.137", "serde_bencode", - "serde_bytes", "serde_json", "thiserror", "tokio", @@ -2275,9 +2285,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.20" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e65ce065b4b5c53e73bb28912318cb8c9e9ad3921f1d669eb0e68b4c8143a2b" +checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" dependencies = [ "proc-macro2", "quote", @@ -2329,9 +2339,9 @@ dependencies = [ [[package]] name = "twox-hash" -version = "1.6.2" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0" +checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", "rand", @@ -2355,9 +2365,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" +checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] name = "unicode-normalization" @@ -2376,9 +2386,9 @@ checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" [[package]] name = "unicode-xid" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" [[package]] name = "untrusted" @@ -2457,7 +2467,7 @@ dependencies = [ "percent-encoding", "pin-project", "scoped-tls", - "serde 1.0.136", + "serde 1.0.137", "serde_json", "serde_urlencoded", 
"tokio", @@ -2597,9 +2607,9 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5acdd78cb4ba54c0045ac14f62d8f94a03d10047904ae2a40afa1e99d8f70825" +checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" dependencies = [ "windows_aarch64_msvc", "windows_i686_gnu", @@ -2610,33 +2620,33 @@ dependencies = [ [[package]] name = "windows_aarch64_msvc" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" +checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" [[package]] name = "windows_i686_gnu" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" +checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" [[package]] name = "windows_i686_msvc" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" +checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" [[package]] name = "windows_x86_64_gnu" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" +checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" [[package]] name = "windows_x86_64_msvc" -version = "0.34.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" +checksum = 
"c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" [[package]] name = "wyz" diff --git a/Cargo.toml b/Cargo.toml index cc97072a5..420e772fd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,27 +11,32 @@ debug = 1 lto = "fat" [dependencies] +tokio = { version = "1.7", features = ["full"] } + serde = { version = "1.0", features = ["derive"] } serde_bencode = "^0.2.3" -serde_bytes = "0.11" serde_json = "1.0.72" hex = "0.4.3" percent-encoding = "2.1.0" -warp = { version = "0.3", features = ["tls"] } -tokio = { version = "1.7", features = ["full"] } binascii = "0.1" + +warp = { version = "0.3", features = ["tls"] } + +config = "0.11" toml = "0.5" + log = { version = "0.4", features = ["release_max_level_info"] } fern = "0.6" chrono = "0.4" -byteorder = "1" + +r2d2 = "0.8.8" r2d2_mysql = "21.0.0" r2d2_sqlite = "0.16.0" -r2d2 = "0.8.8" + rand = "0.8.4" -config = "0.11" derive_more = "0.99" thiserror = "1.0" -aquatic_udp_protocol = { git = "https://github.com/greatest-ape/aquatic" } futures = "0.3.21" async-trait = "0.1.52" + +aquatic_udp_protocol = { git = "https://github.com/greatest-ape/aquatic" } diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index 5fc694c8e..392966307 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -1,11 +1,9 @@ use std::error::Error; use std::fmt::Write; -use std::io::Cursor; use std::net::SocketAddr; use std::time::SystemTime; use aquatic_udp_protocol::ConnectionId; -use byteorder::{BigEndian, ReadBytesExt}; pub fn get_connection_id(remote_address: &SocketAddr) -> ConnectionId { match std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH) { @@ -33,31 +31,6 @@ pub fn url_encode_bytes(content: &[u8]) -> Result> { Ok(out) } -// Function that will convert a small or big number into the smallest form of a byte array. 
-pub async fn convert_int_to_bytes(number: &u64) -> Vec { - let mut return_data: Vec = Vec::new(); - // return_data.extend(number.to_be_bytes().reverse()); - for i in 1..8 { - if number < &256u64.pow(i) { - let start: usize = 16usize - i as usize; - return_data.extend(number.to_be_bytes()[start..8].iter()); - return return_data; - } - } - return return_data; -} - -pub async fn convert_bytes_to_int(array: &Vec) -> u64 { - let mut array_fixed: Vec = Vec::new(); - let size = 8 - array.len(); - for _ in 0..size { - array_fixed.push(0); - } - array_fixed.extend(array); - let mut rdr = Cursor::new(array_fixed); - return rdr.read_u64::().unwrap(); -} - pub fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { ser.serialize_u64(inst.elapsed().as_millis() as u64) } From d6dd240c080363daedcad067fb1910c8ae31282d Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 7 May 2022 02:05:01 +0200 Subject: [PATCH 041/435] refactor: updated rust version to 2021 --- Cargo.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 420e772fd..a98820521 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,10 +1,11 @@ [package] +edition = "2021" name = "torrust-tracker" version = "2.3.0" license = "AGPL-3.0" authors = ["Mick van Dijke "] description = "A feature rich BitTorrent tracker." -edition = "2018" +repository = "https://github.com/torrust/torrust-tracker" [profile.release] debug = 1 From 278ac3abebf2f8c0bebe8faf7d9d00067da808f8 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 7 May 2022 14:11:29 +0200 Subject: [PATCH 042/435] refactor: updated cargo.toml dev and release profiles --- Cargo.toml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index a98820521..4c2fa4a86 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,8 +7,14 @@ authors = ["Mick van Dijke "] description = "A feature rich BitTorrent tracker." 
repository = "https://github.com/torrust/torrust-tracker" +[profile.dev] +debug = 1 +opt-level = 1 +lto = "thin" + [profile.release] debug = 1 +opt-level = 3 lto = "fat" [dependencies] From 829afb9606d311022892898804d947288e86af49 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 7 May 2022 17:49:03 +0200 Subject: [PATCH 043/435] refactor: refactored and improved tracker.cleanup_torrents() --- src/config.rs | 16 +++---- src/jobs/persistent_torrent_statistics.rs | 2 +- src/jobs/torrent_cleanup.rs | 2 +- src/setup.rs | 5 ++- src/tracker/torrent.rs | 6 +++ src/tracker/tracker.rs | 52 ++++++----------------- 6 files changed, 33 insertions(+), 50 deletions(-) diff --git a/src/config.rs b/src/config.rs index 729406163..292b065b4 100644 --- a/src/config.rs +++ b/src/config.rs @@ -53,10 +53,10 @@ pub struct Configuration { pub db_driver: DatabaseDrivers, pub db_path: String, pub statistics: bool, - pub persistence: bool, - pub persistence_interval: Option, - pub cleanup_interval: u64, - pub cleanup_peerless: bool, + pub persistent_torrent_completed_stat: bool, + pub persistence_interval: u64, + pub inactive_peer_cleanup_interval: u64, + pub remove_peerless_torrents: bool, pub external_ip: Option, pub announce_interval: u32, pub announce_interval_min: u32, @@ -139,10 +139,10 @@ impl Configuration { db_driver: DatabaseDrivers::Sqlite3, db_path: String::from("data.db"), statistics: true, - persistence: false, - persistence_interval: Some(900), - cleanup_interval: 600, - cleanup_peerless: true, + persistent_torrent_completed_stat: false, + persistence_interval: 0, + inactive_peer_cleanup_interval: 600, + remove_peerless_torrents: true, external_ip: Some(String::from("0.0.0.0")), announce_interval: 120, announce_interval_min: 120, diff --git a/src/jobs/persistent_torrent_statistics.rs b/src/jobs/persistent_torrent_statistics.rs index 7ebc80bdb..54ee23b6b 100644 --- a/src/jobs/persistent_torrent_statistics.rs +++ b/src/jobs/persistent_torrent_statistics.rs @@ -6,7 +6,7 @@ use 
crate::tracker::tracker::TorrentTracker; pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(&tracker); - let interval = config.persistence_interval.unwrap_or(900); + let interval = config.persistence_interval; tokio::spawn(async move { let interval = std::time::Duration::from_secs(interval); diff --git a/src/jobs/torrent_cleanup.rs b/src/jobs/torrent_cleanup.rs index 05e639728..7d9967352 100644 --- a/src/jobs/torrent_cleanup.rs +++ b/src/jobs/torrent_cleanup.rs @@ -7,7 +7,7 @@ use crate::tracker::tracker::TorrentTracker; pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(&tracker); - let interval = config.cleanup_interval; + let interval = config.inactive_peer_cleanup_interval; tokio::spawn(async move { let interval = std::time::Duration::from_secs(interval); diff --git a/src/setup.rs b/src/setup.rs index b8d49614d..76372ef0d 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -8,8 +8,9 @@ use crate::tracker::tracker::TorrentTracker; pub async fn setup(config: &Configuration, tracker: Arc) -> Vec>{ let mut jobs: Vec> = Vec::new(); + // todo: replace by realtime updates // Load persistent torrents - if config.persistence { + if config.persistent_torrent_completed_stat && config.persistence_interval > 0 { info!("Loading persistent torrents into memory.."); tracker.load_persistent_torrents().await.expect("Could not load persistent torrents."); info!("Persistent torrents loaded."); @@ -39,7 +40,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Vec< } // Remove torrents without peers, every interval - if config.cleanup_interval > 0 { + if config.inactive_peer_cleanup_interval > 0 { jobs.push(torrent_cleanup::start_job(&config, tracker.clone())); } diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index a01b5ce55..4b891e992 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -63,6 +63,12 @@ impl 
TorrentEntry { let leechers: u32 = self.peers.len() as u32 - seeders; (seeders, self.completed, leechers) } + + pub fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { + self.peers.retain(|_, peer| { + peer.updated.elapsed() > std::time::Duration::from_secs(max_peer_timeout as u64) + }); + } } #[derive(Debug)] diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index d3e7c8faa..d25f32923 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -177,7 +177,7 @@ impl TorrentTracker { let (seeders, completed, leechers) = torrent_entry.get_stats(); - if self.config.persistence { + if self.config.persistent_torrent_completed_stat { let mut updates = self.updates.write().await; if updates.contains_key(info_hash) { updates.remove(info_hash); @@ -218,48 +218,24 @@ impl TorrentTracker { info!("-=[ Stats ]=- | Torrents: {} | Updates: {} | Shadow: {}", torrents_size, updates_size, shadow_size); } - // todo: refactor - // remove torrents without peers if enabled, and defragment memory + // Remove inactive peers and (optionally) peerless torrents pub async fn cleanup_torrents(&self) { - let lock = self.torrents.write().await; - - // First we create a mapping of all the torrent hashes in a vector, and we use this to iterate through the btreemap. - // Every hash we have handled, we remove from the btreemap completely, and push it to the top. - let mut torrent_hashes: Vec = Vec::new(); - for (k, _torrent_entry) in lock.iter() { - torrent_hashes.push(k.clone()); - } - - drop(lock); - - // Let's iterate through all torrents, and parse. 
- for hash in torrent_hashes.iter() { - let mut torrent = TorrentEntry { - peers: BTreeMap::new(), - completed: 0, - }; + let mut torrents_lock = self.torrents.write().await; - let lock = self.torrents.write().await; - let torrent_data = lock.get(hash).unwrap().clone(); - drop(lock); + // If we don't need to remove torrents we will use the faster iter + if self.config.remove_peerless_torrents { + torrents_lock.retain(|_, torrent_entry| { + torrent_entry.remove_inactive_peers(self.config.peer_timeout); - torrent.completed = torrent_data.completed.clone(); - for (peer_id, peer) in torrent_data.peers.iter() { - if peer.updated.elapsed() > std::time::Duration::from_secs(self.config.peer_timeout as u64) { - continue; - } - torrent.peers.insert(peer_id.clone(), peer.clone()); - } - let mut lock = self.torrents.write().await; - lock.remove(hash); - if self.config.mode.clone() == TrackerMode::Public && self.config.cleanup_peerless && !self.config.persistence { - if torrent.peers.len() != 0 { - lock.insert(hash.clone(), torrent); + match self.config.persistent_torrent_completed_stat { + true => { torrent_entry.completed > 0 || torrent_entry.peers.len() > 0 } + false => { torrent_entry.peers.len() > 0 } } - } else { - lock.insert(hash.clone(), torrent); + }); + } else { + for (_, torrent_entry) in torrents_lock.iter_mut() { + torrent_entry.remove_inactive_peers(self.config.peer_timeout); } - drop(lock); } } From eb69fa00595989b23cdf71f9e4c954669ff018ac Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 7 May 2022 18:08:22 +0200 Subject: [PATCH 044/435] feat: added strip to Cargo.toml to almost half compiled binary size on unix systems --- Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.toml b/Cargo.toml index 4c2fa4a86..c320e9e03 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,6 +16,7 @@ lto = "thin" debug = 1 opt-level = 3 lto = "fat" +strip = true [dependencies] tokio = { version = "1.7", features = ["full"] } From 
1d353cadb337b7ab847d47f5b903d931edabc2b8 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sat, 7 May 2022 23:47:08 +0200 Subject: [PATCH 045/435] feat: massively improved key authentication speed in private mode --- src/api/server.rs | 2 +- src/databases/database.rs | 4 +++- src/databases/mysql.rs | 17 +++++++++++++++-- src/databases/sqlite.rs | 24 ++++++++++++++++++++++-- src/setup.rs | 4 ++++ src/tracker/tracker.rs | 38 ++++++++++++++++++++++++++++++++------ 6 files changed, 77 insertions(+), 12 deletions(-) diff --git a/src/api/server.rs b/src/api/server.rs index d33f17dc0..77496b497 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -289,7 +289,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp }) .and_then(|(key, tracker): (String, Arc)| { async move { - match tracker.remove_auth_key(key).await { + match tracker.remove_auth_key(&key).await { Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to delete key".into() })) } diff --git a/src/databases/database.rs b/src/databases/database.rs index fd9f2a19d..e2f3cdfe5 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -42,6 +42,8 @@ pub trait Database: Sync + Send { async fn load_persistent_torrents(&self) -> Result, Error>; + async fn load_keys(&self) -> Result, Error>; + async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), Error>; async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result; @@ -54,7 +56,7 @@ pub trait Database: Sync + Send { async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result; - async fn remove_key_from_keys(&self, key: String) -> Result; + async fn remove_key_from_keys(&self, key: &str) -> Result; } #[derive(Debug, Display, PartialEq, Error)] diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index e7f57a7a4..6fd63b7d9 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -2,7 +2,7 @@ use 
std::collections::BTreeMap; use std::str::FromStr; use async_trait::async_trait; -use log::debug; +use log::{debug}; use r2d2::Pool; use r2d2_mysql::mysql::{Opts, OptsBuilder, params, TxOpts}; use r2d2_mysql::mysql::prelude::Queryable; @@ -76,6 +76,19 @@ impl Database for MysqlDatabase { Ok(torrents) } + async fn load_keys(&self) -> Result, Error> { + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + + let keys: Vec = conn.query_map("SELECT `key`, valid_until FROM `keys`", |(key, valid_until): (String, i64)| { + AuthKey { + key, + valid_until: Some(valid_until as u64) + } + }).map_err(|_| database::Error::QueryReturnedNoRows)?; + + Ok(keys) + } + async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), database::Error> { let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; @@ -190,7 +203,7 @@ impl Database for MysqlDatabase { } } - async fn remove_key_from_keys(&self, key: String) -> Result { + async fn remove_key_from_keys(&self, key: &str) -> Result { let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; match conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! 
{ key }) { diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 18a1d5a28..9c452eadc 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -8,7 +8,7 @@ use r2d2_sqlite::SqliteConnectionManager; use r2d2_sqlite::rusqlite::NO_PARAMS; use crate::{AUTH_KEY_LENGTH, InfoHash}; -use crate::databases::database::Database; +use crate::databases::database::{Database, Error}; use crate::databases::database; use crate::tracker::key::AuthKey; use crate::tracker::torrent::TorrentEntry; @@ -76,6 +76,26 @@ impl Database for SqliteDatabase { Ok(torrents) } + async fn load_keys(&self) -> Result, Error> { + let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + + let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?; + + let keys_iter = stmt.query_map(NO_PARAMS, |row| { + let key = row.get(0)?; + let valid_until: i64 = row.get(1)?; + + Ok(AuthKey { + key, + valid_until: Some(valid_until as u64) + }) + })?; + + let keys: Vec = keys_iter.filter_map(|x| x.ok()).collect(); + + Ok(keys) + } + async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), database::Error> { let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; @@ -174,7 +194,7 @@ impl Database for SqliteDatabase { } } - async fn remove_key_from_keys(&self, key: String) -> Result { + async fn remove_key_from_keys(&self, key: &str) -> Result { let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; match conn.execute("DELETE FROM keys WHERE key = ?", &[key]) { diff --git a/src/setup.rs b/src/setup.rs index 76372ef0d..69f2db432 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -8,6 +8,10 @@ use crate::tracker::tracker::TorrentTracker; pub async fn setup(config: &Configuration, tracker: Arc) -> Vec>{ let mut jobs: Vec> = Vec::new(); + if tracker.is_private() { + tracker.load_keys().await.expect("Could not retrieve keys."); + } + // todo: replace by realtime updates // Load persistent torrents if 
config.persistent_torrent_completed_stat && config.persistence_interval > 0 { diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index d25f32923..fdbaac4de 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -14,7 +14,6 @@ use crate::databases::database; use crate::mode::TrackerMode; use crate::peer::TorrentPeer; use crate::tracker::key::AuthKey; -use crate::tracker::key::Error::KeyInvalid; use crate::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsEvent}; use crate::tracker::key; use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; @@ -22,6 +21,7 @@ use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; pub struct TorrentTracker { pub config: Arc, mode: TrackerMode, + keys: RwLock>, torrents: RwLock>, updates: RwLock>, shadow: RwLock>, @@ -40,6 +40,7 @@ impl TorrentTracker { Ok(TorrentTracker { config: config.clone(), mode: config.mode, + keys: RwLock::new(std::collections::HashMap::new()), torrents: RwLock::new(std::collections::BTreeMap::new()), updates: RwLock::new(std::collections::HashMap::new()), shadow: RwLock::new(std::collections::HashMap::new()), @@ -66,18 +67,32 @@ impl TorrentTracker { // add key to database if let Err(error) = self.database.add_key_to_keys(&auth_key).await { return Err(error); } + // Add key to in-memory database + self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); + Ok(auth_key) } - pub async fn remove_auth_key(&self, key: String) -> Result { - self.database.remove_key_from_keys(key).await + pub async fn remove_auth_key(&self, key: &str) -> Result { + self.database.remove_key_from_keys(&key).await?; + + // Remove key from in-memory database + self.keys.write().await.remove(key); + + Ok(1) } pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key::Error> { - let db_key = self.database.get_key_from_keys(&auth_key.key).await.map_err(|_| KeyInvalid)?; - key::verify_auth_key(&db_key) + let keys_lock = 
self.keys.read().await; + + if let Some(key) = keys_lock.get(&auth_key.key) { + key::verify_auth_key(key) + } else { + Err(key::Error::KeyInvalid) + } } + // todo: speed this up in non-public modes pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { // no authentication needed in public mode if self.is_public() { return Ok(()); } @@ -98,7 +113,7 @@ impl TorrentTracker { // check if info_hash is whitelisted if self.is_whitelisted() { - if self.is_info_hash_whitelisted(info_hash).await == false { + if !self.is_info_hash_whitelisted(info_hash).await { return Err(TorrentError::TorrentNotWhitelisted); } } @@ -106,6 +121,17 @@ impl TorrentTracker { Ok(()) } + pub async fn load_keys(&self) -> Result<(), database::Error> { + let keys_from_database = self.database.load_keys().await?; + let mut keys = self.keys.write().await; + + for key in keys_from_database { + let _ = keys.insert(key.key.clone(), key); + } + + Ok(()) + } + // Loading the torrents from database into memory pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; From aac9ac72bdd300ffb2959ba6f2a1ee04ecac5895 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sun, 8 May 2022 17:26:44 +0200 Subject: [PATCH 046/435] fix: http scrape response --- src/http/filters.rs | 2 ++ src/http/handlers.rs | 16 +++++++++------- src/http/response.rs | 25 ++++++++++++++++++++++--- src/protocol/utils.rs | 13 ------------- src/tracker/tracker.rs | 2 +- 5 files changed, 34 insertions(+), 24 deletions(-) diff --git a/src/http/filters.rs b/src/http/filters.rs index 8f3ee04c0..d5a7881d8 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -75,9 +75,11 @@ async fn info_hashes(raw_query: String) -> WebResult> { for v in split_raw_query { if v.contains("info_hash") { let raw_info_hash = v.split("=").collect::>()[1]; + debug!("Raw info hash: {}", raw_info_hash); let info_hash_bytes 
= percent_encoding::percent_decode_str(raw_info_hash).collect::>(); let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes)); if let Ok(ih) = info_hash { + debug!("Parsed info hash: {}", ih.to_string()); info_hashes.push(ih); } } diff --git a/src/http/handlers.rs b/src/http/handlers.rs index d7e4859d9..977aeb4ba 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -13,7 +13,6 @@ use crate::tracker::torrent::{TorrentError, TorrentStats}; use crate::http::{AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, WebResult}; use crate::peer::TorrentPeer; use crate::tracker::statistics::TrackerStatisticsEvent; -use crate::protocol::utils::url_encode_bytes; use crate::tracker::tracker::TorrentTracker; /// Authenticate InfoHash using optional AuthKey @@ -62,7 +61,7 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option /// Handle scrape request pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option, tracker: Arc) -> WebResult { - let mut files: HashMap = HashMap::new(); + let mut files: HashMap = HashMap::new(); let db = tracker.get_torrents().await; for info_hash in scrape_request.info_hashes.iter() { @@ -79,9 +78,7 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option) -> WebResult { - Ok(Response::new(ScrapeResponse { files }.write())) +fn send_scrape_response(files: HashMap) -> WebResult { + let res = ScrapeResponse { files }; + + match res.write() { + Ok(body) => Ok(Response::new(body)), + Err(_) => Err(reject::custom(ServerError::InternalServerError)) + } } /// Handle all server errors and send error reply diff --git a/src/http/response.rs b/src/http/response.rs index 3118f7df1..2bdd4c1e7 100644 --- a/src/http/response.rs +++ b/src/http/response.rs @@ -5,6 +5,7 @@ use std::net::IpAddr; use serde; use serde::Serialize; +use crate::InfoHash; #[derive(Serialize)] pub struct Peer { @@ -78,12 +79,30 @@ pub struct 
ScrapeResponseEntry { #[derive(Serialize)] pub struct ScrapeResponse { - pub files: HashMap, + pub files: HashMap, } impl ScrapeResponse { - pub fn write(&self) -> String { - serde_bencode::to_string(&self).unwrap() + pub fn write(&self) -> Result, Box> { + let mut bytes: Vec = Vec::new(); + + bytes.write(b"d5:filesd")?; + + for (info_hash, scrape_response_entry) in self.files.iter() { + bytes.write(b"20:")?; + bytes.write(&info_hash.0)?; + bytes.write(b"d8:completei")?; + bytes.write(scrape_response_entry.complete.to_string().as_bytes())?; + bytes.write(b"e10:downloadedi")?; + bytes.write(scrape_response_entry.downloaded.to_string().as_bytes())?; + bytes.write(b"e10:incompletei")?; + bytes.write(scrape_response_entry.incomplete.to_string().as_bytes())?; + bytes.write(b"ee")?; + } + + bytes.write(b"ee")?; + + Ok(bytes) } } diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index 392966307..2b8ac3ebf 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -18,19 +18,6 @@ pub fn current_time() -> u64 { .as_secs() } -pub fn url_encode_bytes(content: &[u8]) -> Result> { - let mut out: String = String::new(); - - for byte in content.iter() { - match *byte as char { - '0'..='9' | 'a'..='z' | 'A'..='Z' | '.' 
| '-' | '_' | '~' => out.push(*byte as char), - _ => write!(&mut out, "%{:02x}", byte)?, - }; - } - - Ok(out) -} - pub fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { ser.serialize_u64(inst.elapsed().as_millis() as u64) } diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index fdbaac4de..45b23f728 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -92,7 +92,6 @@ impl TorrentTracker { } } - // todo: speed this up in non-public modes pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { // no authentication needed in public mode if self.is_public() { return Ok(()); } @@ -111,6 +110,7 @@ impl TorrentTracker { } } + // todo: speed this up // check if info_hash is whitelisted if self.is_whitelisted() { if !self.is_info_hash_whitelisted(info_hash).await { From c4c6c3ddcb31c580cdc133eaa76191144a84ce58 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 9 May 2022 18:28:02 +0200 Subject: [PATCH 047/435] feat: improved persistent torrent saving --- Cargo.lock | 3 +- Cargo.toml | 2 +- src/config.rs | 37 +++---- src/databases/database.rs | 10 +- src/databases/mysql.rs | 62 +++++------- src/databases/sqlite.rs | 37 ++++--- src/http/filters.rs | 6 +- src/http/handlers.rs | 2 +- src/jobs/log_statistics.rs | 32 ------ src/jobs/mod.rs | 2 - src/jobs/persistent_torrent_statistics.rs | 38 ------- src/protocol/utils.rs | 2 - src/setup.rs | 24 ++--- src/tracker/statistics.rs | 24 +++-- src/tracker/torrent.rs | 12 ++- src/tracker/tracker.rs | 118 +++++----------------- 16 files changed, 134 insertions(+), 277 deletions(-) delete mode 100644 src/jobs/log_statistics.rs delete mode 100644 src/jobs/persistent_torrent_statistics.rs diff --git a/Cargo.lock b/Cargo.lock index 4d6b9b17a..1ee924fac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -40,7 +40,8 @@ dependencies = [ [[package]] name = "aquatic_udp_protocol" version = "0.2.0" -source = 
"git+https://github.com/greatest-ape/aquatic#99792eefc3a0cfb15dc9bbd351af94b14a44e9fc" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16149f27924d42b337a637cd90a8ee2a8973bbccf32aabebce2b3c66913f947f" dependencies = [ "byteorder", "either", diff --git a/Cargo.toml b/Cargo.toml index c320e9e03..53e2949db 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,4 +47,4 @@ thiserror = "1.0" futures = "0.3.21" async-trait = "0.1.52" -aquatic_udp_protocol = { git = "https://github.com/greatest-ape/aquatic" } +aquatic_udp_protocol = "0.2.0" diff --git a/src/config.rs b/src/config.rs index 292b065b4..e2cd167f5 100644 --- a/src/config.rs +++ b/src/config.rs @@ -29,15 +29,6 @@ pub struct HttpTrackerConfig { pub ssl_key_path: Option, } -impl HttpTrackerConfig { - pub fn verify_ssl_cert_and_key_set(&self) -> bool { - self.ssl_cert_path.is_some() - && self.ssl_key_path.is_some() - && !self.ssl_cert_path.as_ref().unwrap().is_empty() - && !self.ssl_key_path.as_ref().unwrap().is_empty() - } -} - #[derive(Serialize, Deserialize)] pub struct HttpApiConfig { pub enabled: bool, @@ -48,20 +39,18 @@ pub struct HttpApiConfig { #[derive(Serialize, Deserialize)] pub struct Configuration { pub log_level: Option, - pub log_interval: Option, pub mode: TrackerMode, pub db_driver: DatabaseDrivers, pub db_path: String, - pub statistics: bool, + pub announce_interval: u32, + pub min_announce_interval: u32, + pub max_peer_timeout: u32, + pub on_reverse_proxy: bool, + pub external_ip: Option, + pub tracker_usage_statistics: bool, pub persistent_torrent_completed_stat: bool, - pub persistence_interval: u64, pub inactive_peer_cleanup_interval: u64, pub remove_peerless_torrents: bool, - pub external_ip: Option, - pub announce_interval: u32, - pub announce_interval_min: u32, - pub peer_timeout: u32, - pub on_reverse_proxy: bool, pub udp_trackers: Vec, pub http_trackers: Vec, pub http_api: HttpApiConfig, @@ -134,20 +123,18 @@ impl Configuration { pub fn default() -> Configuration 
{ let mut configuration = Configuration { log_level: Option::from(String::from("info")), - log_interval: Some(60), mode: TrackerMode::Public, db_driver: DatabaseDrivers::Sqlite3, db_path: String::from("data.db"), - statistics: true, + announce_interval: 120, + min_announce_interval: 120, + max_peer_timeout: 900, + on_reverse_proxy: false, + external_ip: Some(String::from("0.0.0.0")), + tracker_usage_statistics: true, persistent_torrent_completed_stat: false, - persistence_interval: 0, inactive_peer_cleanup_interval: 600, remove_peerless_torrents: true, - external_ip: Some(String::from("0.0.0.0")), - announce_interval: 120, - announce_interval_min: 120, - peer_timeout: 900, - on_reverse_proxy: false, udp_trackers: Vec::new(), http_trackers: Vec::new(), http_api: HttpApiConfig { diff --git a/src/databases/database.rs b/src/databases/database.rs index e2f3cdfe5..721dfd00d 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -1,15 +1,11 @@ -use std::collections::BTreeMap; - use async_trait::async_trait; use derive_more::{Display, Error}; -use log::debug; use serde::{Deserialize, Serialize}; use crate::InfoHash; use crate::tracker::key::AuthKey; use crate::databases::mysql::MysqlDatabase; use crate::databases::sqlite::SqliteDatabase; -use crate::tracker::torrent::TorrentEntry; #[derive(Serialize, Deserialize, Debug)] pub enum DatabaseDrivers { @@ -18,8 +14,6 @@ pub enum DatabaseDrivers { } pub fn connect_database(db_driver: &DatabaseDrivers, db_path: &str) -> Result, r2d2::Error> { - debug!("{:?}", db_driver); - let database: Box = match db_driver { DatabaseDrivers::Sqlite3 => { let db = SqliteDatabase::new(db_path)?; @@ -44,7 +38,9 @@ pub trait Database: Sync + Send { async fn load_keys(&self) -> Result, Error>; - async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), Error>; + async fn load_whitelist(&self) -> Result, Error>; + + async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), 
Error>; async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result; diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 6fd63b7d9..74f807d70 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -1,10 +1,9 @@ -use std::collections::BTreeMap; use std::str::FromStr; use async_trait::async_trait; use log::{debug}; use r2d2::Pool; -use r2d2_mysql::mysql::{Opts, OptsBuilder, params, TxOpts}; +use r2d2_mysql::mysql::{Opts, OptsBuilder, params}; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::MysqlConnectionManager; @@ -12,7 +11,6 @@ use crate::{AUTH_KEY_LENGTH, InfoHash}; use crate::databases::database::{Database, Error}; use crate::databases::database; use crate::tracker::key::AuthKey; -use crate::tracker::torrent::TorrentEntry; pub struct MysqlDatabase { pool: Pool, @@ -37,20 +35,20 @@ impl Database for MysqlDatabase { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( id integer PRIMARY KEY AUTO_INCREMENT, - info_hash BINARY(20) NOT NULL UNIQUE + info_hash VARCHAR(40) NOT NULL UNIQUE );".to_string(); let create_torrents_table = " CREATE TABLE IF NOT EXISTS torrents ( id integer PRIMARY KEY AUTO_INCREMENT, - info_hash BINARY(20) NOT NULL UNIQUE, + info_hash VARCHAR(40) NOT NULL UNIQUE, completed INTEGER DEFAULT 0 NOT NULL );".to_string(); let create_keys_table = format!(" CREATE TABLE IF NOT EXISTS `keys` ( `id` INT NOT NULL AUTO_INCREMENT, - `key` BINARY({}) NOT NULL, + `key` VARCHAR({}) NOT NULL, `valid_until` INT(10) NOT NULL, PRIMARY KEY (`id`), UNIQUE (`key`) @@ -68,7 +66,7 @@ impl Database for MysqlDatabase { async fn load_persistent_torrents(&self) -> Result, database::Error> { let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; - let torrents: Vec<(InfoHash, u32)> = conn.query_map("SELECT HEX(info_hash), completed FROM torrents", |(info_hash_string, completed): (String, u32)| { + let torrents: Vec<(InfoHash, u32)> = conn.query_map("SELECT info_hash, completed FROM 
torrents", |(info_hash_string, completed): (String, u32)| { let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); (info_hash, completed) }).map_err(|_| database::Error::QueryReturnedNoRows)?; @@ -89,44 +87,38 @@ impl Database for MysqlDatabase { Ok(keys) } - async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), database::Error> { + async fn load_whitelist(&self) -> Result, Error> { + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + + let info_hashes: Vec = conn.query_map("SELECT info_hash FROM whitelist", |info_hash: String| { + InfoHash::from_str(&info_hash).unwrap() + }).map_err(|_| database::Error::QueryReturnedNoRows)?; + + Ok(info_hashes) + } + + async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), database::Error> { let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; - let mut db_transaction = conn.start_transaction(TxOpts::default()).map_err(|_| database::Error::DatabaseError)?; + let info_hash_str = info_hash.to_string(); - let mut insert_vector= vec![]; + debug!("{}", info_hash_str); - for (info_hash, torrent_entry) in torrents { - let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - insert_vector.push(format!("(UNHEX('{}'), {})", info_hash.to_string(), completed.to_string())); - if insert_vector.len() == 1000 { - let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(",")); - if db_transaction.query_drop(query).is_err() { - return Err(Error::InvalidQuery); - } - insert_vector.clear(); + match conn.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (:info_hash_str, :completed) ON DUPLICATE KEY UPDATE completed = VALUES(completed)", params! 
{ info_hash_str, completed }) { + Ok(_) => { + Ok(()) } - } - - if insert_vector.len() != 0 { - let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(",")); - if db_transaction.query_drop(query).is_err() { - return Err(Error::InvalidQuery); + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) } - insert_vector.clear(); } - - if db_transaction.commit().is_err() { - return Err(Error::DatabaseError); - }; - - Ok(()) } async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; - match conn.exec_first::("SELECT HEX(info_hash) FROM whitelist WHERE info_hash = UNHEX(:info_hash)", params! { info_hash => info_hash }) + match conn.exec_first::("SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", params! { info_hash }) .map_err(|_| database::Error::QueryReturnedNoRows)? { Some(info_hash) => { Ok(InfoHash::from_str(&info_hash).unwrap()) @@ -142,7 +134,7 @@ impl Database for MysqlDatabase { let info_hash_str = info_hash.to_string(); - match conn.exec_drop("INSERT INTO whitelist (info_hash) VALUES (UNHEX(:info_hash_str))", params! { info_hash_str }) { + match conn.exec_drop("INSERT INTO whitelist (info_hash) VALUES (:info_hash_str)", params! { info_hash_str }) { Ok(_) => { Ok(1) } @@ -158,7 +150,7 @@ impl Database for MysqlDatabase { let info_hash = info_hash.to_string(); - match conn.exec_drop("DELETE FROM whitelist WHERE info_hash = UNHEX(:info_hash)", params! { info_hash }) { + match conn.exec_drop("DELETE FROM whitelist WHERE info_hash = :info_hash", params! 
{ info_hash }) { Ok(_) => { Ok(1) } diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 9c452eadc..55c77969a 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -1,4 +1,3 @@ -use std::collections::BTreeMap; use std::str::FromStr; use async_trait::async_trait; @@ -11,7 +10,6 @@ use crate::{AUTH_KEY_LENGTH, InfoHash}; use crate::databases::database::{Database, Error}; use crate::databases::database; use crate::tracker::key::AuthKey; -use crate::tracker::torrent::TorrentEntry; pub struct SqliteDatabase { pool: Pool, @@ -96,20 +94,35 @@ impl Database for SqliteDatabase { Ok(keys) } - async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), database::Error> { - let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + async fn load_whitelist(&self) -> Result, Error> { + let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - let db_transaction = conn.transaction()?; + let mut stmt = conn.prepare("SELECT info_hash FROM whitelist")?; - for (info_hash, torrent_entry) in torrents { - let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - let _ = db_transaction.execute("INSERT OR IGNORE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); - let _ = db_transaction.execute("UPDATE torrents SET completed = ? 
WHERE info_hash = ?", &[completed.to_string(), info_hash.to_string()]); - } + let info_hash_iter = stmt.query_map(NO_PARAMS, |row| { + let info_hash: String = row.get(0)?; + + Ok(InfoHash::from_str(&info_hash).unwrap()) + })?; - let _ = db_transaction.commit(); + let info_hashes: Vec = info_hash_iter.filter_map(|x| x.ok()).collect(); + + Ok(info_hashes) + } + + async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), database::Error> { + let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; - Ok(()) + match conn.execute("INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2", &[info_hash.to_string(), completed.to_string()]) { + Ok(updated) => { + if updated > 0 { return Ok(()); } + Err(database::Error::QueryReturnedNoRows) + } + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) + } + } } async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { diff --git a/src/http/filters.rs b/src/http/filters.rs index d5a7881d8..a288f8d97 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -3,7 +3,6 @@ use std::net::{IpAddr, SocketAddr}; use std::str::FromStr; use std::sync::Arc; -use log::debug; use warp::{Filter, reject, Rejection}; use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId}; @@ -75,11 +74,9 @@ async fn info_hashes(raw_query: String) -> WebResult> { for v in split_raw_query { if v.contains("info_hash") { let raw_info_hash = v.split("=").collect::>()[1]; - debug!("Raw info hash: {}", raw_info_hash); let info_hash_bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes)); if let Ok(ih) = info_hash { - debug!("Parsed info hash: {}", ih.to_string()); info_hashes.push(ih); } } @@ -151,8 +148,7 @@ async fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Opti // set client ip to last forwarded ip let x_forwarded_ip = 
*x_forwarded_ips.last().unwrap(); - IpAddr::from_str(x_forwarded_ip).or_else(|e| { - debug!("{}", e); + IpAddr::from_str(x_forwarded_ip).or_else(|_| { Err(reject::custom(ServerError::AddressNotFound)) }) } diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 977aeb4ba..7616ca301 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -56,7 +56,7 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option IpAddr::V6(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Tcp6Announce).await; } } - send_announce_response(&announce_request, torrent_stats, peers, announce_interval, tracker.config.announce_interval_min) + send_announce_response(&announce_request, torrent_stats, peers, announce_interval, tracker.config.min_announce_interval) } /// Handle scrape request diff --git a/src/jobs/log_statistics.rs b/src/jobs/log_statistics.rs deleted file mode 100644 index f62399a47..000000000 --- a/src/jobs/log_statistics.rs +++ /dev/null @@ -1,32 +0,0 @@ -use std::sync::Arc; -use log::info; -use tokio::task::JoinHandle; -use crate::{Configuration}; -use crate::tracker::tracker::TorrentTracker; - -pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { - let weak_tracker = std::sync::Arc::downgrade(&tracker); - let interval = config.log_interval.unwrap_or(60); - - tokio::spawn(async move { - let interval = std::time::Duration::from_secs(interval); - let mut interval = tokio::time::interval(interval); - interval.tick().await; - - loop { - tokio::select! 
{ - _ = tokio::signal::ctrl_c() => { - info!("Stopping statistics logging job.."); - break; - } - _ = interval.tick() => { - if let Some(tracker) = weak_tracker.upgrade() { - tracker.post_log().await; - } else { - break; - } - } - } - } - }) -} diff --git a/src/jobs/mod.rs b/src/jobs/mod.rs index a71fcb210..c3e58e56e 100644 --- a/src/jobs/mod.rs +++ b/src/jobs/mod.rs @@ -1,6 +1,4 @@ -pub mod persistent_torrent_statistics; pub mod torrent_cleanup; pub mod tracker_api; -pub mod log_statistics; pub mod http_tracker; pub mod udp_tracker; diff --git a/src/jobs/persistent_torrent_statistics.rs b/src/jobs/persistent_torrent_statistics.rs deleted file mode 100644 index 54ee23b6b..000000000 --- a/src/jobs/persistent_torrent_statistics.rs +++ /dev/null @@ -1,38 +0,0 @@ -use std::sync::Arc; -use log::info; -use tokio::task::JoinHandle; -use crate::{Configuration}; -use crate::tracker::tracker::TorrentTracker; - -pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { - let weak_tracker = std::sync::Arc::downgrade(&tracker); - let interval = config.persistence_interval; - - tokio::spawn(async move { - let interval = std::time::Duration::from_secs(interval); - let mut interval = tokio::time::interval(interval); - interval.tick().await; - - // periodically save torrents to database - loop { - tokio::select! 
{ - _ = tokio::signal::ctrl_c() => { - // Save before shutting down - tracker.periodic_saving().await; - info!("Stopping periodic torrent saving job.."); - break; - } - _ = interval.tick() => { - if let Some(tracker) = weak_tracker.upgrade() { - info!("Saving torrents to database..."); - tracker.periodic_saving().await; - info!("Periodic saving done."); - } else { - // If tracker no longer exists, stop job - break; - } - } - } - } - }) -} diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index 2b8ac3ebf..30b87b99b 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -1,5 +1,3 @@ -use std::error::Error; -use std::fmt::Write; use std::net::SocketAddr; use std::time::SystemTime; diff --git a/src/setup.rs b/src/setup.rs index 69f2db432..ed9b6d8ff 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -1,24 +1,21 @@ use std::sync::Arc; -use log::{info, warn}; +use log::{warn}; use tokio::task::JoinHandle; use crate::{Configuration}; -use crate::jobs::{http_tracker, log_statistics, persistent_torrent_statistics, torrent_cleanup, tracker_api, udp_tracker}; +use crate::jobs::{http_tracker, torrent_cleanup, tracker_api, udp_tracker}; use crate::tracker::tracker::TorrentTracker; pub async fn setup(config: &Configuration, tracker: Arc) -> Vec>{ let mut jobs: Vec> = Vec::new(); + // Load peer keys if tracker.is_private() { - tracker.load_keys().await.expect("Could not retrieve keys."); + tracker.load_keys().await.expect("Could not retrieve keys from database."); } - // todo: replace by realtime updates - // Load persistent torrents - if config.persistent_torrent_completed_stat && config.persistence_interval > 0 { - info!("Loading persistent torrents into memory.."); - tracker.load_persistent_torrents().await.expect("Could not load persistent torrents."); - info!("Persistent torrents loaded."); - jobs.push(persistent_torrent_statistics::start_job(&config, tracker.clone())); + // Load whitelisted torrents + if tracker.is_whitelisted() { + 
tracker.load_whitelist().await.expect("Could not load whitelist from database."); } // Start the UDP blocks @@ -48,12 +45,5 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Vec< jobs.push(torrent_cleanup::start_job(&config, tracker.clone())); } - // Log detailed torrent stats - if let Some(log_interval) = config.log_interval { - if log_interval > 0 { - jobs.push(log_statistics::start_job(&config, tracker.clone())); - } - } - jobs } diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 5822d2d4e..c67df72ec 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -109,12 +109,24 @@ impl StatsTracker { stats_lock.tcp6_scrapes_handled += 1; stats_lock.tcp6_connections_handled += 1; } - TrackerStatisticsEvent::Udp4Connect => { stats_lock.udp4_connections_handled += 1; } - TrackerStatisticsEvent::Udp4Announce => { stats_lock.udp4_announces_handled += 1; } - TrackerStatisticsEvent::Udp4Scrape => { stats_lock.udp4_scrapes_handled += 1; } - TrackerStatisticsEvent::Udp6Connect => { stats_lock.udp6_connections_handled += 1; } - TrackerStatisticsEvent::Udp6Announce => { stats_lock.udp6_announces_handled += 1; } - TrackerStatisticsEvent::Udp6Scrape => { stats_lock.udp6_scrapes_handled += 1; } + TrackerStatisticsEvent::Udp4Connect => { + stats_lock.udp4_connections_handled += 1; + } + TrackerStatisticsEvent::Udp4Announce => { + stats_lock.udp4_announces_handled += 1; + } + TrackerStatisticsEvent::Udp4Scrape => { + stats_lock.udp4_scrapes_handled += 1; + } + TrackerStatisticsEvent::Udp6Connect => { + stats_lock.udp6_connections_handled += 1; + } + TrackerStatisticsEvent::Udp6Announce => { + stats_lock.udp6_announces_handled += 1; + } + TrackerStatisticsEvent::Udp6Scrape => { + stats_lock.udp6_scrapes_handled += 1; + } } drop(stats_lock); diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 4b891e992..0c03e3f82 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -21,7 +21,10 @@ impl TorrentEntry { } } 
- pub fn update_peer(&mut self, peer: &TorrentPeer) { + // Update peer and return completed (times torrent has been downloaded) + pub fn update_peer(&mut self, peer: &TorrentPeer) -> bool { + let mut did_torrent_stats_change: bool = false; + match peer.event { AnnounceEvent::Stopped => { let _ = self.peers.remove(&peer.peer_id); @@ -29,12 +32,17 @@ impl TorrentEntry { AnnounceEvent::Completed => { let peer_old = self.peers.insert(peer.peer_id.clone(), peer.clone()); // Don't count if peer was not previously known - if peer_old.is_some() { self.completed += 1; } + if peer_old.is_some() { + self.completed += 1; + did_torrent_stats_change = true; + } } _ => { let _ = self.peers.insert(peer.peer_id.clone(), peer.clone()); } } + + did_torrent_stats_change } pub fn get_peers(&self, client_addr: Option<&SocketAddr>) -> Vec<&TorrentPeer> { diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index 45b23f728..bcfae3c37 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -3,7 +3,6 @@ use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; -use log::info; use tokio::sync::{RwLock, RwLockReadGuard}; use tokio::sync::mpsc::error::SendError; @@ -22,9 +21,8 @@ pub struct TorrentTracker { pub config: Arc, mode: TrackerMode, keys: RwLock>, + whitelist: RwLock>, torrents: RwLock>, - updates: RwLock>, - shadow: RwLock>, stats_tracker: StatsTracker, database: Box } @@ -35,15 +33,14 @@ impl TorrentTracker { let mut stats_tracker = StatsTracker::new(); // starts a thread for updating tracker stats - if config.statistics { stats_tracker.run_worker(); } + if config.tracker_usage_statistics { stats_tracker.run_worker(); } Ok(TorrentTracker { config: config.clone(), mode: config.mode, keys: RwLock::new(std::collections::HashMap::new()), + whitelist: RwLock::new(std::collections::HashSet::new()), torrents: RwLock::new(std::collections::BTreeMap::new()), - updates: RwLock::new(std::collections::HashMap::new()), - shadow: 
RwLock::new(std::collections::HashMap::new()), stats_tracker, database }) @@ -64,7 +61,7 @@ impl TorrentTracker { pub async fn generate_auth_key(&self, seconds_valid: u64) -> Result { let auth_key = key::generate_auth_key(seconds_valid); - // add key to database + // Add key to database if let Err(error) = self.database.add_key_to_keys(&auth_key).await { return Err(error); } // Add key to in-memory database @@ -125,6 +122,8 @@ impl TorrentTracker { let keys_from_database = self.database.load_keys().await?; let mut keys = self.keys.write().await; + keys.clear(); + for key in keys_from_database { let _ = keys.insert(key.key.clone(), key); } @@ -132,6 +131,19 @@ impl TorrentTracker { Ok(()) } + pub async fn load_whitelist(&self) -> Result<(), database::Error> { + let whitelisted_torrents_from_database = self.database.load_whitelist().await?; + let mut whitelist = self.whitelist.write().await; + + whitelist.clear(); + + for info_hash in whitelisted_torrents_from_database { + let _ = whitelist.insert(info_hash); + } + + Ok(()) + } + // Loading the torrents from database into memory pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; @@ -152,12 +164,6 @@ impl TorrentTracker { Ok(()) } - // Saving the torrents from memory - pub async fn save_torrents(&self) -> Result<(), database::Error> { - let torrents = self.torrents.read().await; - self.database.save_persistent_torrent_data(&*torrents).await - } - // Adding torrents is not relevant to public trackers. 
pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result { self.database.add_info_hash_to_whitelist(info_hash.clone()).await @@ -199,19 +205,15 @@ impl TorrentTracker { } }; - torrent_entry.update_peer(peer); + let stats_updated = torrent_entry.update_peer(peer); - let (seeders, completed, leechers) = torrent_entry.get_stats(); - - if self.config.persistent_torrent_completed_stat { - let mut updates = self.updates.write().await; - if updates.contains_key(info_hash) { - updates.remove(info_hash); - } - updates.insert(*info_hash, completed); - drop(updates); + // todo: move this action to a separate worker + if self.config.persistent_torrent_completed_stat && stats_updated { + let _ = self.database.save_persistent_torrent(&info_hash, torrent_entry.completed).await; } + let (seeders, completed, leechers) = torrent_entry.get_stats(); + TorrentStats { seeders, leechers, @@ -231,19 +233,6 @@ impl TorrentTracker { self.stats_tracker.send_event(event).await } - pub async fn post_log(&self) { - let torrents = self.torrents.read().await; - let torrents_size = torrents.len(); - drop(torrents); - let updates = self.updates.read().await; - let updates_size = updates.len(); - drop(updates); - let shadow = self.shadow.read().await; - let shadow_size = shadow.len(); - drop(shadow); - info!("-=[ Stats ]=- | Torrents: {} | Updates: {} | Shadow: {}", torrents_size, updates_size, shadow_size); - } - // Remove inactive peers and (optionally) peerless torrents pub async fn cleanup_torrents(&self) { let mut torrents_lock = self.torrents.write().await; @@ -251,7 +240,7 @@ impl TorrentTracker { // If we don't need to remove torrents we will use the faster iter if self.config.remove_peerless_torrents { torrents_lock.retain(|_, torrent_entry| { - torrent_entry.remove_inactive_peers(self.config.peer_timeout); + torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); match self.config.persistent_torrent_completed_stat { true => { torrent_entry.completed > 0 || 
torrent_entry.peers.len() > 0 } @@ -260,61 +249,8 @@ impl TorrentTracker { }); } else { for (_, torrent_entry) in torrents_lock.iter_mut() { - torrent_entry.remove_inactive_peers(self.config.peer_timeout); + torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); } } } - - // todo: refactor - pub async fn periodic_saving(&self) { - // Get a lock for writing - // let mut shadow = self.shadow.write().await; - - // We will get the data and insert it into the shadow, while clearing updates. - let mut updates = self.updates.write().await; - let mut updates_cloned: std::collections::HashMap = std::collections::HashMap::new(); - // let mut torrent_hashes: Vec = Vec::new(); - // Copying updates to updates_cloned - for (k, completed) in updates.iter() { - updates_cloned.insert(k.clone(), completed.clone()); - } - updates.clear(); - drop(updates); - - // Copying updates_cloned into the shadow to overwrite - for (k, completed) in updates_cloned.iter() { - let mut shadows = self.shadow.write().await; - if shadows.contains_key(k) { - shadows.remove(k); - } - shadows.insert(k.clone(), completed.clone()); - drop(shadows); - } - drop(updates_cloned); - - // We updated the shadow data from the updates data, let's handle shadow data as expected. - // Handle shadow_copy to be updated into SQL - let mut shadow_copy: BTreeMap = BTreeMap::new(); - let shadows = self.shadow.read().await; - for (infohash, completed) in shadows.iter() { - shadow_copy.insert(infohash.clone(), TorrentEntry { - peers: Default::default(), - completed: completed.clone(), - }); - } - drop(shadows); - - // We will now save the data from the shadow into the database. - // This should not put any strain on the server itself, other then the harddisk/ssd. 
- info!("Start saving shadow data into SQL..."); - let result = self.database.save_persistent_torrent_data(&shadow_copy).await; - if result.is_ok() { - info!("Done saving data to SQL and succeeded, emptying shadow..."); - let mut shadow = self.shadow.write().await; - shadow.clear(); - drop(shadow); - } else { - info!("Done saving data to SQL and failed, not emptying shadow..."); - } - } } From 0a771314295c79bc5c89bdbcdb35057df085d127 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 9 May 2022 20:05:11 +0200 Subject: [PATCH 048/435] fix: fixed up some database errors --- src/databases/mysql.rs | 18 +++++++++--------- src/databases/sqlite.rs | 38 +++++++++++++++++++------------------- 2 files changed, 28 insertions(+), 28 deletions(-) diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 74f807d70..5b6e34eb1 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -54,7 +54,7 @@ impl Database for MysqlDatabase { UNIQUE (`key`) );", AUTH_KEY_LENGTH as i8); - let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; conn.query_drop(&create_torrents_table).expect("Could not create torrents table."); conn.query_drop(&create_keys_table).expect("Could not create keys table."); @@ -64,7 +64,7 @@ impl Database for MysqlDatabase { } async fn load_persistent_torrents(&self) -> Result, database::Error> { - let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let torrents: Vec<(InfoHash, u32)> = conn.query_map("SELECT info_hash, completed FROM torrents", |(info_hash_string, completed): (String, u32)| { let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); @@ -98,7 +98,7 @@ impl Database for MysqlDatabase { } async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), database::Error> { - let mut conn = 
self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let info_hash_str = info_hash.to_string(); @@ -116,7 +116,7 @@ impl Database for MysqlDatabase { } async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.exec_first::("SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", params! { info_hash }) .map_err(|_| database::Error::QueryReturnedNoRows)? { @@ -130,7 +130,7 @@ impl Database for MysqlDatabase { } async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let info_hash_str = info_hash.to_string(); @@ -146,7 +146,7 @@ impl Database for MysqlDatabase { } async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let info_hash = info_hash.to_string(); @@ -162,7 +162,7 @@ impl Database for MysqlDatabase { } async fn get_key_from_keys(&self, key: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }) .map_err(|_| database::Error::QueryReturnedNoRows)? 
{ @@ -179,7 +179,7 @@ impl Database for MysqlDatabase { } async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let key = auth_key.key.to_string(); let valid_until = auth_key.valid_until.unwrap_or(0).to_string(); @@ -196,7 +196,7 @@ impl Database for MysqlDatabase { } async fn remove_key_from_keys(&self, key: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! { key }) { Ok(_) => { diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 55c77969a..e1659d897 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -6,7 +6,7 @@ use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; use r2d2_sqlite::rusqlite::NO_PARAMS; -use crate::{AUTH_KEY_LENGTH, InfoHash}; +use crate::{InfoHash}; use crate::databases::database::{Database, Error}; use crate::databases::database; use crate::tracker::key::AuthKey; @@ -30,25 +30,25 @@ impl Database for SqliteDatabase { fn create_database_tables(&self) -> Result<(), database::Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( - id integer PRIMARY KEY AUTOINCREMENT, - info_hash VARCHAR(20) NOT NULL UNIQUE + id INTEGER PRIMARY KEY AUTOINCREMENT, + info_hash TEXT NOT NULL UNIQUE );".to_string(); let create_torrents_table = " CREATE TABLE IF NOT EXISTS torrents ( - id integer PRIMARY KEY AUTOINCREMENT, - info_hash VARCHAR(20) NOT NULL UNIQUE, + id INTEGER PRIMARY KEY AUTOINCREMENT, + info_hash TEXT NOT NULL UNIQUE, completed INTEGER DEFAULT 0 NOT NULL );".to_string(); - let create_keys_table = format!(" + let create_keys_table = " CREATE TABLE IF NOT EXISTS keys ( - id integer PRIMARY KEY AUTOINCREMENT, - key VARCHAR({}) NOT NULL 
UNIQUE, - valid_until INT(10) NOT NULL - );", AUTH_KEY_LENGTH as i8); + id INTEGER PRIMARY KEY AUTOINCREMENT, + key TEXT NOT NULL UNIQUE, + valid_until INTEGER NOT NULL + );".to_string(); - let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; conn.execute(&create_whitelist_table, NO_PARAMS) .and_then(|_| conn.execute(&create_keys_table, NO_PARAMS)) @@ -58,7 +58,7 @@ impl Database for SqliteDatabase { } async fn load_persistent_torrents(&self) -> Result, database::Error> { - let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; @@ -111,7 +111,7 @@ impl Database for SqliteDatabase { } async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), database::Error> { - let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.execute("INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2", &[info_hash.to_string(), completed.to_string()]) { Ok(updated) => { @@ -126,7 +126,7 @@ impl Database for SqliteDatabase { } async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; let mut rows = stmt.query(&[info_hash])?; @@ -142,7 +142,7 @@ impl Database for SqliteDatabase { } async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let conn = self.pool.get().map_err(|_| 
database::Error::DatabaseError)?; match conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", &[info_hash.to_string()]) { Ok(updated) => { @@ -157,7 +157,7 @@ impl Database for SqliteDatabase { } async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", &[info_hash.to_string()]) { Ok(updated) => { @@ -172,7 +172,7 @@ impl Database for SqliteDatabase { } async fn get_key_from_keys(&self, key: &str) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; let mut rows = stmt.query(&[key.to_string()])?; @@ -191,7 +191,7 @@ impl Database for SqliteDatabase { } async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.execute("INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", &[auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()], @@ -208,7 +208,7 @@ impl Database for SqliteDatabase { } async fn remove_key_from_keys(&self, key: &str) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::InvalidQuery)?; + let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.execute("DELETE FROM keys WHERE key = ?", &[key]) { Ok(updated) => { From 68d3242207d9a5d76114866a9acabe9cf0cae9e9 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 9 May 2022 20:30:05 +0200 Subject: [PATCH 049/435] some code cleanup --- src/api/server.rs | 4 ++ src/tracker/tracker.rs | 110 ++++++++++++++++++----------------------- 2 files changed, 53 insertions(+), 
61 deletions(-) diff --git a/src/api/server.rs b/src/api/server.rs index 77496b497..58c3f0d1a 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -152,7 +152,9 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp udp6_announces_handled: 0, udp6_scrapes_handled: 0, }; + let db = tracker.get_torrents().await; + let _: Vec<_> = db .iter() .map(|(_info_hash, torrent_entry)| { @@ -163,7 +165,9 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp results.torrents += 1; }) .collect(); + let stats = tracker.get_stats().await; + results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index bcfae3c37..163bfe446 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -60,33 +60,66 @@ impl TorrentTracker { pub async fn generate_auth_key(&self, seconds_valid: u64) -> Result { let auth_key = key::generate_auth_key(seconds_valid); - - // Add key to database - if let Err(error) = self.database.add_key_to_keys(&auth_key).await { return Err(error); } - - // Add key to in-memory database + self.database.add_key_to_keys(&auth_key).await?; self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); - Ok(auth_key) } - pub async fn remove_auth_key(&self, key: &str) -> Result { + pub async fn remove_auth_key(&self, key: &str) -> Result<(), database::Error> { self.database.remove_key_from_keys(&key).await?; - - // Remove key from in-memory database self.keys.write().await.remove(key); - - Ok(1) + Ok(()) } pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key::Error> { - let keys_lock = self.keys.read().await; + match self.keys.read().await.get(&auth_key.key) { + None => Err(key::Error::KeyInvalid), + Some(key) => key::verify_auth_key(key) + } + } - if let Some(key) = keys_lock.get(&auth_key.key) { - 
key::verify_auth_key(key) - } else { - Err(key::Error::KeyInvalid) + pub async fn load_keys(&self) -> Result<(), database::Error> { + let keys_from_database = self.database.load_keys().await?; + let mut keys = self.keys.write().await; + + keys.clear(); + + for key in keys_from_database { + let _ = keys.insert(key.key.clone(), key); + } + + Ok(()) + } + + // Adding torrents is not relevant to public trackers. + pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + self.database.add_info_hash_to_whitelist(info_hash.clone()).await?; + self.whitelist.write().await.insert(info_hash.clone()); + Ok(()) + } + + // Removing torrents is not relevant to public trackers. + pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + self.database.remove_info_hash_from_whitelist(info_hash.clone()).await?; + self.whitelist.write().await.remove(info_hash); + Ok(()) + } + + pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { + self.whitelist.read().await.contains(info_hash) + } + + pub async fn load_whitelist(&self) -> Result<(), database::Error> { + let whitelisted_torrents_from_database = self.database.load_whitelist().await?; + let mut whitelist = self.whitelist.write().await; + + whitelist.clear(); + + for info_hash in whitelisted_torrents_from_database { + let _ = whitelist.insert(info_hash); } + + Ok(()) } pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { @@ -107,7 +140,6 @@ impl TorrentTracker { } } - // todo: speed this up // check if info_hash is whitelisted if self.is_whitelisted() { if !self.is_info_hash_whitelisted(info_hash).await { @@ -118,32 +150,6 @@ impl TorrentTracker { Ok(()) } - pub async fn load_keys(&self) -> Result<(), database::Error> { - let keys_from_database = self.database.load_keys().await?; - let mut keys = self.keys.write().await; - - keys.clear(); - - for key in 
keys_from_database { - let _ = keys.insert(key.key.clone(), key); - } - - Ok(()) - } - - pub async fn load_whitelist(&self) -> Result<(), database::Error> { - let whitelisted_torrents_from_database = self.database.load_whitelist().await?; - let mut whitelist = self.whitelist.write().await; - - whitelist.clear(); - - for info_hash in whitelisted_torrents_from_database { - let _ = whitelist.insert(info_hash); - } - - Ok(()) - } - // Loading the torrents from database into memory pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; @@ -164,24 +170,6 @@ impl TorrentTracker { Ok(()) } - // Adding torrents is not relevant to public trackers. - pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result { - self.database.add_info_hash_to_whitelist(info_hash.clone()).await - } - - // Removing torrents is not relevant to public trackers. - pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result { - self.database.remove_info_hash_from_whitelist(info_hash.clone()).await - } - - pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { - match self.database.get_info_hash_from_whitelist(&info_hash.to_string()).await { - Ok(_) => true, - Err(_) => false - } - } - - pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr, ) -> Vec { let read_lock = self.torrents.read().await; From ce24446cbb1ed49acdbd9104245b1f5b70e027ab Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 9 May 2022 21:41:54 +0200 Subject: [PATCH 050/435] feat: scrape unauthenticated info_hashes return 0 values instead of error --- src/http/handlers.rs | 11 ++++++----- src/udp/handlers.rs | 24 ++++++++++++++++-------- 2 files changed, 22 insertions(+), 13 deletions(-) diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 7616ca301..0dc737641 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -65,13 
+65,14 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option { - let (seeders, completed, leechers) = torrent_info.get_stats(); - ScrapeResponseEntry { complete: seeders, downloaded: completed, incomplete: leechers } + if authenticate(info_hash, &auth_key, tracker.clone()).await.is_ok() { + let (seeders, completed, leechers) = torrent_info.get_stats(); + ScrapeResponseEntry { complete: seeders, downloaded: completed, incomplete: leechers } + } else { + ScrapeResponseEntry { complete: 0, downloaded: 0, incomplete: 0 } + } } None => { ScrapeResponseEntry { complete: 0, downloaded: 0, incomplete: 0 } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 4ea767c0b..860a2fe4b 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -154,16 +154,22 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra for info_hash in request.info_hashes.iter() { let info_hash = InfoHash(info_hash.0); - if authenticate(&info_hash, tracker.clone()).await.is_err() { continue; } - let scrape_entry = match db.get(&info_hash) { Some(torrent_info) => { - let (seeders, completed, leechers) = torrent_info.get_stats(); - - TorrentScrapeStatistics { - seeders: NumberOfPeers(seeders as i32), - completed: NumberOfDownloads(completed as i32), - leechers: NumberOfPeers(leechers as i32), + if authenticate(&info_hash, tracker.clone()).await.is_ok() { + let (seeders, completed, leechers) = torrent_info.get_stats(); + + TorrentScrapeStatistics { + seeders: NumberOfPeers(seeders as i32), + completed: NumberOfDownloads(completed as i32), + leechers: NumberOfPeers(leechers as i32), + } + } else { + TorrentScrapeStatistics { + seeders: NumberOfPeers(0), + completed: NumberOfDownloads(0), + leechers: NumberOfPeers(0), + } } } None => { @@ -178,6 +184,8 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra torrent_stats.push(scrape_entry); } + drop(db); + // send stats event match remote_addr { SocketAddr::V4(_) => { 
tracker.send_stats_event(TrackerStatisticsEvent::Udp4Scrape).await; } From 77e173cf2b7e0e075efff5b8619fc1c1ecf0a84c Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 9 May 2022 21:48:19 +0200 Subject: [PATCH 051/435] refactor: removed root from http routes --- src/http/routes.rs | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/src/http/routes.rs b/src/http/routes.rs index a0b197f44..53b2b0ce5 100644 --- a/src/http/routes.rs +++ b/src/http/routes.rs @@ -14,22 +14,11 @@ use crate::tracker::tracker::TorrentTracker; /// All routes pub fn routes(tracker: Arc) -> impl Filter + Clone { - root(tracker.clone()) - .or(announce(tracker.clone())) - .or(scrape(tracker.clone())) + announce(tracker.clone()) + .or(scrape(tracker)) .recover(send_error) } -/// GET / or / -fn root(tracker: Arc) -> impl Filter + Clone { - warp::any() - .and(warp::filters::method::get()) - .and(with_announce_request(tracker.config.on_reverse_proxy)) - .and(with_auth_key()) - .and(with_tracker(tracker)) - .and_then(handle_announce) -} - /// GET /announce or /announce/ fn announce(tracker: Arc) -> impl Filter + Clone { warp::path::path("announce") From b9aca67246df50f6ed183f7eb61652c54dc3a40c Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 9 May 2022 22:02:54 +0200 Subject: [PATCH 052/435] feat: added endpoints to reload whitelist and keys from database --- src/api/server.rs | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/src/api/server.rs b/src/api/server.rs index 58c3f0d1a..19ceac92a 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -300,6 +300,46 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp } }); + // GET /api/whitelist/reload + // Reload whitelist + let t7 = tracker.clone(); + let reload_whitelist = filters::method::get() + .and(filters::path::path("whitelist")) + .and(filters::path::path("reload")) + .and(filters::path::end()) + .map(move || { + let tracker = t7.clone(); + tracker + 
}) + .and_then(|tracker: Arc| { + async move { + match tracker.load_whitelist().await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to reload whitelist".into() })) + } + } + }); + + // GET /api/keys/reload + // Reload whitelist + let t8 = tracker.clone(); + let reload_keys = filters::method::get() + .and(filters::path::path("keys")) + .and(filters::path::path("reload")) + .and(filters::path::end()) + .map(move || { + let tracker = t8.clone(); + tracker + }) + .and_then(|tracker: Arc| { + async move { + match tracker.load_keys().await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to reload keys".into() })) + } + } + }); + let api_routes = filters::path::path("api") .and(view_torrent_list @@ -309,6 +349,8 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp .or(add_torrent) .or(create_key) .or(delete_key) + .or(reload_whitelist) + .or(reload_keys) ); let server = api_routes.and(authenticate(tracker.config.http_api.access_tokens.clone())); From df87a9d3e89e530bfe90f1a48d0df884d377b430 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 9 May 2022 22:04:28 +0200 Subject: [PATCH 053/435] updated example config --- README.md | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index bb4649271..beb2591ea 100644 --- a/README.md +++ b/README.md @@ -52,29 +52,26 @@ cargo build --release ```toml log_level = "info" mode = "public" +db_driver = "Sqlite3" db_path = "data.db" -persistence = false -cleanup_interval = 600 -cleanup_peerless = true -external_ip = "0.0.0.0" announce_interval = 120 -announce_interval_min = 900 -peer_timeout = 900 +min_announce_interval = 120 +max_peer_timeout = 900 on_reverse_proxy = false +external_ip = "0.0.0.0" +tracker_usage_statistics = true +persistent_torrent_completed_stat = false +inactive_peer_cleanup_interval = 
600 +remove_peerless_torrents = true [[udp_trackers]] enabled = false bind_address = "0.0.0.0:6969" -[[udp_trackers]] -enabled = true -bind_address = "[::]:6969" - [[http_trackers]] enabled = true bind_address = "0.0.0.0:6969" ssl_enabled = false -ssl_bind_address = "0.0.0.0:6868" ssl_cert_path = "" ssl_key_path = "" From 2d97caba4389e09e0a7fe5f9ab73f6804b676c78 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 9 Aug 2022 10:32:46 +0100 Subject: [PATCH 054/435] ci: show coverage report --- .github/workflows/test_build_release.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index 7be5626e5..d848ed653 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -14,9 +14,12 @@ jobs: with: profile: minimal toolchain: stable + components: llvm-tools-preview - uses: Swatinem/rust-cache@v1 + - uses: taiki-e/install-action@cargo-llvm-cov + - uses: taiki-e/install-action@nextest - name: Run tests - run: cargo test + run: cargo llvm-cov nextest build: needs: test From b548e80e8cb5b1da87a14578d99df5f303cca345 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 9 Aug 2022 17:26:02 +0100 Subject: [PATCH 055/435] test: add test for config module --- Cargo.lock | 112 ++++++++++++++++++++++++++++- Cargo.toml | 2 + src/config.rs | 148 +++++++++++++++++++++++++++++++------- src/databases/database.rs | 2 +- src/main.rs | 4 +- 5 files changed, 237 insertions(+), 31 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1ee924fac..c52f6767b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -265,7 +265,7 @@ dependencies = [ "ansi_term", "atty", "bitflags", - "strsim", + "strsim 0.8.0", "textwrap", "unicode-width", "vec_map", @@ -352,6 +352,41 @@ dependencies = [ "typenum", ] +[[package]] +name = "darling" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4529658bdda7fd6769b8614be250cdcfc3aeb0ee72fe66f9e41e5e5eb73eac02" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "649c91bc01e8b1eac09fb91e8dbc7d517684ca6be8ebc75bb9cafc894f9fdb6f" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc69c5bfcbd2fc09a0f38451d2daf0e372e367986a83906d1b0dbc88134fb5" +dependencies = [ + "darling_core", + "quote", + "syn", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -812,6 +847,12 @@ dependencies = [ "want", ] +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + [[package]] name = "idna" version = "0.2.3" @@ -831,6 +872,7 @@ checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" dependencies = [ "autocfg", "hashbrown", + "serde 1.0.137", ] [[package]] @@ -1131,7 +1173,7 @@ dependencies = [ "subprocess", "thiserror", "time 0.2.27", - "uuid", + "uuid 0.8.2", ] [[package]] @@ -1254,6 +1296,15 @@ dependencies = [ "libc", ] +[[package]] +name = "num_threads" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" +dependencies = [ + "libc", +] + [[package]] name = "once_cell" version = "1.10.0" @@ -1826,6 +1877,34 @@ dependencies = [ "serde 1.0.137", ] +[[package]] +name = "serde_with" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89df7a26519371a3cce44fbb914c2819c84d9b897890987fa3ab096491cc0ea8" +dependencies = [ + "base64", + "chrono", + "hex", + "indexmap", + "serde 
1.0.137", + "serde_json", + "serde_with_macros", + "time 0.3.13", +] + +[[package]] +name = "serde_with_macros" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de337f322382fcdfbb21a014f7c224ee041a23785651db67b9827403178f698f" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "sha-1" version = "0.9.8" @@ -1991,6 +2070,12 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + [[package]] name = "subprocess" version = "0.2.8" @@ -2096,6 +2181,18 @@ dependencies = [ "winapi", ] +[[package]] +name = "time" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db76ff9fa4b1458b3c7f077f3ff9887394058460d21e634355b273aaf11eea45" +dependencies = [ + "itoa", + "libc", + "num_threads", + "serde 1.0.137", +] + [[package]] name = "time-macros" version = "0.1.1" @@ -2259,9 +2356,11 @@ dependencies = [ "serde 1.0.137", "serde_bencode", "serde_json", + "serde_with", "thiserror", "tokio", "toml", + "uuid 1.1.2", "warp", ] @@ -2421,6 +2520,15 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" +[[package]] +name = "uuid" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd6469f4314d5f1ffec476e05f17cc9a78bc7a27a6a857842170bdf8d6f98d2f" +dependencies = [ + "getrandom", +] + [[package]] name = "vcpkg" version = "0.2.15" diff --git a/Cargo.toml b/Cargo.toml index 53e2949db..554ba940d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,6 +24,7 @@ tokio = { version = "1.7", features = ["full"] } serde = 
{ version = "1.0", features = ["derive"] } serde_bencode = "^0.2.3" serde_json = "1.0.72" +serde_with = "2.0.0" hex = "0.4.3" percent-encoding = "2.1.0" binascii = "0.1" @@ -48,3 +49,4 @@ futures = "0.3.21" async-trait = "0.1.52" aquatic_udp_protocol = "0.2.0" +uuid = { version = "1.1.2", features = ["v4"] } diff --git a/src/config.rs b/src/config.rs index e2cd167f5..8acda3f63 100644 --- a/src/config.rs +++ b/src/config.rs @@ -6,37 +6,39 @@ use std::path::Path; use std::str::FromStr; use config::{Config, ConfigError, File}; -use serde::{Deserialize, Serialize, Serializer}; +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, NoneAsEmptyString}; use toml; use crate::databases::database::DatabaseDrivers; use crate::mode::TrackerMode; -#[derive(Serialize, Deserialize, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Debug)] pub struct UdpTrackerConfig { pub enabled: bool, pub bind_address: String, } -#[derive(Serialize, Deserialize, Debug)] +#[serde_as] +#[derive(Serialize, Deserialize, PartialEq, Debug)] pub struct HttpTrackerConfig { pub enabled: bool, pub bind_address: String, pub ssl_enabled: bool, - #[serde(serialize_with = "none_as_empty_string")] + #[serde_as(as = "NoneAsEmptyString")] pub ssl_cert_path: Option, - #[serde(serialize_with = "none_as_empty_string")] + #[serde_as(as = "NoneAsEmptyString")] pub ssl_key_path: Option, } -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, PartialEq, Debug)] pub struct HttpApiConfig { pub enabled: bool, pub bind_address: String, pub access_tokens: HashMap, } -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, PartialEq, Debug)] pub struct Configuration { pub log_level: Option, pub mode: TrackerMode, @@ -75,18 +77,6 @@ impl std::fmt::Display for ConfigurationError { impl std::error::Error for ConfigurationError {} -pub fn none_as_empty_string(option: &Option, serializer: S) -> Result - where - T: Serialize, - S: Serializer, -{ - if let Some(value) = option { - 
value.serialize(serializer) - } else { - "".serialize(serializer) - } -} - impl Configuration { pub fn load(data: &[u8]) -> Result { toml::from_slice(data) @@ -161,18 +151,16 @@ impl Configuration { configuration } - pub fn load_from_file() -> Result { + pub fn load_from_file(path: &str) -> Result { let mut config = Config::new(); - const CONFIG_PATH: &str = "config.toml"; - - if Path::new(CONFIG_PATH).exists() { - config.merge(File::with_name(CONFIG_PATH))?; + if Path::new(path).exists() { + config.merge(File::with_name(path))?; } else { eprintln!("No config file found."); eprintln!("Creating config file.."); let config = Configuration::default(); - let _ = config.save_to_file(); + let _ = config.save_to_file(path); return Err(ConfigError::Message(format!("Please edit the config.TOML in the root folder and restart the tracker."))); } @@ -181,9 +169,115 @@ impl Configuration { Ok(torrust_config) } - pub fn save_to_file(&self) -> Result<(), ()> { + pub fn save_to_file(&self, path: &str) -> Result<(), ()> { let toml_string = toml::to_string(self).expect("Could not encode TOML value"); - fs::write("config.toml", toml_string).expect("Could not write to file!"); + fs::write(path, toml_string).expect("Could not write to file!"); Ok(()) } } + +mod configuration { + + #[cfg(test)] + fn default_config_toml() -> String { + let config = r#"log_level = "info" + mode = "public" + db_driver = "Sqlite3" + db_path = "data.db" + announce_interval = 120 + min_announce_interval = 120 + max_peer_timeout = 900 + on_reverse_proxy = false + external_ip = "0.0.0.0" + tracker_usage_statistics = true + persistent_torrent_completed_stat = false + inactive_peer_cleanup_interval = 600 + remove_peerless_torrents = true + + [[udp_trackers]] + enabled = false + bind_address = "0.0.0.0:6969" + + [[http_trackers]] + enabled = false + bind_address = "0.0.0.0:6969" + ssl_enabled = false + ssl_cert_path = "" + ssl_key_path = "" + + [http_api] + enabled = true + bind_address = "127.0.0.1:1212" + + 
[http_api.access_tokens] + admin = "MyAccessToken" + "#.lines().map(|line| line.trim_start()).collect::>().join("\n"); + config + } + + #[test] + fn should_have_a_default_value_for_the_log_level() { + use crate::Configuration; + + let configuration = Configuration::default(); + + assert_eq!(configuration.log_level, Option::from(String::from("info")), "Expected default log level to be: {:?}, got {:?}", Option::from(String::from("info")), configuration.log_level); + } + + #[test] + fn should_be_saved_in_a_toml_config_file() { + use std::env; + use crate::Configuration; + use std::fs; + use uuid::Uuid; + + // Build temp config file path + let temp_directory = env::temp_dir(); + let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); + + // Convert to argument type for Configuration::save_to_file + let config_file_path = temp_file.clone(); + let path = config_file_path.to_string_lossy().to_string(); + + let default_configuration = Configuration::default(); + + default_configuration.save_to_file(&path).expect("Could not save configuration to file"); + + let contents = fs::read_to_string(&path).expect("Something went wrong reading the file"); + + assert_eq!(contents, default_config_toml()); + } + + #[cfg(test)] + fn create_temp_config_file_with_default_config()-> String { + use std::env; + use std::fs::File; + use std::io::Write; + use uuid::Uuid; + + // Build temp config file path + let temp_directory = env::temp_dir(); + let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); + + // Convert to argument type for Configuration::load_from_file + let config_file_path = temp_file.clone(); + let path = config_file_path.to_string_lossy().to_string(); + + // Write file contents + let mut file = File::create(temp_file).unwrap(); + writeln!(&mut file, "{}", default_config_toml()).unwrap(); + + path + } + + #[test] + fn should_be_loaded_from_a_toml_config_file() { + use crate::Configuration; + + let config_file_path = 
create_temp_config_file_with_default_config(); + + let configuration = Configuration::load_from_file(&config_file_path).expect("Could not load configuration from file"); + + assert_eq!(configuration, Configuration::default()); + } +} \ No newline at end of file diff --git a/src/databases/database.rs b/src/databases/database.rs index 721dfd00d..915c5381e 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -7,7 +7,7 @@ use crate::tracker::key::AuthKey; use crate::databases::mysql::MysqlDatabase; use crate::databases::sqlite::SqliteDatabase; -#[derive(Serialize, Deserialize, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Debug)] pub enum DatabaseDrivers { Sqlite3, MySQL, diff --git a/src/main.rs b/src/main.rs index 794cda4bb..963419f03 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,8 +7,10 @@ use torrust_tracker::tracker::tracker::TorrentTracker; #[tokio::main] async fn main() { + const CONFIG_PATH: &str = "config.toml"; + // Initialize Torrust config - let config = match Configuration::load_from_file() { + let config = match Configuration::load_from_file(CONFIG_PATH) { Ok(config) => Arc::new(config), Err(error) => { panic!("{}", error) From 1e4cbb8f5af17cf09f07828a84615d930ea1bd87 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 10 Aug 2022 08:21:39 +0100 Subject: [PATCH 056/435] test: refactor to follow rust conventions And add missing annotation for tests module. 
--- src/config.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/config.rs b/src/config.rs index 8acda3f63..c6414de5e 100644 --- a/src/config.rs +++ b/src/config.rs @@ -176,7 +176,8 @@ impl Configuration { } } -mod configuration { +#[cfg(test)] +mod tests { #[cfg(test)] fn default_config_toml() -> String { @@ -216,7 +217,7 @@ mod configuration { } #[test] - fn should_have_a_default_value_for_the_log_level() { + fn configuration_should_have_a_default_value_for_the_log_level() { use crate::Configuration; let configuration = Configuration::default(); @@ -225,7 +226,7 @@ mod configuration { } #[test] - fn should_be_saved_in_a_toml_config_file() { + fn configuration_should_be_saved_in_a_toml_config_file() { use std::env; use crate::Configuration; use std::fs; @@ -271,7 +272,7 @@ mod configuration { } #[test] - fn should_be_loaded_from_a_toml_config_file() { + fn configuration_should_be_loaded_from_a_toml_config_file() { use crate::Configuration; let config_file_path = create_temp_config_file_with_default_config(); From 228585eee884437ac33d65ed089a3def45e1dd80 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 10 Aug 2022 08:24:17 +0100 Subject: [PATCH 057/435] test: remove test for config default options --- src/config.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/config.rs b/src/config.rs index c6414de5e..3d6405621 100644 --- a/src/config.rs +++ b/src/config.rs @@ -216,15 +216,6 @@ mod tests { config } - #[test] - fn configuration_should_have_a_default_value_for_the_log_level() { - use crate::Configuration; - - let configuration = Configuration::default(); - - assert_eq!(configuration.log_level, Option::from(String::from("info")), "Expected default log level to be: {:?}, got {:?}", Option::from(String::from("info")), configuration.log_level); - } - #[test] fn configuration_should_be_saved_in_a_toml_config_file() { use std::env; From 1a9b7dbfd076a30afd0f5471f8cf5a54c22d1fd0 Mon Sep 17 00:00:00 2001 From: Jose Celano 
Date: Wed, 10 Aug 2022 08:26:19 +0100 Subject: [PATCH 058/435] refactor: remove duplicate impl block --- src/config.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/config.rs b/src/config.rs index 3d6405621..0eaaafc45 100644 --- a/src/config.rs +++ b/src/config.rs @@ -107,9 +107,7 @@ impl Configuration { } } } -} -impl Configuration { pub fn default() -> Configuration { let mut configuration = Configuration { log_level: Option::from(String::from("info")), From 749e8963732ee6adf53622df49d3be7b8f11b36f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 10 Aug 2022 09:53:13 +0100 Subject: [PATCH 059/435] refactor: remove unused code --- src/config.rs | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/src/config.rs b/src/config.rs index 0eaaafc45..8e79c45ef 100644 --- a/src/config.rs +++ b/src/config.rs @@ -78,23 +78,6 @@ impl std::fmt::Display for ConfigurationError { impl std::error::Error for ConfigurationError {} impl Configuration { - pub fn load(data: &[u8]) -> Result { - toml::from_slice(data) - } - - pub fn load_file(path: &str) -> Result { - match std::fs::read(path) { - Err(e) => Err(ConfigurationError::IOError(e)), - Ok(data) => { - match Self::load(data.as_slice()) { - Ok(cfg) => { - Ok(cfg) - } - Err(e) => Err(ConfigurationError::ParseError(e)), - } - } - } - } pub fn get_ext_ip(&self) -> Option { match &self.external_ip { From d7dfe0252dd4a3a021ec4c7b390f512f1fb8ce67 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 10 Aug 2022 10:01:10 +0100 Subject: [PATCH 060/435] test: add more tests to configuration --- src/config.rs | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/src/config.rs b/src/config.rs index 8e79c45ef..005705f78 100644 --- a/src/config.rs +++ b/src/config.rs @@ -197,6 +197,26 @@ mod tests { config } + #[test] + fn configuration_should_have_default_values() { + use crate::Configuration; + + let configuration = Configuration::default(); + + let toml = 
toml::to_string(&configuration).expect("Could not encode TOML value"); + + assert_eq!(toml, default_config_toml()); + } + + #[test] + fn configuration_should_contain_the_external_ip() { + use crate::Configuration; + + let configuration = Configuration::default(); + + assert_eq!(configuration.external_ip, Option::Some(String::from("0.0.0.0"))); + } + #[test] fn configuration_should_be_saved_in_a_toml_config_file() { use std::env; @@ -253,4 +273,13 @@ mod tests { assert_eq!(configuration, Configuration::default()); } + + #[test] + fn configuration_error_could_be_displayed() { + use crate::ConfigurationError; + + let error = ConfigurationError::TrackerModeIncompatible; + + assert_eq!(format!("{}", error), "TrackerModeIncompatible"); + } } \ No newline at end of file From f9880472179ce2f7c1733dc7ac1e0fd723e5c291 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 11 Aug 2022 19:24:03 +0200 Subject: [PATCH 061/435] chore(benchmark): added a basic script to benchmark http(s) announce performance and memory usage --- tests/README.md | 9 ++++++ tests/wrk_benchmark_announce.lua | 53 ++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+) create mode 100644 tests/README.md create mode 100644 tests/wrk_benchmark_announce.lua diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 000000000..04860056c --- /dev/null +++ b/tests/README.md @@ -0,0 +1,9 @@ +### Running Benchmarks + +#### HTTP(S) Announce Peer + Torrent +For this benchmark we use the tool [wrk](https://github.com/wg/wrk). 
+ +To run the benchmark using wrk, execute the following example script (change the url to your own tracker url): + + wrk -c200 -t1 -d10s -s ./wrk_benchmark_announce.lua --latency http://tracker.dutchbits.nl + diff --git a/tests/wrk_benchmark_announce.lua b/tests/wrk_benchmark_announce.lua new file mode 100644 index 000000000..c182f8e68 --- /dev/null +++ b/tests/wrk_benchmark_announce.lua @@ -0,0 +1,53 @@ +-- else the randomness would be the same every run +math.randomseed(os.time()) + +local charset = "0123456789ABCDEF" + +function hexToChar(hex) + local n = tonumber(hex, 16) + local f = string.char(n) + return f +end + +function hexStringToCharString(hex) + local ret = {} + local r + for i = 0, 19 do + local x = i * 2 + r = hex:sub(x+1, x+2) + local f = hexToChar(r) + table.insert(ret, f) + end + return table.concat(ret) +end + +function urlEncode(str) + str = string.gsub (str, "([^0-9a-zA-Z !'()*._~-])", -- locale independent + function (c) return string.format ("%%%02X", string.byte(c)) end) + str = string.gsub (str, " ", "+") + return str +end + +function genHexString(length) + local ret = {} + local r + for i = 1, length do + r = math.random(1, #charset) + table.insert(ret, charset:sub(r, r)) + end + return table.concat(ret) +end + +function randomInfoHash() + local hexString = genHexString(40) + local str = hexStringToCharString(hexString) + return urlEncode(str) +end + +-- the request function that will run at each request +request = function() + path = "/announce?info_hash=" .. randomInfoHash() .. 
"&peer_id=-lt0D80-a%D4%10%19%99%A6yh%9A%E1%CD%96&port=54434&uploaded=885&downloaded=0&left=0&corrupt=0&key=A78381BD&numwant=200&compact=1&no_peer_id=1&supportcrypto=1&redundant=0" + headers = {} + headers["X-Forwarded-For"] = "1.1.1.1" + return wrk.format("GET", path, headers) +end From faf7a99b97b8ef2a2eaf8ea2a83fd985591c6905 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sun, 14 Aug 2022 16:11:56 +0200 Subject: [PATCH 062/435] chore: updated r2d2_sqlite and included other dependencies to support cross compilation --- Cargo.lock | 144 ++++++++++++++-------------------------- Cargo.toml | 6 +- src/databases/sqlite.rs | 23 +++---- 3 files changed, 63 insertions(+), 110 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c52f6767b..279e4a67d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -743,6 +743,24 @@ dependencies = [ "ahash", ] +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashlink" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d452c155cb93fecdfb02a73dd57b5d8e442c2063bd7aac72f1bc5e4263a43086" +dependencies = [ + "hashbrown 0.12.3", +] + [[package]] name = "headers" version = "0.3.7" @@ -871,7 +889,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" dependencies = [ "autocfg", - "hashbrown", + "hashbrown 0.11.2", "serde 1.0.137", ] @@ -964,10 +982,11 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.18.0" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e704a02bcaecd4a08b93a23f6be59d0bd79cd161e0963e9499165a0a35df7bd" +checksum = "9f0455f2c1bc9a7caa792907026e469c1d91761fb0ea37cbb16427c77280cf35" dependencies = [ + "cc", "pkg-config", "vcpkg", ] 
@@ -1014,16 +1033,7 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ea2d928b485416e8908cff2d97d621db22b27f7b3b6729e438bcf42c671ba91" dependencies = [ - "hashbrown", -] - -[[package]] -name = "lru-cache" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" -dependencies = [ - "linked-hash-map", + "hashbrown 0.11.2", ] [[package]] @@ -1319,9 +1329,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.40" +version = "0.10.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb81a6430ac911acb25fe5ac8f1d2af1b4ea8a4fdfda0f1ee4292af2e2d8eb0e" +checksum = "618febf65336490dfcf20b73f885f5651a0c89c64c2d4a8c3662585a70bf5bd0" dependencies = [ "bitflags", "cfg-if", @@ -1349,15 +1359,25 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "openssl-src" +version = "111.22.0+1.1.1q" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f31f0d509d1c1ae9cada2f9539ff8f37933831fd5098879e482aa687d659853" +dependencies = [ + "cc", +] + [[package]] name = "openssl-sys" -version = "0.9.73" +version = "0.9.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5fd19fb3e0a8191c1e34935718976a3e70c112ab9a24af6d7cadccd9d90bc0" +checksum = "e5f9bd0c2710541a3cda73d6f9ac4f1b240de4ae261065d309dbe73d9dceb42f" dependencies = [ "autocfg", "cc", "libc", + "openssl-src", "pkg-config", "vcpkg", ] @@ -1370,17 +1390,7 @@ checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", "lock_api", - "parking_lot_core 0.8.5", -] - -[[package]] -name = "parking_lot" -version = "0.12.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" -dependencies = [ - "lock_api", - "parking_lot_core 0.9.3", + "parking_lot_core", ] [[package]] @@ -1397,19 +1407,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "parking_lot_core" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "smallvec", - "windows-sys", -] - [[package]] name = "peeking_take_while" version = "0.1.2" @@ -1514,7 +1511,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "545c5bc2b880973c9c10e4067418407a0ccaa3091781d1671d46eb35107cb26f" dependencies = [ "log", - "parking_lot 0.11.2", + "parking_lot", "scheduled-thread-pool", ] @@ -1530,9 +1527,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.16.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed60ebe88b27ac28c0563bc0fbeaecd302ff53e3a01e5ddc2ec9f4e6c707d929" +checksum = "b4f5d0337e99cd5cacd91ffc326c6cc9d8078def459df560c4f9bf9ba4a51034" dependencies = [ "r2d2", "rusqlite", @@ -1626,18 +1623,16 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.23.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45d0fd62e1df63d254714e6cb40d0a0e82e7a1623e7a27f679d851af092ae58b" +checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" dependencies = [ "bitflags", "fallible-iterator", "fallible-streaming-iterator", + "hashlink", "libsqlite3-sys", - "lru-cache", - "memchr", "smallvec", - "time 0.1.44", ] [[package]] @@ -1728,7 +1723,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc6f74fd1204073fa02d5d5d68bec8021be4c38690b61264b2fdb48083d0e7d7" dependencies = [ - 
"parking_lot 0.11.2", + "parking_lot", ] [[package]] @@ -2243,7 +2238,6 @@ dependencies = [ "mio", "num_cpus", "once_cell", - "parking_lot 0.12.0", "pin-project-lite", "signal-hook-registry", "socket2", @@ -2253,9 +2247,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" +checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" dependencies = [ "proc-macro2", "quote", @@ -2348,6 +2342,7 @@ dependencies = [ "futures", "hex", "log", + "openssl", "percent-encoding", "r2d2", "r2d2_mysql", @@ -2714,49 +2709,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "windows-sys" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" -dependencies = [ - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" - -[[package]] -name = "windows_i686_gnu" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" - -[[package]] -name = "windows_i686_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" - [[package]] name = "wyz" version = "0.4.0" diff --git a/Cargo.toml b/Cargo.toml index 554ba940d..9d21ed7d7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ lto = "fat" strip = true [dependencies] -tokio = { version = "1.7", features = ["full"] } +tokio = { version = "1.7", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } serde = { version = "1.0", features = ["derive"] } serde_bencode = "^0.2.3" @@ -29,6 +29,8 @@ hex = "0.4.3" percent-encoding = "2.1.0" binascii = "0.1" +openssl = { version = "0.10.41", features = ["vendored"] } + warp = { version = "0.3", features = ["tls"] } config = "0.11" @@ -40,7 +42,7 @@ chrono = "0.4" r2d2 = "0.8.8" r2d2_mysql = "21.0.0" -r2d2_sqlite = "0.16.0" +r2d2_sqlite = { version = "0.21.0", features = ["bundled"] } rand = "0.8.4" derive_more = "0.99" diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index e1659d897..143029ec2 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -4,7 +4,6 @@ use async_trait::async_trait; use log::debug; use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; -use r2d2_sqlite::rusqlite::NO_PARAMS; use crate::{InfoHash}; use crate::databases::database::{Database, Error}; @@ -50,9 +49,9 @@ impl Database for SqliteDatabase { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - conn.execute(&create_whitelist_table, NO_PARAMS) - .and_then(|_| conn.execute(&create_keys_table, NO_PARAMS)) - .and_then(|_| conn.execute(&create_torrents_table, NO_PARAMS)) + conn.execute(&create_whitelist_table, []) + .and_then(|_| conn.execute(&create_keys_table, [])) + .and_then(|_| conn.execute(&create_torrents_table, [])) .map_err(|_| database::Error::InvalidQuery) 
.map(|_| ()) } @@ -62,7 +61,7 @@ impl Database for SqliteDatabase { let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; - let torrent_iter = stmt.query_map(NO_PARAMS, |row| { + let torrent_iter = stmt.query_map([], |row| { let info_hash_string: String = row.get(0)?; let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); let completed: u32 = row.get(1)?; @@ -79,7 +78,7 @@ impl Database for SqliteDatabase { let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?; - let keys_iter = stmt.query_map(NO_PARAMS, |row| { + let keys_iter = stmt.query_map([], |row| { let key = row.get(0)?; let valid_until: i64 = row.get(1)?; @@ -99,7 +98,7 @@ impl Database for SqliteDatabase { let mut stmt = conn.prepare("SELECT info_hash FROM whitelist")?; - let info_hash_iter = stmt.query_map(NO_PARAMS, |row| { + let info_hash_iter = stmt.query_map([], |row| { let info_hash: String = row.get(0)?; Ok(InfoHash::from_str(&info_hash).unwrap()) @@ -113,7 +112,7 @@ impl Database for SqliteDatabase { async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), database::Error> { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - match conn.execute("INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2", &[info_hash.to_string(), completed.to_string()]) { + match conn.execute("INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2", [info_hash.to_string(), completed.to_string()]) { Ok(updated) => { if updated > 0 { return Ok(()); } Err(database::Error::QueryReturnedNoRows) @@ -144,7 +143,7 @@ impl Database for SqliteDatabase { async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - match conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", &[info_hash.to_string()]) { + match 
conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", [info_hash.to_string()]) { Ok(updated) => { if updated > 0 { return Ok(updated); } Err(database::Error::QueryReturnedNoRows) @@ -159,7 +158,7 @@ impl Database for SqliteDatabase { async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", &[info_hash.to_string()]) { + match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", [info_hash.to_string()]) { Ok(updated) => { if updated > 0 { return Ok(updated); } Err(database::Error::QueryReturnedNoRows) @@ -175,7 +174,7 @@ impl Database for SqliteDatabase { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; - let mut rows = stmt.query(&[key.to_string()])?; + let mut rows = stmt.query([key.to_string()])?; if let Some(row) = rows.next()? 
{ let key: String = row.get(0).unwrap(); @@ -194,7 +193,7 @@ impl Database for SqliteDatabase { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.execute("INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", - &[auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()], + [auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()], ) { Ok(updated) => { if updated > 0 { return Ok(updated); } From 498c32b99c4fc1592d7e94ad46864142ce7aaaf1 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 9 Sep 2022 18:20:50 +0200 Subject: [PATCH 063/435] formatting: add custom rustfmt.toml and update .gitignore --- .gitignore | 1 + rustfmt.toml | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 rustfmt.toml diff --git a/.gitignore b/.gitignore index 99a07430b..e2956b2d6 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,4 @@ /.idea/ /config.toml /data.db +/.vscode/launch.json diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 000000000..3e878b271 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,4 @@ +max_width = 130 +imports_granularity = "Module" +group_imports = "StdExternalCrate" + From 57bf2000e39dccfc2f8b6e41d6c6f3eac38a3886 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 9 Sep 2022 18:22:57 +0200 Subject: [PATCH 064/435] formatting: format the world! 
--- src/api/server.rs | 307 ++++++++++++++++++------------------ src/config.rs | 79 +++++----- src/databases/database.rs | 6 +- src/databases/mod.rs | 2 +- src/databases/mysql.rs | 134 +++++++++------- src/databases/sqlite.rs | 49 ++++-- src/http/filters.rs | 59 ++++--- src/http/handlers.rs | 117 ++++++++++---- src/http/mod.rs | 8 +- src/http/request.rs | 2 +- src/http/response.rs | 1 + src/http/routes.rs | 20 +-- src/http/server.rs | 24 ++- src/jobs/http_tracker.rs | 8 +- src/jobs/mod.rs | 2 +- src/jobs/torrent_cleanup.rs | 4 +- src/jobs/tracker_api.rs | 10 +- src/jobs/udp_tracker.rs | 4 +- src/lib.rs | 16 +- src/logging.rs | 22 ++- src/main.rs | 5 +- src/protocol/common.rs | 5 +- src/protocol/utils.rs | 4 +- src/setup.rs | 26 ++- src/tracker/key.rs | 31 ++-- src/tracker/mod.rs | 8 +- src/tracker/mode.rs | 2 +- src/tracker/peer.rs | 24 ++- src/tracker/statistics.rs | 4 +- src/tracker/torrent.rs | 17 +- src/tracker/tracker.rs | 51 +++--- src/udp/handlers.rs | 168 ++++++++++++-------- src/udp/mod.rs | 2 +- src/udp/server.rs | 4 +- 34 files changed, 682 insertions(+), 543 deletions(-) diff --git a/src/api/server.rs b/src/api/server.rs index 19ceac92a..cc6c905e4 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -4,10 +4,10 @@ use std::net::SocketAddr; use std::sync::Arc; use serde::{Deserialize, Serialize}; -use warp::{Filter, filters, reply, serve}; +use warp::{filters, reply, serve, Filter}; -use crate::protocol::common::*; use crate::peer::TorrentPeer; +use crate::protocol::common::*; use crate::tracker::tracker::TorrentTracker; #[derive(Deserialize, Debug)] @@ -55,7 +55,7 @@ enum ActionStatus<'a> { impl warp::reject::Reject for ActionStatus<'static> {} -fn authenticate(tokens: HashMap) -> impl Filter + Clone { +fn authenticate(tokens: HashMap) -> impl Filter + Clone { #[derive(Deserialize)] struct AuthToken { token: Option, @@ -67,18 +67,20 @@ fn authenticate(tokens: HashMap) -> impl Filter()) - .and_then(|tokens: Arc>, token: AuthToken| { - async move { - 
match token.token { - Some(token) => { - if !tokens.contains(&token) { - return Err(warp::reject::custom(ActionStatus::Err { reason: "token not valid".into() })); - } - - Ok(()) + .and_then(|tokens: Arc>, token: AuthToken| async move { + match token.token { + Some(token) => { + if !tokens.contains(&token) { + return Err(warp::reject::custom(ActionStatus::Err { + reason: "token not valid".into(), + })); } - None => Err(warp::reject::custom(ActionStatus::Err { reason: "unauthorized".into() })) + + Ok(()) } + None => Err(warp::reject::custom(ActionStatus::Err { + reason: "unauthorized".into(), + })), } }) .untuple_one() @@ -96,30 +98,28 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = api_torrents.clone(); (limits, tracker) }) - .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| { - async move { - let offset = limits.offset.unwrap_or(0); - let limit = min(limits.limit.unwrap_or(1000), 4000); - - let db = tracker.get_torrents().await; - let results: Vec<_> = db - .iter() - .map(|(info_hash, torrent_entry)| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - Torrent { - info_hash, - seeders, - completed, - leechers, - peers: None, - } - }) - .skip(offset as usize) - .take(limit as usize) - .collect(); - - Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) - } + .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| async move { + let offset = limits.offset.unwrap_or(0); + let limit = min(limits.limit.unwrap_or(1000), 4000); + + let db = tracker.get_torrents().await; + let results: Vec<_> = db + .iter() + .map(|(info_hash, torrent_entry)| { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + Torrent { + info_hash, + seeders, + completed, + leechers, + peers: None, + } + }) + .skip(offset as usize) + .take(limit as usize) + .collect(); + + Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) }); // GET /api/stats @@ -132,57 +132,55 @@ pub fn start(socket_addr: SocketAddr, 
tracker: Arc) -> impl warp let tracker = api_stats.clone(); tracker }) - .and_then(|tracker: Arc| { - async move { - let mut results = Stats { - torrents: 0, - seeders: 0, - completed: 0, - leechers: 0, - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 0, - udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0, - }; - - let db = tracker.get_torrents().await; - - let _: Vec<_> = db - .iter() - .map(|(_info_hash, torrent_entry)| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - results.seeders += seeders; - results.completed += completed; - results.leechers += leechers; - results.torrents += 1; - }) - .collect(); - - let stats = tracker.get_stats().await; - - results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; - results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; - results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; - results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; - results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; - results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; - results.udp4_connections_handled = stats.udp4_connections_handled as u32; - results.udp4_announces_handled = stats.udp4_announces_handled as u32; - results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; - results.udp6_connections_handled = stats.udp6_connections_handled as u32; - results.udp6_announces_handled = stats.udp6_announces_handled as u32; - results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; - - Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) - } + .and_then(|tracker: Arc| async move { + let mut results = Stats { + torrents: 0, + seeders: 0, + completed: 0, + leechers: 0, + 
tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + udp6_announces_handled: 0, + udp6_scrapes_handled: 0, + }; + + let db = tracker.get_torrents().await; + + let _: Vec<_> = db + .iter() + .map(|(_info_hash, torrent_entry)| { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + results.seeders += seeders; + results.completed += completed; + results.leechers += leechers; + results.torrents += 1; + }) + .collect(); + + let stats = tracker.get_stats().await; + + results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; + results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; + results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; + results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; + results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; + results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; + results.udp4_connections_handled = stats.udp4_connections_handled as u32; + results.udp4_announces_handled = stats.udp4_announces_handled as u32; + results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; + results.udp6_connections_handled = stats.udp6_connections_handled as u32; + results.udp6_announces_handled = stats.udp6_announces_handled as u32; + results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; + + Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) }); // GET /api/torrent/:info_hash @@ -196,28 +194,26 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = t2.clone(); (info_hash, tracker) }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| { - async move { - let db = tracker.get_torrents().await; - let torrent_entry_option = db.get(&info_hash); + 
.and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + let db = tracker.get_torrents().await; + let torrent_entry_option = db.get(&info_hash); - if torrent_entry_option.is_none() { - return Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")) - } + if torrent_entry_option.is_none() { + return Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")); + } - let torrent_entry = torrent_entry_option.unwrap(); - let (seeders, completed, leechers) = torrent_entry.get_stats(); + let torrent_entry = torrent_entry_option.unwrap(); + let (seeders, completed, leechers) = torrent_entry.get_stats(); - let peers = torrent_entry.get_peers(None); + let peers = torrent_entry.get_peers(None); - Ok(reply::json(&Torrent { - info_hash: &info_hash, - seeders, - completed, - leechers, - peers: Some(peers), - })) - } + Ok(reply::json(&Torrent { + info_hash: &info_hash, + seeders, + completed, + leechers, + peers: Some(peers), + })) }); // DELETE /api/whitelist/:info_hash @@ -231,12 +227,12 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = t3.clone(); (info_hash, tracker) }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| { - async move { - match tracker.remove_torrent_from_whitelist(&info_hash).await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to remove torrent from whitelist".into() })) - } + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + match tracker.remove_torrent_from_whitelist(&info_hash).await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to remove torrent from whitelist".into(), + })), } }); @@ -251,12 +247,12 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = t4.clone(); (info_hash, tracker) }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| { - async move { - match 
tracker.add_torrent_to_whitelist(&info_hash).await { - Ok(..) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(..) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to whitelist torrent".into() })) - } + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + match tracker.add_torrent_to_whitelist(&info_hash).await { + Ok(..) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(..) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to whitelist torrent".into(), + })), } }); @@ -271,12 +267,12 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = t5.clone(); (seconds_valid, tracker) }) - .and_then(|(seconds_valid, tracker): (u64, Arc)| { - async move { - match tracker.generate_auth_key(seconds_valid).await { - Ok(auth_key) => Ok(warp::reply::json(&auth_key)), - Err(..) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to generate key".into() })) - } + .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { + match tracker.generate_auth_key(seconds_valid).await { + Ok(auth_key) => Ok(warp::reply::json(&auth_key)), + Err(..) 
=> Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to generate key".into(), + })), } }); @@ -291,12 +287,12 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = t6.clone(); (key, tracker) }) - .and_then(|(key, tracker): (String, Arc)| { - async move { - match tracker.remove_auth_key(&key).await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to delete key".into() })) - } + .and_then(|(key, tracker): (String, Arc)| async move { + match tracker.remove_auth_key(&key).await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to delete key".into(), + })), } }); @@ -311,12 +307,12 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = t7.clone(); tracker }) - .and_then(|tracker: Arc| { - async move { - match tracker.load_whitelist().await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to reload whitelist".into() })) - } + .and_then(|tracker: Arc| async move { + match tracker.load_whitelist().await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to reload whitelist".into(), + })), } }); @@ -331,34 +327,31 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = t8.clone(); tracker }) - .and_then(|tracker: Arc| { - async move { - match tracker.load_keys().await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to reload keys".into() })) - } + .and_then(|tracker: Arc| async move { + match tracker.load_keys().await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to reload keys".into(), + })), } }); - let 
api_routes = - filters::path::path("api") - .and(view_torrent_list - .or(delete_torrent) - .or(view_torrent_info) - .or(view_stats_list) - .or(add_torrent) - .or(create_key) - .or(delete_key) - .or(reload_whitelist) - .or(reload_keys) - ); + let api_routes = filters::path::path("api").and( + view_torrent_list + .or(delete_torrent) + .or(view_torrent_info) + .or(view_stats_list) + .or(add_torrent) + .or(create_key) + .or(delete_key) + .or(reload_whitelist) + .or(reload_keys), + ); let server = api_routes.and(authenticate(tracker.config.http_api.access_tokens.clone())); let (_addr, api_server) = serve(server).bind_with_graceful_shutdown(socket_addr, async move { - tokio::signal::ctrl_c() - .await - .expect("Failed to listen to shutdown signal."); + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); }); api_server diff --git a/src/config.rs b/src/config.rs index 005705f78..c094eb2f9 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,4 +1,3 @@ -use std; use std::collections::HashMap; use std::fs; use std::net::IpAddr; @@ -8,7 +7,7 @@ use std::str::FromStr; use config::{Config, ConfigError, File}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, NoneAsEmptyString}; -use toml; +use {std, toml}; use crate::databases::database::DatabaseDrivers; use crate::mode::TrackerMode; @@ -70,7 +69,7 @@ impl std::fmt::Display for ConfigurationError { match self { ConfigurationError::IOError(e) => e.fmt(f), ConfigurationError::ParseError(e) => e.fmt(f), - _ => write!(f, "{:?}", self) + _ => write!(f, "{:?}", self), } } } @@ -78,16 +77,13 @@ impl std::fmt::Display for ConfigurationError { impl std::error::Error for ConfigurationError {} impl Configuration { - pub fn get_ext_ip(&self) -> Option { match &self.external_ip { None => None, - Some(external_ip) => { - match IpAddr::from_str(external_ip) { - Ok(external_ip) => Some(external_ip), - Err(_) => None - } - } + Some(external_ip) => match IpAddr::from_str(external_ip) { + Ok(external_ip) => 
Some(external_ip), + Err(_) => None, + }, } } @@ -111,24 +107,23 @@ impl Configuration { http_api: HttpApiConfig { enabled: true, bind_address: String::from("127.0.0.1:1212"), - access_tokens: [(String::from("admin"), String::from("MyAccessToken"))].iter().cloned().collect(), + access_tokens: [(String::from("admin"), String::from("MyAccessToken"))] + .iter() + .cloned() + .collect(), }, }; - configuration.udp_trackers.push( - UdpTrackerConfig { - enabled: false, - bind_address: String::from("0.0.0.0:6969"), - } - ); - configuration.http_trackers.push( - HttpTrackerConfig { - enabled: false, - bind_address: String::from("0.0.0.0:6969"), - ssl_enabled: false, - ssl_cert_path: None, - ssl_key_path: None, - } - ); + configuration.udp_trackers.push(UdpTrackerConfig { + enabled: false, + bind_address: String::from("0.0.0.0:6969"), + }); + configuration.http_trackers.push(HttpTrackerConfig { + enabled: false, + bind_address: String::from("0.0.0.0:6969"), + ssl_enabled: false, + ssl_cert_path: None, + ssl_key_path: None, + }); configuration } @@ -142,10 +137,14 @@ impl Configuration { eprintln!("Creating config file.."); let config = Configuration::default(); let _ = config.save_to_file(path); - return Err(ConfigError::Message(format!("Please edit the config.TOML in the root folder and restart the tracker."))); + return Err(ConfigError::Message(format!( + "Please edit the config.TOML in the root folder and restart the tracker." 
+ ))); } - let torrust_config: Configuration = config.try_into().map_err(|e| ConfigError::Message(format!("Errors while processing config: {}.", e)))?; + let torrust_config: Configuration = config + .try_into() + .map_err(|e| ConfigError::Message(format!("Errors while processing config: {}.", e)))?; Ok(torrust_config) } @@ -193,7 +192,11 @@ mod tests { [http_api.access_tokens] admin = "MyAccessToken" - "#.lines().map(|line| line.trim_start()).collect::>().join("\n"); + "# + .lines() + .map(|line| line.trim_start()) + .collect::>() + .join("\n"); config } @@ -219,11 +222,12 @@ mod tests { #[test] fn configuration_should_be_saved_in_a_toml_config_file() { - use std::env; - use crate::Configuration; - use std::fs; + use std::{env, fs}; + use uuid::Uuid; + use crate::Configuration; + // Build temp config file path let temp_directory = env::temp_dir(); let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); @@ -234,7 +238,9 @@ mod tests { let default_configuration = Configuration::default(); - default_configuration.save_to_file(&path).expect("Could not save configuration to file"); + default_configuration + .save_to_file(&path) + .expect("Could not save configuration to file"); let contents = fs::read_to_string(&path).expect("Something went wrong reading the file"); @@ -242,16 +248,17 @@ mod tests { } #[cfg(test)] - fn create_temp_config_file_with_default_config()-> String { + fn create_temp_config_file_with_default_config() -> String { use std::env; use std::fs::File; use std::io::Write; + use uuid::Uuid; // Build temp config file path let temp_directory = env::temp_dir(); let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); - + // Convert to argument type for Configuration::load_from_file let config_file_path = temp_file.clone(); let path = config_file_path.to_string_lossy().to_string(); @@ -282,4 +289,4 @@ mod tests { assert_eq!(format!("{}", error), "TrackerModeIncompatible"); } -} \ No newline at end of 
file +} diff --git a/src/databases/database.rs b/src/databases/database.rs index 915c5381e..adc735fd2 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -2,10 +2,10 @@ use async_trait::async_trait; use derive_more::{Display, Error}; use serde::{Deserialize, Serialize}; -use crate::InfoHash; -use crate::tracker::key::AuthKey; use crate::databases::mysql::MysqlDatabase; use crate::databases::sqlite::SqliteDatabase; +use crate::tracker::key::AuthKey; +use crate::InfoHash; #[derive(Serialize, Deserialize, PartialEq, Debug)] pub enum DatabaseDrivers { @@ -70,7 +70,7 @@ impl From for Error { fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { match e { r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => Error::QueryReturnedNoRows, - _ => Error::InvalidQuery + _ => Error::InvalidQuery, } } } diff --git a/src/databases/mod.rs b/src/databases/mod.rs index 119e34816..169d99f4d 100644 --- a/src/databases/mod.rs +++ b/src/databases/mod.rs @@ -1,3 +1,3 @@ +pub mod database; pub mod mysql; pub mod sqlite; -pub mod database; diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 5b6e34eb1..882fb7bf4 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -1,16 +1,16 @@ use std::str::FromStr; use async_trait::async_trait; -use log::{debug}; +use log::debug; use r2d2::Pool; -use r2d2_mysql::mysql::{Opts, OptsBuilder, params}; use r2d2_mysql::mysql::prelude::Queryable; +use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MysqlConnectionManager; -use crate::{AUTH_KEY_LENGTH, InfoHash}; -use crate::databases::database::{Database, Error}; use crate::databases::database; +use crate::databases::database::{Database, Error}; use crate::tracker::key::AuthKey; +use crate::{InfoHash, AUTH_KEY_LENGTH}; pub struct MysqlDatabase { pool: Pool, @@ -21,11 +21,11 @@ impl MysqlDatabase { let opts = Opts::from_url(&db_path).expect("Failed to connect to MySQL database."); let builder = OptsBuilder::from_opts(opts); let manager = 
MysqlConnectionManager::new(builder); - let pool = r2d2::Pool::builder().build(manager).expect("Failed to create r2d2 MySQL connection pool."); + let pool = r2d2::Pool::builder() + .build(manager) + .expect("Failed to create r2d2 MySQL connection pool."); - Ok(Self { - pool - }) + Ok(Self { pool }) } } @@ -36,29 +36,36 @@ impl Database for MysqlDatabase { CREATE TABLE IF NOT EXISTS whitelist ( id integer PRIMARY KEY AUTO_INCREMENT, info_hash VARCHAR(40) NOT NULL UNIQUE - );".to_string(); + );" + .to_string(); let create_torrents_table = " CREATE TABLE IF NOT EXISTS torrents ( id integer PRIMARY KEY AUTO_INCREMENT, info_hash VARCHAR(40) NOT NULL UNIQUE, completed INTEGER DEFAULT 0 NOT NULL - );".to_string(); + );" + .to_string(); - let create_keys_table = format!(" + let create_keys_table = format!( + " CREATE TABLE IF NOT EXISTS `keys` ( `id` INT NOT NULL AUTO_INCREMENT, `key` VARCHAR({}) NOT NULL, `valid_until` INT(10) NOT NULL, PRIMARY KEY (`id`), UNIQUE (`key`) - );", AUTH_KEY_LENGTH as i8); + );", + AUTH_KEY_LENGTH as i8 + ); let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - conn.query_drop(&create_torrents_table).expect("Could not create torrents table."); + conn.query_drop(&create_torrents_table) + .expect("Could not create torrents table."); conn.query_drop(&create_keys_table).expect("Could not create keys table."); - conn.query_drop(&create_whitelist_table).expect("Could not create whitelist table."); + conn.query_drop(&create_whitelist_table) + .expect("Could not create whitelist table."); Ok(()) } @@ -66,10 +73,15 @@ impl Database for MysqlDatabase { async fn load_persistent_torrents(&self) -> Result, database::Error> { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - let torrents: Vec<(InfoHash, u32)> = conn.query_map("SELECT info_hash, completed FROM torrents", |(info_hash_string, completed): (String, u32)| { - let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); - (info_hash, 
completed) - }).map_err(|_| database::Error::QueryReturnedNoRows)?; + let torrents: Vec<(InfoHash, u32)> = conn + .query_map( + "SELECT info_hash, completed FROM torrents", + |(info_hash_string, completed): (String, u32)| { + let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); + (info_hash, completed) + }, + ) + .map_err(|_| database::Error::QueryReturnedNoRows)?; Ok(torrents) } @@ -77,12 +89,15 @@ impl Database for MysqlDatabase { async fn load_keys(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - let keys: Vec = conn.query_map("SELECT `key`, valid_until FROM `keys`", |(key, valid_until): (String, i64)| { - AuthKey { - key, - valid_until: Some(valid_until as u64) - } - }).map_err(|_| database::Error::QueryReturnedNoRows)?; + let keys: Vec = conn + .query_map( + "SELECT `key`, valid_until FROM `keys`", + |(key, valid_until): (String, i64)| AuthKey { + key, + valid_until: Some(valid_until as u64), + }, + ) + .map_err(|_| database::Error::QueryReturnedNoRows)?; Ok(keys) } @@ -90,9 +105,11 @@ impl Database for MysqlDatabase { async fn load_whitelist(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - let info_hashes: Vec = conn.query_map("SELECT info_hash FROM whitelist", |info_hash: String| { - InfoHash::from_str(&info_hash).unwrap() - }).map_err(|_| database::Error::QueryReturnedNoRows)?; + let info_hashes: Vec = conn + .query_map("SELECT info_hash FROM whitelist", |info_hash: String| { + InfoHash::from_str(&info_hash).unwrap() + }) + .map_err(|_| database::Error::QueryReturnedNoRows)?; Ok(info_hashes) } @@ -118,14 +135,15 @@ impl Database for MysqlDatabase { async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - match conn.exec_first::("SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", params! 
{ info_hash }) - .map_err(|_| database::Error::QueryReturnedNoRows)? { - Some(info_hash) => { - Ok(InfoHash::from_str(&info_hash).unwrap()) - } - None => { - Err(database::Error::InvalidQuery) - } + match conn + .exec_first::( + "SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", + params! { info_hash }, + ) + .map_err(|_| database::Error::QueryReturnedNoRows)? + { + Some(info_hash) => Ok(InfoHash::from_str(&info_hash).unwrap()), + None => Err(database::Error::InvalidQuery), } } @@ -134,10 +152,11 @@ impl Database for MysqlDatabase { let info_hash_str = info_hash.to_string(); - match conn.exec_drop("INSERT INTO whitelist (info_hash) VALUES (:info_hash_str)", params! { info_hash_str }) { - Ok(_) => { - Ok(1) - } + match conn.exec_drop( + "INSERT INTO whitelist (info_hash) VALUES (:info_hash_str)", + params! { info_hash_str }, + ) { + Ok(_) => Ok(1), Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) @@ -151,9 +170,7 @@ impl Database for MysqlDatabase { let info_hash = info_hash.to_string(); match conn.exec_drop("DELETE FROM whitelist WHERE info_hash = :info_hash", params! { info_hash }) { - Ok(_) => { - Ok(1) - } + Ok(_) => Ok(1), Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) @@ -164,17 +181,15 @@ impl Database for MysqlDatabase { async fn get_key_from_keys(&self, key: &str) -> Result { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - match conn.exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }) - .map_err(|_| database::Error::QueryReturnedNoRows)? { - Some((key, valid_until)) => { - Ok(AuthKey { - key, - valid_until: Some(valid_until as u64), - }) - } - None => { - Err(database::Error::InvalidQuery) - } + match conn + .exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }) + .map_err(|_| database::Error::QueryReturnedNoRows)? 
+ { + Some((key, valid_until)) => Ok(AuthKey { + key, + valid_until: Some(valid_until as u64), + }), + None => Err(database::Error::InvalidQuery), } } @@ -184,10 +199,11 @@ impl Database for MysqlDatabase { let key = auth_key.key.to_string(); let valid_until = auth_key.valid_until.unwrap_or(0).to_string(); - match conn.exec_drop("INSERT INTO `keys` (`key`, valid_until) VALUES (:key, :valid_until)", params! { key, valid_until }) { - Ok(_) => { - Ok(1) - } + match conn.exec_drop( + "INSERT INTO `keys` (`key`, valid_until) VALUES (:key, :valid_until)", + params! { key, valid_until }, + ) { + Ok(_) => Ok(1), Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) @@ -199,9 +215,7 @@ impl Database for MysqlDatabase { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! { key }) { - Ok(_) => { - Ok(1) - } + Ok(_) => Ok(1), Err(e) => { debug!("{:?}", e); Err(database::Error::InvalidQuery) diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 143029ec2..3aba39919 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -5,10 +5,10 @@ use log::debug; use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; -use crate::{InfoHash}; -use crate::databases::database::{Database, Error}; use crate::databases::database; +use crate::databases::database::{Database, Error}; use crate::tracker::key::AuthKey; +use crate::InfoHash; pub struct SqliteDatabase { pool: Pool, @@ -18,9 +18,7 @@ impl SqliteDatabase { pub fn new(db_path: &str) -> Result { let cm = SqliteConnectionManager::file(db_path); let pool = Pool::new(cm).expect("Failed to create r2d2 SQLite connection pool."); - Ok(SqliteDatabase { - pool - }) + Ok(SqliteDatabase { pool }) } } @@ -31,21 +29,24 @@ impl Database for SqliteDatabase { CREATE TABLE IF NOT EXISTS whitelist ( id INTEGER PRIMARY KEY AUTOINCREMENT, info_hash TEXT NOT NULL UNIQUE - );".to_string(); + );" + .to_string(); let 
create_torrents_table = " CREATE TABLE IF NOT EXISTS torrents ( id INTEGER PRIMARY KEY AUTOINCREMENT, info_hash TEXT NOT NULL UNIQUE, completed INTEGER DEFAULT 0 NOT NULL - );".to_string(); + );" + .to_string(); let create_keys_table = " CREATE TABLE IF NOT EXISTS keys ( id INTEGER PRIMARY KEY AUTOINCREMENT, key TEXT NOT NULL UNIQUE, valid_until INTEGER NOT NULL - );".to_string(); + );" + .to_string(); let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; @@ -84,7 +85,7 @@ impl Database for SqliteDatabase { Ok(AuthKey { key, - valid_until: Some(valid_until as u64) + valid_until: Some(valid_until as u64), }) })?; @@ -112,9 +113,14 @@ impl Database for SqliteDatabase { async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), database::Error> { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - match conn.execute("INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2", [info_hash.to_string(), completed.to_string()]) { + match conn.execute( + "INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2", + [info_hash.to_string(), completed.to_string()], + ) { Ok(updated) => { - if updated > 0 { return Ok(()); } + if updated > 0 { + return Ok(()); + } Err(database::Error::QueryReturnedNoRows) } Err(e) => { @@ -145,7 +151,9 @@ impl Database for SqliteDatabase { match conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", [info_hash.to_string()]) { Ok(updated) => { - if updated > 0 { return Ok(updated); } + if updated > 0 { + return Ok(updated); + } Err(database::Error::QueryReturnedNoRows) } Err(e) => { @@ -160,7 +168,9 @@ impl Database for SqliteDatabase { match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", [info_hash.to_string()]) { Ok(updated) => { - if updated > 0 { return Ok(updated); } + if updated > 0 { + return Ok(updated); + } 
Err(database::Error::QueryReturnedNoRows) } Err(e) => { @@ -192,11 +202,14 @@ impl Database for SqliteDatabase { async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - match conn.execute("INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", - [auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()], + match conn.execute( + "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", + [auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()], ) { Ok(updated) => { - if updated > 0 { return Ok(updated); } + if updated > 0 { + return Ok(updated); + } Err(database::Error::QueryReturnedNoRows) } Err(e) => { @@ -211,7 +224,9 @@ impl Database for SqliteDatabase { match conn.execute("DELETE FROM keys WHERE key = ?", &[key]) { Ok(updated) => { - if updated > 0 { return Ok(updated); } + if updated > 0 { + return Ok(updated); + } Err(database::Error::QueryReturnedNoRows) } Err(e) => { diff --git a/src/http/filters.rs b/src/http/filters.rs index a288f8d97..514cb804c 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -3,44 +3,37 @@ use std::net::{IpAddr, SocketAddr}; use std::str::FromStr; use std::sync::Arc; -use warp::{Filter, reject, Rejection}; +use warp::{reject, Filter, Rejection}; -use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId}; -use crate::tracker::key::AuthKey; use crate::http::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError, WebResult}; +use crate::tracker::key::AuthKey; use crate::tracker::tracker::TorrentTracker; +use crate::{InfoHash, PeerId, MAX_SCRAPE_TORRENTS}; /// Pass Arc along -pub fn with_tracker(tracker: Arc) -> impl Filter, ), Error=Infallible> + Clone { - warp::any() - .map(move || tracker.clone()) +pub fn with_tracker(tracker: Arc) -> impl Filter,), Error = Infallible> + Clone { + warp::any().map(move || tracker.clone()) } /// Check for infoHash -pub fn with_info_hash() -> impl Filter, ), Error=Rejection> + Clone { - 
warp::filters::query::raw() - .and_then(info_hashes) +pub fn with_info_hash() -> impl Filter,), Error = Rejection> + Clone { + warp::filters::query::raw().and_then(info_hashes) } /// Check for PeerId -pub fn with_peer_id() -> impl Filter + Clone { - warp::filters::query::raw() - .and_then(peer_id) +pub fn with_peer_id() -> impl Filter + Clone { + warp::filters::query::raw().and_then(peer_id) } /// Pass Arc along -pub fn with_auth_key() -> impl Filter, ), Error=Infallible> + Clone { +pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { warp::path::param::() - .map(|key: String| { - AuthKey::from_string(&key) - }) - .or_else(|_| async { - Ok::<(Option, ), Infallible>((None, )) - }) + .map(|key: String| AuthKey::from_string(&key)) + .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) } /// Check for PeerAddress -pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter + Clone { +pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter + Clone { warp::addr::remote() .and(warp::header::optional::("X-Forwarded-For")) .map(move |remote_addr: Option, x_forwarded_for: Option| { @@ -50,7 +43,7 @@ pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter impl Filter + Clone { +pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::filters::query::query::() .and(with_info_hash()) .and(with_peer_id()) @@ -59,7 +52,7 @@ pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter impl Filter + Clone { +pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::any() .and(with_info_hash()) .and(with_peer_addr(on_reverse_proxy)) @@ -129,7 +122,9 @@ async fn peer_id(raw_query: String) -> WebResult { } /// Get PeerAddress from RemoteAddress or Forwarded -async fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option)) -> WebResult { +async fn peer_addr( + (on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option), +) -> WebResult { if 
!on_reverse_proxy && remote_addr.is_none() { return Err(reject::custom(ServerError::AddressNotFound)); } @@ -148,16 +143,19 @@ async fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Opti // set client ip to last forwarded ip let x_forwarded_ip = *x_forwarded_ips.last().unwrap(); - IpAddr::from_str(x_forwarded_ip).or_else(|_| { - Err(reject::custom(ServerError::AddressNotFound)) - }) + IpAddr::from_str(x_forwarded_ip).or_else(|_| Err(reject::custom(ServerError::AddressNotFound))) } - false => Ok(remote_addr.unwrap().ip()) + false => Ok(remote_addr.unwrap().ip()), } } /// Parse AnnounceRequest from raw AnnounceRequestQuery, InfoHash and Option -async fn announce_request(announce_request_query: AnnounceRequestQuery, info_hashes: Vec, peer_id: PeerId, peer_addr: IpAddr) -> WebResult { +async fn announce_request( + announce_request_query: AnnounceRequestQuery, + info_hashes: Vec, + peer_id: PeerId, + peer_addr: IpAddr, +) -> WebResult { Ok(AnnounceRequest { info_hash: info_hashes[0], peer_addr, @@ -173,8 +171,5 @@ async fn announce_request(announce_request_query: AnnounceRequestQuery, info_has /// Parse ScrapeRequest from InfoHash async fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { - Ok(ScrapeRequest { - info_hashes, - peer_addr, - }) + Ok(ScrapeRequest { info_hashes, peer_addr }) } diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 0dc737641..5214bbe6e 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -4,19 +4,26 @@ use std::net::IpAddr; use std::sync::Arc; use log::debug; -use warp::{reject, Rejection, Reply}; use warp::http::Response; +use warp::{reject, Rejection, Reply}; -use crate::{InfoHash}; -use crate::tracker::key::AuthKey; -use crate::tracker::torrent::{TorrentError, TorrentStats}; -use crate::http::{AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, WebResult}; +use crate::http::{ + AnnounceRequest, AnnounceResponse, 
ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, + WebResult, +}; use crate::peer::TorrentPeer; +use crate::tracker::key::AuthKey; use crate::tracker::statistics::TrackerStatisticsEvent; +use crate::tracker::torrent::{TorrentError, TorrentStats}; use crate::tracker::tracker::TorrentTracker; +use crate::InfoHash; /// Authenticate InfoHash using optional AuthKey -pub async fn authenticate(info_hash: &InfoHash, auth_key: &Option, tracker: Arc) -> Result<(), ServerError> { +pub async fn authenticate( + info_hash: &InfoHash, + auth_key: &Option, + tracker: Arc, +) -> Result<(), ServerError> { match tracker.authenticate_request(info_hash, auth_key).await { Ok(_) => Ok(()), Err(e) => { @@ -35,15 +42,22 @@ pub async fn authenticate(info_hash: &InfoHash, auth_key: &Option, trac } /// Handle announce request -pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option, tracker: Arc) -> WebResult { +pub async fn handle_announce( + announce_request: AnnounceRequest, + auth_key: Option, + tracker: Arc, +) -> WebResult { if let Err(e) = authenticate(&announce_request.info_hash, &auth_key, tracker.clone()).await { return Err(reject::custom(e)); } debug!("{:?}", announce_request); - let peer = TorrentPeer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); - let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer).await; + let peer = + TorrentPeer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); + let torrent_stats = tracker + .update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer) + .await; // get all torrent peers excluding the peer_addr let peers = tracker.get_torrent_peers(&announce_request.info_hash, &peer.peer_addr).await; @@ -52,15 +66,29 @@ pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option // send stats event 
match announce_request.peer_addr { - IpAddr::V4(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Tcp4Announce).await; } - IpAddr::V6(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Tcp6Announce).await; } + IpAddr::V4(_) => { + tracker.send_stats_event(TrackerStatisticsEvent::Tcp4Announce).await; + } + IpAddr::V6(_) => { + tracker.send_stats_event(TrackerStatisticsEvent::Tcp6Announce).await; + } } - send_announce_response(&announce_request, torrent_stats, peers, announce_interval, tracker.config.min_announce_interval) + send_announce_response( + &announce_request, + torrent_stats, + peers, + announce_interval, + tracker.config.min_announce_interval, + ) } /// Handle scrape request -pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option, tracker: Arc) -> WebResult { +pub async fn handle_scrape( + scrape_request: ScrapeRequest, + auth_key: Option, + tracker: Arc, +) -> WebResult { let mut files: HashMap = HashMap::new(); let db = tracker.get_torrents().await; @@ -69,14 +97,24 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option { if authenticate(info_hash, &auth_key, tracker.clone()).await.is_ok() { let (seeders, completed, leechers) = torrent_info.get_stats(); - ScrapeResponseEntry { complete: seeders, downloaded: completed, incomplete: leechers } + ScrapeResponseEntry { + complete: seeders, + downloaded: completed, + incomplete: leechers, + } } else { - ScrapeResponseEntry { complete: 0, downloaded: 0, incomplete: 0 } + ScrapeResponseEntry { + complete: 0, + downloaded: 0, + incomplete: 0, + } } } - None => { - ScrapeResponseEntry { complete: 0, downloaded: 0, incomplete: 0 } - } + None => ScrapeResponseEntry { + complete: 0, + downloaded: 0, + incomplete: 0, + }, }; files.insert(info_hash.clone(), scrape_entry); @@ -84,20 +122,33 @@ pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option { tracker.send_stats_event(TrackerStatisticsEvent::Tcp4Scrape).await; } - IpAddr::V6(_) => { 
tracker.send_stats_event(TrackerStatisticsEvent::Tcp6Scrape).await; } + IpAddr::V4(_) => { + tracker.send_stats_event(TrackerStatisticsEvent::Tcp4Scrape).await; + } + IpAddr::V6(_) => { + tracker.send_stats_event(TrackerStatisticsEvent::Tcp6Scrape).await; + } } send_scrape_response(files) } /// Send announce response -fn send_announce_response(announce_request: &AnnounceRequest, torrent_stats: TorrentStats, peers: Vec, interval: u32, interval_min: u32) -> WebResult { - let http_peers: Vec = peers.iter().map(|peer| Peer { - peer_id: peer.peer_id.to_string(), - ip: peer.peer_addr.ip(), - port: peer.peer_addr.port(), - }).collect(); +fn send_announce_response( + announce_request: &AnnounceRequest, + torrent_stats: TorrentStats, + peers: Vec, + interval: u32, + interval_min: u32, +) -> WebResult { + let http_peers: Vec = peers + .iter() + .map(|peer| Peer { + peer_id: peer.peer_id.to_string(), + ip: peer.peer_addr.ip(), + port: peer.peer_addr.port(), + }) + .collect(); let res = AnnounceResponse { interval, @@ -111,7 +162,7 @@ fn send_announce_response(announce_request: &AnnounceRequest, torrent_stats: Tor if let Some(1) = announce_request.compact { match res.write_compact() { Ok(body) => Ok(Response::new(body)), - Err(_) => Err(reject::custom(ServerError::InternalServerError)) + Err(_) => Err(reject::custom(ServerError::InternalServerError)), } } else { Ok(Response::new(res.write().into())) @@ -124,7 +175,7 @@ fn send_scrape_response(files: HashMap) -> WebRes match res.write() { Ok(body) => Ok(Response::new(body)), - Err(_) => Err(reject::custom(ServerError::InternalServerError)) + Err(_) => Err(reject::custom(ServerError::InternalServerError)), } } @@ -132,9 +183,15 @@ fn send_scrape_response(files: HashMap) -> WebRes pub async fn send_error(r: Rejection) -> std::result::Result { let body = if let Some(server_error) = r.find::() { debug!("{:?}", server_error); - ErrorResponse { failure_reason: server_error.to_string() }.write() + ErrorResponse { + failure_reason: 
server_error.to_string(), + } + .write() } else { - ErrorResponse { failure_reason: ServerError::InternalServerError.to_string() }.write() + ErrorResponse { + failure_reason: ServerError::InternalServerError.to_string(), + } + .write() }; Ok(Response::new(body)) diff --git a/src/http/mod.rs b/src/http/mod.rs index 07d077577..4842c0a25 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -6,13 +6,13 @@ pub use self::response::*; pub use self::routes::*; pub use self::server::*; -pub mod server; +pub mod errors; +pub mod filters; +pub mod handlers; pub mod request; pub mod response; -pub mod errors; pub mod routes; -pub mod handlers; -pub mod filters; +pub mod server; pub type Bytes = u64; pub type WebResult = std::result::Result; diff --git a/src/http/request.rs b/src/http/request.rs index 28cd4750e..6dd025e8c 100644 --- a/src/http/request.rs +++ b/src/http/request.rs @@ -2,8 +2,8 @@ use std::net::IpAddr; use serde::Deserialize; -use crate::{InfoHash, PeerId}; use crate::http::Bytes; +use crate::{InfoHash, PeerId}; #[derive(Deserialize)] pub struct AnnounceRequestQuery { diff --git a/src/http/response.rs b/src/http/response.rs index 2bdd4c1e7..4db12f995 100644 --- a/src/http/response.rs +++ b/src/http/response.rs @@ -5,6 +5,7 @@ use std::net::IpAddr; use serde; use serde::Serialize; + use crate::InfoHash; #[derive(Serialize)] diff --git a/src/http/routes.rs b/src/http/routes.rs index 53b2b0ce5..a9ca3027f 100644 --- a/src/http/routes.rs +++ b/src/http/routes.rs @@ -3,24 +3,18 @@ use std::sync::Arc; use warp::{Filter, Rejection}; -use crate::http::handle_announce; -use crate::http::handle_scrape; -use crate::http::send_error; -use crate::http::with_announce_request; -use crate::http::with_auth_key; -use crate::http::with_scrape_request; -use crate::http::with_tracker; +use crate::http::{ + handle_announce, handle_scrape, send_error, with_announce_request, with_auth_key, with_scrape_request, with_tracker, +}; use crate::tracker::tracker::TorrentTracker; /// All routes 
-pub fn routes(tracker: Arc) -> impl Filter + Clone { - announce(tracker.clone()) - .or(scrape(tracker)) - .recover(send_error) +pub fn routes(tracker: Arc) -> impl Filter + Clone { + announce(tracker.clone()).or(scrape(tracker)).recover(send_error) } /// GET /announce or /announce/ -fn announce(tracker: Arc) -> impl Filter + Clone { +fn announce(tracker: Arc) -> impl Filter + Clone { warp::path::path("announce") .and(warp::filters::method::get()) .and(with_announce_request(tracker.config.on_reverse_proxy)) @@ -30,7 +24,7 @@ fn announce(tracker: Arc) -> impl Filter -fn scrape(tracker: Arc) -> impl Filter + Clone { +fn scrape(tracker: Arc) -> impl Filter + Clone { warp::path::path("scrape") .and(warp::filters::method::get()) .and(with_scrape_request(tracker.config.on_reverse_proxy)) diff --git a/src/http/server.rs b/src/http/server.rs index 5a5b5f735..8b92d8792 100644 --- a/src/http/server.rs +++ b/src/http/server.rs @@ -12,33 +12,31 @@ pub struct HttpServer { impl HttpServer { pub fn new(tracker: Arc) -> HttpServer { - HttpServer { - tracker - } + HttpServer { tracker } } /// Start the HttpServer pub fn start(&self, socket_addr: SocketAddr) -> impl warp::Future { - let (_addr, server) = warp::serve(routes(self.tracker.clone())) - .bind_with_graceful_shutdown(socket_addr, async move { - tokio::signal::ctrl_c() - .await - .expect("Failed to listen to shutdown signal."); - }); + let (_addr, server) = warp::serve(routes(self.tracker.clone())).bind_with_graceful_shutdown(socket_addr, async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + }); server } /// Start the HttpServer in TLS mode - pub fn start_tls(&self, socket_addr: SocketAddr, ssl_cert_path: String, ssl_key_path: String) -> impl warp::Future { + pub fn start_tls( + &self, + socket_addr: SocketAddr, + ssl_cert_path: String, + ssl_key_path: String, + ) -> impl warp::Future { let (_addr, server) = warp::serve(routes(self.tracker.clone())) .tls() .cert_path(ssl_cert_path) 
.key_path(ssl_key_path) .bind_with_graceful_shutdown(socket_addr, async move { - tokio::signal::ctrl_c() - .await - .expect("Failed to listen to shutdown signal."); + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); }); server diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index 85f64200f..ef67f0a7e 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -1,9 +1,11 @@ use std::net::SocketAddr; use std::sync::Arc; + use log::{info, warn}; use tokio::task::JoinHandle; -use crate::{HttpServer, HttpTrackerConfig}; + use crate::tracker::tracker::TorrentTracker; +use crate::{HttpServer, HttpTrackerConfig}; pub fn start_job(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.parse::().unwrap(); @@ -19,7 +21,9 @@ pub fn start_job(config: &HttpTrackerConfig, tracker: Arc) -> Jo http_tracker.start(bind_addr).await; } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { info!("Starting HTTPS server on: {} (TLS)", bind_addr); - http_tracker.start_tls(bind_addr, ssl_cert_path.unwrap(), ssl_key_path.unwrap()).await; + http_tracker + .start_tls(bind_addr, ssl_cert_path.unwrap(), ssl_key_path.unwrap()) + .await; } else { warn!("Could not start HTTP tracker on: {}, missing SSL Cert or Key!", bind_addr); } diff --git a/src/jobs/mod.rs b/src/jobs/mod.rs index c3e58e56e..8b8f0662b 100644 --- a/src/jobs/mod.rs +++ b/src/jobs/mod.rs @@ -1,4 +1,4 @@ +pub mod http_tracker; pub mod torrent_cleanup; pub mod tracker_api; -pub mod http_tracker; pub mod udp_tracker; diff --git a/src/jobs/torrent_cleanup.rs b/src/jobs/torrent_cleanup.rs index 7d9967352..6e4b0c77e 100644 --- a/src/jobs/torrent_cleanup.rs +++ b/src/jobs/torrent_cleanup.rs @@ -1,9 +1,11 @@ use std::sync::Arc; + use chrono::Utc; use log::info; use tokio::task::JoinHandle; -use crate::{Configuration}; + use crate::tracker::tracker::TorrentTracker; +use crate::Configuration; pub fn start_job(config: 
&Configuration, tracker: Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(&tracker); diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index 476a87a6a..f3c9ae788 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -1,12 +1,18 @@ use std::sync::Arc; + use log::info; use tokio::task::JoinHandle; -use crate::{Configuration}; + use crate::api::server; use crate::tracker::tracker::TorrentTracker; +use crate::Configuration; pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { - let bind_addr = config.http_api.bind_address.parse::().expect("Tracker API bind_address invalid."); + let bind_addr = config + .http_api + .bind_address + .parse::() + .expect("Tracker API bind_address invalid."); info!("Starting Torrust API server on: {}", bind_addr); tokio::spawn(async move { diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index 32ef76ef4..f93979c9f 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -1,8 +1,10 @@ use std::sync::Arc; + use log::{error, info, warn}; use tokio::task::JoinHandle; -use crate::{UdpServer, UdpTrackerConfig}; + use crate::tracker::tracker::TorrentTracker; +use crate::{UdpServer, UdpTrackerConfig}; pub fn start_job(config: &UdpTrackerConfig, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.clone(); diff --git a/src/lib.rs b/src/lib.rs index 245f4686c..6dcc7e6da 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,18 +1,18 @@ +pub use api::server::*; pub use http::server::*; +pub use protocol::common::*; pub use udp::server::*; -pub use protocol::common::*; pub use self::config::*; -pub use api::server::*; pub use self::tracker::*; +pub mod api; pub mod config; -pub mod tracker; -pub mod logging; -pub mod udp; -pub mod http; -pub mod setup; pub mod databases; +pub mod http; pub mod jobs; -pub mod api; +pub mod logging; pub mod protocol; +pub mod setup; +pub mod tracker; +pub mod udp; diff --git a/src/logging.rs b/src/logging.rs index 
c2e77551f..209c9f848 100644 --- a/src/logging.rs +++ b/src/logging.rs @@ -5,19 +5,17 @@ use crate::Configuration; pub fn setup_logging(cfg: &Configuration) { let log_level = match &cfg.log_level { None => log::LevelFilter::Info, - Some(level) => { - match level.as_str() { - "off" => log::LevelFilter::Off, - "trace" => log::LevelFilter::Trace, - "debug" => log::LevelFilter::Debug, - "info" => log::LevelFilter::Info, - "warn" => log::LevelFilter::Warn, - "error" => log::LevelFilter::Error, - _ => { - panic!("Unknown log level encountered: '{}'", level.as_str()); - } + Some(level) => match level.as_str() { + "off" => log::LevelFilter::Off, + "trace" => log::LevelFilter::Trace, + "debug" => log::LevelFilter::Debug, + "info" => log::LevelFilter::Info, + "warn" => log::LevelFilter::Warn, + "error" => log::LevelFilter::Error, + _ => { + panic!("Unknown log level encountered: '{}'", level.as_str()); } - } + }, }; if let Err(_err) = fern::Dispatch::new() diff --git a/src/main.rs b/src/main.rs index 963419f03..0b406c85a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,9 +1,8 @@ use std::sync::Arc; + use log::info; -use torrust_tracker::Configuration; -use torrust_tracker::logging; -use torrust_tracker::setup; use torrust_tracker::tracker::tracker::TorrentTracker; +use torrust_tracker::{logging, setup, Configuration}; #[tokio::main] async fn main() { diff --git a/src/protocol/common.rs b/src/protocol/common.rs index 5d69ed0e1..92a3ed51c 100644 --- a/src/protocol/common.rs +++ b/src/protocol/common.rs @@ -221,8 +221,9 @@ impl PeerId { impl Serialize for PeerId { fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, { + where + S: serde::Serializer, + { let buff_size = self.0.len() * 2; let mut tmp: Vec = vec![0; buff_size]; binascii::bin2hex(&self.0, &mut tmp).unwrap(); diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index 30b87b99b..e50c8b036 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -11,9 +11,7 @@ pub fn 
get_connection_id(remote_address: &SocketAddr) -> ConnectionId { } pub fn current_time() -> u64 { - SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH).unwrap() - .as_secs() + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs() } pub fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { diff --git a/src/setup.rs b/src/setup.rs index ed9b6d8ff..0c5ed9004 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -1,11 +1,13 @@ use std::sync::Arc; -use log::{warn}; + +use log::warn; use tokio::task::JoinHandle; -use crate::{Configuration}; + use crate::jobs::{http_tracker, torrent_cleanup, tracker_api, udp_tracker}; use crate::tracker::tracker::TorrentTracker; +use crate::Configuration; -pub async fn setup(config: &Configuration, tracker: Arc) -> Vec>{ +pub async fn setup(config: &Configuration, tracker: Arc) -> Vec> { let mut jobs: Vec> = Vec::new(); // Load peer keys @@ -15,15 +17,23 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Vec< // Load whitelisted torrents if tracker.is_whitelisted() { - tracker.load_whitelist().await.expect("Could not load whitelist from database."); + tracker + .load_whitelist() + .await + .expect("Could not load whitelist from database."); } // Start the UDP blocks for udp_tracker_config in &config.udp_trackers { - if !udp_tracker_config.enabled { continue; } + if !udp_tracker_config.enabled { + continue; + } if tracker.is_private() { - warn!("Could not start UDP tracker on: {} while in {:?}. UDP is not safe for private trackers!", udp_tracker_config.bind_address, config.mode); + warn!( + "Could not start UDP tracker on: {} while in {:?}. 
UDP is not safe for private trackers!", + udp_tracker_config.bind_address, config.mode + ); } else { jobs.push(udp_tracker::start_job(&udp_tracker_config, tracker.clone())) } @@ -31,7 +41,9 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Vec< // Start the HTTP blocks for http_tracker_config in &config.http_trackers { - if !http_tracker_config.enabled { continue; } + if !http_tracker_config.enabled { + continue; + } jobs.push(http_tracker::start_job(&http_tracker_config, tracker.clone())); } diff --git a/src/tracker/key.rs b/src/tracker/key.rs index 2e2ca81f7..f935dac07 100644 --- a/src/tracker/key.rs +++ b/src/tracker/key.rs @@ -1,11 +1,10 @@ use derive_more::{Display, Error}; use log::debug; -use rand::{Rng, thread_rng}; use rand::distributions::Alphanumeric; +use rand::{thread_rng, Rng}; use serde::Serialize; use crate::protocol::utils::current_time; - use crate::AUTH_KEY_LENGTH; pub fn generate_auth_key(seconds_valid: u64) -> AuthKey { @@ -25,8 +24,12 @@ pub fn generate_auth_key(seconds_valid: u64) -> AuthKey { pub fn verify_auth_key(auth_key: &AuthKey) -> Result<(), Error> { let current_time = current_time(); - if auth_key.valid_until.is_none() { return Err(Error::KeyInvalid); } - if auth_key.valid_until.unwrap() < current_time { return Err(Error::KeyExpired); } + if auth_key.valid_until.is_none() { + return Err(Error::KeyInvalid); + } + if auth_key.valid_until.unwrap() < current_time { + return Err(Error::KeyExpired); + } Ok(()) } @@ -40,10 +43,7 @@ pub struct AuthKey { impl AuthKey { pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { if let Ok(key) = String::from_utf8(Vec::from(key_buffer)) { - Some(AuthKey { - key, - valid_until: None, - }) + Some(AuthKey { key, valid_until: None }) } else { None } @@ -85,17 +85,10 @@ mod tests { #[test] fn auth_key_from_buffer() { - let auth_key = key::AuthKey::from_buffer( - [ - 89, 90, 83, 108, - 52, 108, 77, 90, - 117, 112, 82, 117, - 79, 112, 83, 82, - 67, 51, 107, 114, - 73, 75, 82, 53, - 
66, 80, 66, 49, - 52, 110, 114, 74] - ); + let auth_key = key::AuthKey::from_buffer([ + 89, 90, 83, 108, 52, 108, 77, 90, 117, 112, 82, 117, 79, 112, 83, 82, 67, 51, 107, 114, 73, 75, 82, 53, 66, 80, 66, + 49, 52, 110, 114, 74, + ]); assert!(auth_key.is_some()); assert_eq!(auth_key.unwrap().key, "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"); diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 791e2e7d2..bbb027a35 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -1,6 +1,6 @@ -pub mod tracker; -pub mod statistics; -pub mod peer; -pub mod torrent; pub mod key; pub mod mode; +pub mod peer; +pub mod statistics; +pub mod torrent; +pub mod tracker; diff --git a/src/tracker/mode.rs b/src/tracker/mode.rs index edcb27f1c..9110b7f4f 100644 --- a/src/tracker/mode.rs +++ b/src/tracker/mode.rs @@ -1,5 +1,5 @@ use serde; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Debug)] pub enum TrackerMode { diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index ce4e52022..0514f41ed 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -2,11 +2,11 @@ use std::net::{IpAddr, SocketAddr}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde; -use serde::{Serialize}; +use serde::Serialize; -use crate::protocol::common::{NumberOfBytesDef, AnnounceEventDef}; -use crate::protocol::utils::ser_instant; use crate::http::AnnounceRequest; +use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; +use crate::protocol::utils::ser_instant; use crate::PeerId; #[derive(PartialEq, Eq, Debug, Clone, Serialize)] @@ -26,7 +26,11 @@ pub struct TorrentPeer { } impl TorrentPeer { - pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { + pub fn from_udp_announce_request( + announce_request: &aquatic_udp_protocol::AnnounceRequest, + remote_ip: IpAddr, + host_opt_ip: Option, + ) -> Self { let 
peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); TorrentPeer { @@ -40,7 +44,11 @@ impl TorrentPeer { } } - pub fn from_http_announce_request(announce_request: &AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { + pub fn from_http_announce_request( + announce_request: &AnnounceRequest, + remote_ip: IpAddr, + host_opt_ip: Option, + ) -> Self { let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); let event: AnnounceEvent = if let Some(event) = &announce_request.event { @@ -48,7 +56,7 @@ impl TorrentPeer { "started" => AnnounceEvent::Started, "stopped" => AnnounceEvent::Stopped, "completed" => AnnounceEvent::Completed, - _ => AnnounceEvent::None + _ => AnnounceEvent::None, } } else { AnnounceEvent::None @@ -74,5 +82,7 @@ impl TorrentPeer { } } - pub fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } + pub fn is_seeder(&self) -> bool { + self.left.0 <= 0 && self.event != AnnounceEvent::Stopped + } } diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index c67df72ec..85a2dbae9 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -1,8 +1,8 @@ use std::sync::Arc; -use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; -use tokio::sync::mpsc::Sender; use tokio::sync::mpsc::error::SendError; +use tokio::sync::mpsc::Sender; +use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; const CHANNEL_BUFFER_SIZE: usize = 65_535; diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 0c03e3f82..7950ce9c0 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -1,10 +1,10 @@ use std::net::{IpAddr, SocketAddr}; -use aquatic_udp_protocol::{AnnounceEvent}; +use aquatic_udp_protocol::AnnounceEvent; use serde::{Deserialize, Serialize}; -use crate::{MAX_SCRAPE_TORRENTS, PeerId}; use crate::peer::TorrentPeer; +use crate::{PeerId, MAX_SCRAPE_TORRENTS}; 
#[derive(Serialize, Deserialize, Clone)] pub struct TorrentEntry { @@ -54,11 +54,13 @@ impl TorrentEntry { // Filter out different ip_version from remote_addr Some(remote_addr) => { // Skip ip address of client - if peer.peer_addr.ip() == remote_addr.ip() { return false; } + if peer.peer_addr.ip() == remote_addr.ip() { + return false; + } match peer.peer_addr.ip() { - IpAddr::V4(_) => { remote_addr.is_ipv4() } - IpAddr::V6(_) => { remote_addr.is_ipv6() } + IpAddr::V4(_) => remote_addr.is_ipv4(), + IpAddr::V6(_) => remote_addr.is_ipv6(), } } }) @@ -73,9 +75,8 @@ impl TorrentEntry { } pub fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { - self.peers.retain(|_, peer| { - peer.updated.elapsed() > std::time::Duration::from_secs(max_peer_timeout as u64) - }); + self.peers + .retain(|_, peer| peer.updated.elapsed() > std::time::Duration::from_secs(max_peer_timeout as u64)); } } diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index 163bfe446..51d7716fb 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -3,19 +3,19 @@ use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; -use tokio::sync::{RwLock, RwLockReadGuard}; use tokio::sync::mpsc::error::SendError; +use tokio::sync::{RwLock, RwLockReadGuard}; -use crate::Configuration; -use crate::protocol::common::InfoHash; -use crate::databases::database::Database; use crate::databases::database; +use crate::databases::database::Database; use crate::mode::TrackerMode; use crate::peer::TorrentPeer; -use crate::tracker::key::AuthKey; +use crate::protocol::common::InfoHash; use crate::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsEvent}; use crate::tracker::key; +use crate::tracker::key::AuthKey; use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; +use crate::Configuration; pub struct TorrentTracker { pub config: Arc, @@ -24,7 +24,7 @@ pub struct TorrentTracker { whitelist: RwLock>, torrents: RwLock>, stats_tracker: StatsTracker, - 
database: Box + database: Box, } impl TorrentTracker { @@ -33,7 +33,9 @@ impl TorrentTracker { let mut stats_tracker = StatsTracker::new(); // starts a thread for updating tracker stats - if config.tracker_usage_statistics { stats_tracker.run_worker(); } + if config.tracker_usage_statistics { + stats_tracker.run_worker(); + } Ok(TorrentTracker { config: config.clone(), @@ -42,7 +44,7 @@ impl TorrentTracker { whitelist: RwLock::new(std::collections::HashSet::new()), torrents: RwLock::new(std::collections::BTreeMap::new()), stats_tracker, - database + database, }) } @@ -74,7 +76,7 @@ impl TorrentTracker { pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key::Error> { match self.keys.read().await.get(&auth_key.key) { None => Err(key::Error::KeyInvalid), - Some(key) => key::verify_auth_key(key) + Some(key) => key::verify_auth_key(key), } } @@ -124,7 +126,9 @@ impl TorrentTracker { pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { // no authentication needed in public mode - if self.is_public() { return Ok(()); } + if self.is_public() { + return Ok(()); + } // check if auth_key is set and valid if self.is_private() { @@ -157,7 +161,9 @@ impl TorrentTracker { for (info_hash, completed) in persistent_torrents { // Skip if torrent entry already exists - if torrents.contains_key(&info_hash) { continue; } + if torrents.contains_key(&info_hash) { + continue; + } let torrent_entry = TorrentEntry { peers: Default::default(), @@ -170,14 +176,12 @@ impl TorrentTracker { Ok(()) } - pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr, ) -> Vec { + pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { None => vec![], - Some(entry) => { - entry.get_peers(Some(client_addr)).into_iter().cloned().collect() - } + Some(entry) => 
entry.get_peers(Some(client_addr)).into_iter().cloned().collect(), } } @@ -185,19 +189,18 @@ impl TorrentTracker { let mut torrents = self.torrents.write().await; let torrent_entry = match torrents.entry(info_hash.clone()) { - Entry::Vacant(vacant) => { - vacant.insert(TorrentEntry::new()) - } - Entry::Occupied(entry) => { - entry.into_mut() - } + Entry::Vacant(vacant) => vacant.insert(TorrentEntry::new()), + Entry::Occupied(entry) => entry.into_mut(), }; let stats_updated = torrent_entry.update_peer(peer); // todo: move this action to a separate worker if self.config.persistent_torrent_completed_stat && stats_updated { - let _ = self.database.save_persistent_torrent(&info_hash, torrent_entry.completed).await; + let _ = self + .database + .save_persistent_torrent(&info_hash, torrent_entry.completed) + .await; } let (seeders, completed, leechers) = torrent_entry.get_stats(); @@ -231,8 +234,8 @@ impl TorrentTracker { torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); match self.config.persistent_torrent_completed_stat { - true => { torrent_entry.completed > 0 || torrent_entry.peers.len() > 0 } - false => { torrent_entry.peers.len() > 0 } + true => torrent_entry.completed > 0 || torrent_entry.peers.len() > 0, + false => torrent_entry.peers.len() > 0, } }); } else { diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 860a2fe4b..907dac0bc 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -1,16 +1,19 @@ use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; -use aquatic_udp_protocol::{AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId}; +use aquatic_udp_protocol::{ + AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, + NumberOfPeers, Port, Request, 
Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, +}; -use crate::{InfoHash, MAX_SCRAPE_TORRENTS}; use crate::peer::TorrentPeer; -use crate::tracker::torrent::{TorrentError}; -use crate::udp::errors::ServerError; -use crate::udp::request::AnnounceRequestWrapper; +use crate::protocol::utils::get_connection_id; use crate::tracker::statistics::TrackerStatisticsEvent; +use crate::tracker::torrent::TorrentError; use crate::tracker::tracker::TorrentTracker; -use crate::protocol::utils::get_connection_id; +use crate::udp::errors::ServerError; +use crate::udp::request::AnnounceRequestWrapper; +use crate::{InfoHash, MAX_SCRAPE_TORRENTS}; pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), ServerError> { match tracker.authenticate_request(info_hash, &None).await { @@ -34,42 +37,38 @@ pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: A match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|_| ServerError::InternalServerError) { Ok(request) => { let transaction_id = match &request { - Request::Connect(connect_request) => { - connect_request.transaction_id - } - Request::Announce(announce_request) => { - announce_request.transaction_id - } - Request::Scrape(scrape_request) => { - scrape_request.transaction_id - } + Request::Connect(connect_request) => connect_request.transaction_id, + Request::Announce(announce_request) => announce_request.transaction_id, + Request::Scrape(scrape_request) => scrape_request.transaction_id, }; match handle_request(request, remote_addr, tracker).await { Ok(response) => response, - Err(e) => handle_error(e, transaction_id) + Err(e) => handle_error(e, transaction_id), } } // bad request - Err(_) => handle_error(ServerError::BadRequest, TransactionId(0)) + Err(_) => handle_error(ServerError::BadRequest, TransactionId(0)), } } -pub async fn handle_request(request: Request, remote_addr: SocketAddr, tracker: Arc) -> Result { +pub 
async fn handle_request( + request: Request, + remote_addr: SocketAddr, + tracker: Arc, +) -> Result { match request { - Request::Connect(connect_request) => { - handle_connect(remote_addr, &connect_request, tracker).await - } - Request::Announce(announce_request) => { - handle_announce(remote_addr, &announce_request, tracker).await - } - Request::Scrape(scrape_request) => { - handle_scrape(remote_addr, &scrape_request, tracker).await - } + Request::Connect(connect_request) => handle_connect(remote_addr, &connect_request, tracker).await, + Request::Announce(announce_request) => handle_announce(remote_addr, &announce_request, tracker).await, + Request::Scrape(scrape_request) => handle_scrape(remote_addr, &scrape_request, tracker).await, } } -pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, tracker: Arc) -> Result { +pub async fn handle_connect( + remote_addr: SocketAddr, + request: &ConnectRequest, + tracker: Arc, +) -> Result { let connection_id = get_connection_id(&remote_addr); let response = Response::from(ConnectResponse { @@ -79,26 +78,42 @@ pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, t // send stats event match remote_addr { - SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp4Connect).await; } - SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp6Connect).await; } + SocketAddr::V4(_) => { + tracker.send_stats_event(TrackerStatisticsEvent::Udp4Connect).await; + } + SocketAddr::V6(_) => { + tracker.send_stats_event(TrackerStatisticsEvent::Udp6Connect).await; + } } Ok(response) } -pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &AnnounceRequest, tracker: Arc) -> Result { +pub async fn handle_announce( + remote_addr: SocketAddr, + announce_request: &AnnounceRequest, + tracker: Arc, +) -> Result { let wrapped_announce_request = AnnounceRequestWrapper::new(announce_request.clone()); 
authenticate(&wrapped_announce_request.info_hash, tracker.clone()).await?; - let peer = TorrentPeer::from_udp_announce_request(&wrapped_announce_request.announce_request, remote_addr.ip(), tracker.config.get_ext_ip()); + let peer = TorrentPeer::from_udp_announce_request( + &wrapped_announce_request.announce_request, + remote_addr.ip(), + tracker.config.get_ext_ip(), + ); //let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer).await; - let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer).await; + let torrent_stats = tracker + .update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer) + .await; // get all peers excluding the client_addr - let peers = tracker.get_torrent_peers(&wrapped_announce_request.info_hash, &peer.peer_addr).await; + let peers = tracker + .get_torrent_peers(&wrapped_announce_request.info_hash, &peer.peer_addr) + .await; let announce_response = if remote_addr.is_ipv4() { Response::from(AnnounceResponse { @@ -106,16 +121,19 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc announce_interval: AnnounceInterval(tracker.config.announce_interval as i32), leechers: NumberOfPeers(torrent_stats.leechers as i32), seeders: NumberOfPeers(torrent_stats.seeders as i32), - peers: peers.iter() - .filter_map(|peer| if let IpAddr::V4(ip) = peer.peer_addr.ip() { - Some(ResponsePeer:: { - ip_address: ip, - port: Port(peer.peer_addr.port()), - }) - } else { - None - } - ).collect(), + peers: peers + .iter() + .filter_map(|peer| { + if let IpAddr::V4(ip) = peer.peer_addr.ip() { + Some(ResponsePeer:: { + ip_address: ip, + port: Port(peer.peer_addr.port()), + }) + } else { + None + } + }) + .collect(), }) } else { Response::from(AnnounceResponse { @@ -123,30 +141,41 @@ pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &Announc announce_interval: 
AnnounceInterval(tracker.config.announce_interval as i32), leechers: NumberOfPeers(torrent_stats.leechers as i32), seeders: NumberOfPeers(torrent_stats.seeders as i32), - peers: peers.iter() - .filter_map(|peer| if let IpAddr::V6(ip) = peer.peer_addr.ip() { - Some(ResponsePeer:: { - ip_address: ip, - port: Port(peer.peer_addr.port()), - }) - } else { - None - } - ).collect(), + peers: peers + .iter() + .filter_map(|peer| { + if let IpAddr::V6(ip) = peer.peer_addr.ip() { + Some(ResponsePeer:: { + ip_address: ip, + port: Port(peer.peer_addr.port()), + }) + } else { + None + } + }) + .collect(), }) }; // send stats event match remote_addr { - SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp4Announce).await; } - SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp6Announce).await; } + SocketAddr::V4(_) => { + tracker.send_stats_event(TrackerStatisticsEvent::Udp4Announce).await; + } + SocketAddr::V6(_) => { + tracker.send_stats_event(TrackerStatisticsEvent::Udp6Announce).await; + } } Ok(announce_response) } // todo: refactor this, db lock can be a lot shorter -pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tracker: Arc) -> Result { +pub async fn handle_scrape( + remote_addr: SocketAddr, + request: &ScrapeRequest, + tracker: Arc, +) -> Result { let db = tracker.get_torrents().await; let mut torrent_stats: Vec = Vec::new(); @@ -172,13 +201,11 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra } } } - None => { - TorrentScrapeStatistics { - seeders: NumberOfPeers(0), - completed: NumberOfDownloads(0), - leechers: NumberOfPeers(0), - } - } + None => TorrentScrapeStatistics { + seeders: NumberOfPeers(0), + completed: NumberOfDownloads(0), + leechers: NumberOfPeers(0), + }, }; torrent_stats.push(scrape_entry); @@ -188,8 +215,12 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra // send stats event match remote_addr { - 
SocketAddr::V4(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp4Scrape).await; } - SocketAddr::V6(_) => { tracker.send_stats_event(TrackerStatisticsEvent::Udp6Scrape).await; } + SocketAddr::V4(_) => { + tracker.send_stats_event(TrackerStatisticsEvent::Udp4Scrape).await; + } + SocketAddr::V6(_) => { + tracker.send_stats_event(TrackerStatisticsEvent::Udp6Scrape).await; + } } Ok(Response::from(ScrapeResponse { @@ -200,5 +231,8 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra fn handle_error(e: ServerError, transaction_id: TransactionId) -> Response { let message = e.to_string(); - Response::from(ErrorResponse { transaction_id, message: message.into() }) + Response::from(ErrorResponse { + transaction_id, + message: message.into(), + }) } diff --git a/src/udp/mod.rs b/src/udp/mod.rs index 25780ba93..ae87973f1 100644 --- a/src/udp/mod.rs +++ b/src/udp/mod.rs @@ -4,9 +4,9 @@ pub use self::request::*; pub use self::server::*; pub mod errors; +pub mod handlers; pub mod request; pub mod server; -pub mod handlers; pub type Bytes = u64; pub type Port = u16; diff --git a/src/udp/server.rs b/src/udp/server.rs index bcacc2642..11cb61d99 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -62,7 +62,9 @@ impl UdpServer { debug!("{:?}", &inner[..position]); UdpServer::send_packet(socket, &remote_addr, &inner[..position]).await; } - Err(_) => { debug!("could not write response to bytes."); } + Err(_) => { + debug!("could not write response to bytes."); + } } } From 1f3dd8a2bb2bb5eb1469e962fd2e423d1d6c6f77 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 9 Sep 2022 18:32:39 +0200 Subject: [PATCH 065/435] ci: update workflow to enforce formatting --- .github/workflows/test_build_release.yml | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index d848ed653..87f6a9488 100644 --- 
a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -4,7 +4,23 @@ name: CI on: [push, pull_request] jobs: + format: + runs-on: ubuntu-latest + env: + CARGO_TERM_COLOR: always + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + components: rustfmt + - uses: Swatinem/rust-cache@v1 + - name: Check Rust Formatting + run: cargo fmt --check + test: + needs: format runs-on: ubuntu-latest env: CARGO_TERM_COLOR: always @@ -18,7 +34,7 @@ jobs: - uses: Swatinem/rust-cache@v1 - uses: taiki-e/install-action@cargo-llvm-cov - uses: taiki-e/install-action@nextest - - name: Run tests + - name: Run Tests run: cargo llvm-cov nextest build: @@ -37,9 +53,9 @@ jobs: profile: minimal toolchain: stable - uses: Swatinem/rust-cache@v1 - - name: Build torrust tracker + - name: Build Torrust Tracker run: cargo build --release - - name: Upload build artifact + - name: Upload Build Artifact uses: actions/upload-artifact@v2 with: name: torrust-tracker @@ -49,7 +65,7 @@ jobs: needs: build runs-on: ubuntu-latest steps: - - name: Download build artifact + - name: Download Build Artifact uses: actions/download-artifact@v2 with: name: torrust-tracker From e622c132fcf986487a290fc5a50403ed479ce33e Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 9 Sep 2022 18:49:42 +0200 Subject: [PATCH 066/435] formatting: add formatting commit to the git blame ignore file --- .git-blame-ignore | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 .git-blame-ignore diff --git a/.git-blame-ignore b/.git-blame-ignore new file mode 100644 index 000000000..06c439a36 --- /dev/null +++ b/.git-blame-ignore @@ -0,0 +1,4 @@ +# https://git-scm.com/docs/git-blame#Documentation/git-blame.txt---ignore-revs-fileltfilegt + +# Format the world! 
+57bf2000e39dccfc2f8b6e41d6c6f3eac38a3886 From 2ae7ab47ba19a22b7d98d31096cae605b26c3a8a Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 13 Sep 2022 00:32:05 +0200 Subject: [PATCH 067/435] clock: add mockable clock --- Cargo.lock | 1 + Cargo.toml | 9 +- src/lib.rs | 11 ++ src/main.rs | 5 +- src/protocol/clock/clock.rs | 248 ++++++++++++++++++++++++++++++++++++ src/protocol/clock/mod.rs | 1 + src/protocol/mod.rs | 1 + 7 files changed, 274 insertions(+), 2 deletions(-) create mode 100644 src/protocol/clock/clock.rs create mode 100644 src/protocol/clock/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 279e4a67d..1a4fe8b4f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2341,6 +2341,7 @@ dependencies = [ "fern", "futures", "hex", + "lazy_static", "log", "openssl", "percent-encoding", diff --git a/Cargo.toml b/Cargo.toml index 9d21ed7d7..89fdffa99 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,13 @@ lto = "fat" strip = true [dependencies] -tokio = { version = "1.7", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } +tokio = { version = "1.7", features = [ + "rt-multi-thread", + "net", + "sync", + "macros", + "signal", +] } serde = { version = "1.0", features = ["derive"] } serde_bencode = "^0.2.3" @@ -28,6 +34,7 @@ serde_with = "2.0.0" hex = "0.4.3" percent-encoding = "2.1.0" binascii = "0.1" +lazy_static = "1.4.0" openssl = { version = "0.10.41", features = ["vendored"] } diff --git a/src/lib.rs b/src/lib.rs index 6dcc7e6da..882e126bc 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -16,3 +16,14 @@ pub mod protocol; pub mod setup; pub mod tracker; pub mod udp; + +#[macro_use] +extern crate lazy_static; + +pub mod static_time { + use std::time::SystemTime; + + lazy_static! 
{ + pub static ref TIME_AT_APP_START: SystemTime = SystemTime::now(); + } +} diff --git a/src/main.rs b/src/main.rs index 0b406c85a..01121052a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,12 +2,15 @@ use std::sync::Arc; use log::info; use torrust_tracker::tracker::tracker::TorrentTracker; -use torrust_tracker::{logging, setup, Configuration}; +use torrust_tracker::{logging, setup, static_time, Configuration}; #[tokio::main] async fn main() { const CONFIG_PATH: &str = "config.toml"; + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + // Initialize Torrust config let config = match Configuration::load_from_file(CONFIG_PATH) { Ok(config) => Arc::new(config), diff --git a/src/protocol/clock/clock.rs b/src/protocol/clock/clock.rs new file mode 100644 index 000000000..db59170b3 --- /dev/null +++ b/src/protocol/clock/clock.rs @@ -0,0 +1,248 @@ +use std::num::IntErrorKind; +pub use std::time::Duration; + +pub type SinceUnixEpoch = Duration; + +#[derive(Debug)] +pub enum ClockType { + WorkingClock, + StoppedClock, +} + +#[derive(Debug)] +pub struct Clock; + +pub type WorkingClock = Clock<{ ClockType::WorkingClock as usize }>; +pub type StoppedClock = Clock<{ ClockType::StoppedClock as usize }>; + +#[cfg(not(test))] +pub type DefaultClock = WorkingClock; + +#[cfg(test)] +pub type DefaultClock = StoppedClock; + +pub trait Time: Sized { + fn now() -> SinceUnixEpoch; +} + +pub trait TimeNow: Time { + fn add(add_time: &Duration) -> Option { + Self::now().checked_add(*add_time) + } + fn sub(sub_time: &Duration) -> Option { + Self::now().checked_sub(*sub_time) + } +} + +#[cfg(test)] +mod tests { + use std::any::TypeId; + + use crate::protocol::clock::clock::{DefaultClock, StoppedClock, Time, WorkingClock}; + + #[test] + fn it_should_be_the_stopped_clock_as_default_when_testing() { + // We are testing, so we should default to the fixed time. 
+ assert_eq!(TypeId::of::(), TypeId::of::()); + assert_eq!(StoppedClock::now(), DefaultClock::now()) + } + + #[test] + fn it_should_have_different_times() { + assert_ne!(TypeId::of::(), TypeId::of::()); + assert_ne!(StoppedClock::now(), WorkingClock::now()) + } +} + +mod working_clock { + use std::time::SystemTime; + + use super::{SinceUnixEpoch, Time, TimeNow, WorkingClock}; + + impl Time for WorkingClock { + fn now() -> SinceUnixEpoch { + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap() + } + } + + impl TimeNow for WorkingClock {} +} + +pub trait StoppedTime: TimeNow { + fn local_set(unix_time: &SinceUnixEpoch); + fn local_set_to_unix_epoch() { + Self::local_set(&SinceUnixEpoch::ZERO) + } + fn local_set_to_app_start_time(); + fn local_set_to_system_time_now(); + fn local_add(duration: &Duration) -> Result<(), IntErrorKind>; + fn local_sub(duration: &Duration) -> Result<(), IntErrorKind>; + fn local_reset(); +} + +mod stopped_clock { + use std::num::IntErrorKind; + use std::time::Duration; + + use super::{SinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow}; + + impl Time for StoppedClock { + fn now() -> SinceUnixEpoch { + detail::FIXED_TIME.with(|time| { + return *time.borrow(); + }) + } + } + + impl TimeNow for StoppedClock {} + + impl StoppedTime for StoppedClock { + fn local_set(unix_time: &SinceUnixEpoch) { + detail::FIXED_TIME.with(|time| { + *time.borrow_mut() = *unix_time; + }) + } + + fn local_set_to_app_start_time() { + Self::local_set(&detail::get_app_start_time()) + } + + fn local_set_to_system_time_now() { + Self::local_set(&detail::get_app_start_time()) + } + + fn local_add(duration: &Duration) -> Result<(), IntErrorKind> { + detail::FIXED_TIME.with(|time| { + let time_borrowed = *time.borrow(); + *time.borrow_mut() = match time_borrowed.checked_add(*duration) { + Some(time) => time, + None => { + return Err(IntErrorKind::PosOverflow); + } + }; + Ok(()) + }) + } + + fn local_sub(duration: &Duration) -> Result<(), IntErrorKind> { 
+ detail::FIXED_TIME.with(|time| { + let time_borrowed = *time.borrow(); + *time.borrow_mut() = match time_borrowed.checked_sub(*duration) { + Some(time) => time, + None => { + return Err(IntErrorKind::NegOverflow); + } + }; + Ok(()) + }) + } + + fn local_reset() { + Self::local_set(&detail::get_default_fixed_time()) + } + } + + #[cfg(test)] + mod tests { + use std::thread; + use std::time::Duration; + + use crate::protocol::clock::clock::{SinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow, WorkingClock}; + + #[test] + fn it_should_default_to_zero_when_testing() { + assert_eq!(StoppedClock::now(), SinceUnixEpoch::ZERO) + } + + #[test] + fn it_should_possible_to_set_the_time() { + // Check we start with ZERO. + assert_eq!(StoppedClock::now(), Duration::ZERO); + + // Set to Current Time and Check + let timestamp = WorkingClock::now(); + StoppedClock::local_set(×tamp); + assert_eq!(StoppedClock::now(), timestamp); + + // Elapse the Current Time and Check + StoppedClock::local_add(×tamp).unwrap(); + assert_eq!(StoppedClock::now(), timestamp + timestamp); + + // Reset to ZERO and Check + StoppedClock::local_reset(); + assert_eq!(StoppedClock::now(), Duration::ZERO); + } + + #[test] + fn it_should_default_to_zero_on_thread_exit() { + assert_eq!(StoppedClock::now(), Duration::ZERO); + let after5 = WorkingClock::add(&Duration::from_secs(5)).unwrap(); + StoppedClock::local_set(&after5); + assert_eq!(StoppedClock::now(), after5); + + let t = thread::spawn(move || { + // each thread starts out with the initial value of ZERO + assert_eq!(StoppedClock::now(), Duration::ZERO); + + // and gets set to the current time. 
+ let timestamp = WorkingClock::now(); + StoppedClock::local_set(×tamp); + assert_eq!(StoppedClock::now(), timestamp); + }); + + // wait for the thread to complete and bail out on panic + t.join().unwrap(); + + // we retain our original value of current time + 5sec despite the child thread + assert_eq!(StoppedClock::now(), after5); + + // Reset to ZERO and Check + StoppedClock::local_reset(); + assert_eq!(StoppedClock::now(), Duration::ZERO); + } + } + + mod detail { + use std::cell::RefCell; + use std::time::SystemTime; + + use crate::protocol::clock::clock::SinceUnixEpoch; + use crate::static_time; + + pub fn get_app_start_time() -> SinceUnixEpoch { + (*static_time::TIME_AT_APP_START) + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + } + + #[cfg(not(test))] + pub fn get_default_fixed_time() -> SinceUnixEpoch { + get_app_start_time() + } + + #[cfg(test)] + pub fn get_default_fixed_time() -> SinceUnixEpoch { + SinceUnixEpoch::ZERO + } + + thread_local!(pub static FIXED_TIME: RefCell = RefCell::new(get_default_fixed_time())); + + #[cfg(test)] + mod tests { + use std::time::Duration; + + use crate::protocol::clock::clock::stopped_clock::detail::{get_app_start_time, get_default_fixed_time}; + + #[test] + fn it_should_get_the_zero_start_time_when_testing() { + assert_eq!(get_default_fixed_time(), Duration::ZERO); + } + + #[test] + fn it_should_get_app_start_time() { + const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1662983731, 000022312); + assert!(get_app_start_time() > TIME_AT_WRITING_THIS_TEST); + } + } + } +} diff --git a/src/protocol/clock/mod.rs b/src/protocol/clock/mod.rs new file mode 100644 index 000000000..159730d2b --- /dev/null +++ b/src/protocol/clock/mod.rs @@ -0,0 +1 @@ +pub mod clock; diff --git a/src/protocol/mod.rs b/src/protocol/mod.rs index 99cfd91e4..fcb28b3b2 100644 --- a/src/protocol/mod.rs +++ b/src/protocol/mod.rs @@ -1,2 +1,3 @@ +pub mod clock; pub mod common; pub mod utils; From cab093c026cf253ba6f7208211953a25b3d1c91b Mon Sep 
17 00:00:00 2001 From: Cameron Garnham Date: Tue, 13 Sep 2022 00:44:12 +0200 Subject: [PATCH 068/435] clock: use mockable clock in project --- src/api/server.rs | 3 ++- src/databases/mysql.rs | 7 ++++--- src/databases/sqlite.rs | 7 ++++--- src/protocol/utils.rs | 14 ++++++-------- src/tracker/key.rs | 36 ++++++++++++++++++++++++++---------- src/tracker/peer.rs | 11 ++++++----- src/tracker/torrent.rs | 6 ++++-- src/tracker/tracker.rs | 5 +++-- src/udp/errors.rs | 3 +++ 9 files changed, 58 insertions(+), 34 deletions(-) diff --git a/src/api/server.rs b/src/api/server.rs index cc6c905e4..5285c9b2b 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -2,6 +2,7 @@ use std::cmp::min; use std::collections::{HashMap, HashSet}; use std::net::SocketAddr; use std::sync::Arc; +use std::time::Duration; use serde::{Deserialize, Serialize}; use warp::{filters, reply, serve, Filter}; @@ -268,7 +269,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp (seconds_valid, tracker) }) .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { - match tracker.generate_auth_key(seconds_valid).await { + match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { Ok(auth_key) => Ok(warp::reply::json(&auth_key)), Err(..) 
=> Err(warp::reject::custom(ActionStatus::Err { reason: "failed to generate key".into(), diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 882fb7bf4..33287df6d 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -1,4 +1,5 @@ use std::str::FromStr; +use std::time::Duration; use async_trait::async_trait; use log::debug; @@ -94,7 +95,7 @@ impl Database for MysqlDatabase { "SELECT `key`, valid_until FROM `keys`", |(key, valid_until): (String, i64)| AuthKey { key, - valid_until: Some(valid_until as u64), + valid_until: Some(Duration::from_secs(valid_until as u64)), }, ) .map_err(|_| database::Error::QueryReturnedNoRows)?; @@ -187,7 +188,7 @@ impl Database for MysqlDatabase { { Some((key, valid_until)) => Ok(AuthKey { key, - valid_until: Some(valid_until as u64), + valid_until: Some(Duration::from_secs(valid_until as u64)), }), None => Err(database::Error::InvalidQuery), } @@ -197,7 +198,7 @@ impl Database for MysqlDatabase { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let key = auth_key.key.to_string(); - let valid_until = auth_key.valid_until.unwrap_or(0).to_string(); + let valid_until = auth_key.valid_until.unwrap_or(Duration::ZERO).as_secs().to_string(); match conn.exec_drop( "INSERT INTO `keys` (`key`, valid_until) VALUES (:key, :valid_until)", diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 3aba39919..ff080306d 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -7,6 +7,7 @@ use r2d2_sqlite::SqliteConnectionManager; use crate::databases::database; use crate::databases::database::{Database, Error}; +use crate::protocol::clock::clock::SinceUnixEpoch; use crate::tracker::key::AuthKey; use crate::InfoHash; @@ -85,7 +86,7 @@ impl Database for SqliteDatabase { Ok(AuthKey { key, - valid_until: Some(valid_until as u64), + valid_until: Some(SinceUnixEpoch::from_secs(valid_until as u64)), }) })?; @@ -192,7 +193,7 @@ impl Database for SqliteDatabase { Ok(AuthKey { key, - 
valid_until: Some(valid_until_i64 as u64), + valid_until: Some(SinceUnixEpoch::from_secs(valid_until_i64 as u64)), }) } else { Err(database::Error::QueryReturnedNoRows) @@ -204,7 +205,7 @@ impl Database for SqliteDatabase { match conn.execute( "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", - [auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()], + [auth_key.key.to_string(), auth_key.valid_until.unwrap().as_secs().to_string()], ) { Ok(updated) => { if updated > 0 { diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index e50c8b036..f2a68fdb3 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -1,19 +1,17 @@ use std::net::SocketAddr; -use std::time::SystemTime; use aquatic_udp_protocol::ConnectionId; +use super::clock::clock::{DefaultClock, SinceUnixEpoch, Time}; + pub fn get_connection_id(remote_address: &SocketAddr) -> ConnectionId { - match std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH) { - Ok(duration) => ConnectionId(((duration.as_secs() / 3600) | ((remote_address.port() as u64) << 36)) as i64), - Err(_) => ConnectionId(0x7FFFFFFFFFFFFFFF), - } + ConnectionId(((current_time() / 3600) | ((remote_address.port() as u64) << 36)) as i64) } pub fn current_time() -> u64 { - SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs() + DefaultClock::now().as_secs() } -pub fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { - ser.serialize_u64(inst.elapsed().as_millis() as u64) +pub fn ser_unix_time_value(unix_time_value: &SinceUnixEpoch, ser: S) -> Result { + ser.serialize_u64(unix_time_value.as_millis() as u64) } diff --git a/src/tracker/key.rs b/src/tracker/key.rs index f935dac07..8ba19ab12 100644 --- a/src/tracker/key.rs +++ b/src/tracker/key.rs @@ -1,29 +1,31 @@ +use std::time::Duration; + use derive_more::{Display, Error}; use log::debug; use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use serde::Serialize; -use crate::protocol::utils::current_time; +use 
crate::protocol::clock::clock::{DefaultClock, SinceUnixEpoch, Time, TimeNow}; use crate::AUTH_KEY_LENGTH; -pub fn generate_auth_key(seconds_valid: u64) -> AuthKey { +pub fn generate_auth_key(lifetime: Duration) -> AuthKey { let key: String = thread_rng() .sample_iter(&Alphanumeric) .take(AUTH_KEY_LENGTH) .map(char::from) .collect(); - debug!("Generated key: {}, valid for: {} seconds", key, seconds_valid); + debug!("Generated key: {}, valid for: {:?} seconds", key, lifetime); AuthKey { key, - valid_until: Some(current_time() + seconds_valid), + valid_until: Some(DefaultClock::add(&lifetime).unwrap()), } } pub fn verify_auth_key(auth_key: &AuthKey) -> Result<(), Error> { - let current_time = current_time(); + let current_time: SinceUnixEpoch = DefaultClock::now(); if auth_key.valid_until.is_none() { return Err(Error::KeyInvalid); } @@ -37,7 +39,7 @@ pub fn verify_auth_key(auth_key: &AuthKey) -> Result<(), Error> { #[derive(Serialize, Debug, Eq, PartialEq, Clone)] pub struct AuthKey { pub key: String, - pub valid_until: Option, + pub valid_until: Option, } impl AuthKey { @@ -81,6 +83,9 @@ impl From for Error { #[cfg(test)] mod tests { + use std::time::Duration; + + use crate::protocol::clock::clock::{DefaultClock, StoppedTime}; use crate::tracker::key; #[test] @@ -105,15 +110,26 @@ mod tests { #[test] fn generate_valid_auth_key() { - let auth_key = key::generate_auth_key(9999); + let auth_key = key::generate_auth_key(Duration::new(9999, 0)); assert!(key::verify_auth_key(&auth_key).is_ok()); } #[test] - fn generate_expired_auth_key() { - let mut auth_key = key::generate_auth_key(0); - auth_key.valid_until = Some(0); + fn generate_and_check_expired_auth_key() { + // Set the time to the current time. + DefaultClock::local_set_to_system_time_now(); + + // Make key that is valid for 19 seconds. + let auth_key = key::generate_auth_key(Duration::from_secs(19)); + + // Mock the time has passed 10 sec. 
+ DefaultClock::local_add(&Duration::from_secs(10)).unwrap(); + + assert!(key::verify_auth_key(&auth_key).is_ok()); + + // Mock the time has passed another 10 sec. + DefaultClock::local_add(&Duration::from_secs(10)).unwrap(); assert!(key::verify_auth_key(&auth_key).is_err()); } diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 0514f41ed..b37090b8e 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -5,16 +5,17 @@ use serde; use serde::Serialize; use crate::http::AnnounceRequest; +use crate::protocol::clock::clock::{DefaultClock, SinceUnixEpoch, Time}; use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; -use crate::protocol::utils::ser_instant; +use crate::protocol::utils::ser_unix_time_value; use crate::PeerId; #[derive(PartialEq, Eq, Debug, Clone, Serialize)] pub struct TorrentPeer { pub peer_id: PeerId, pub peer_addr: SocketAddr, - #[serde(serialize_with = "ser_instant")] - pub updated: std::time::Instant, + #[serde(serialize_with = "ser_unix_time_value")] + pub updated: SinceUnixEpoch, #[serde(with = "NumberOfBytesDef")] pub uploaded: NumberOfBytes, #[serde(with = "NumberOfBytesDef")] @@ -36,7 +37,7 @@ impl TorrentPeer { TorrentPeer { peer_id: PeerId(announce_request.peer_id.0), peer_addr, - updated: std::time::Instant::now(), + updated: DefaultClock::now(), uploaded: announce_request.bytes_uploaded, downloaded: announce_request.bytes_downloaded, left: announce_request.bytes_left, @@ -65,7 +66,7 @@ impl TorrentPeer { TorrentPeer { peer_id: announce_request.peer_id.clone(), peer_addr, - updated: std::time::Instant::now(), + updated: DefaultClock::now(), uploaded: NumberOfBytes(announce_request.uploaded as i64), downloaded: NumberOfBytes(announce_request.downloaded as i64), left: NumberOfBytes(announce_request.left as i64), diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 7950ce9c0..b08f03266 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -1,9 +1,11 @@ use std::net::{IpAddr, SocketAddr}; 
+use std::time::Duration; use aquatic_udp_protocol::AnnounceEvent; use serde::{Deserialize, Serialize}; use crate::peer::TorrentPeer; +use crate::protocol::clock::clock::{DefaultClock, TimeNow}; use crate::{PeerId, MAX_SCRAPE_TORRENTS}; #[derive(Serialize, Deserialize, Clone)] @@ -75,8 +77,8 @@ impl TorrentEntry { } pub fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { - self.peers - .retain(|_, peer| peer.updated.elapsed() > std::time::Duration::from_secs(max_peer_timeout as u64)); + let current_cutoff = DefaultClock::sub(&Duration::from_secs(max_peer_timeout as u64)).unwrap_or_default(); + self.peers.retain(|_, peer| peer.updated > current_cutoff); } } diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index 51d7716fb..9a242e41a 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -2,6 +2,7 @@ use std::collections::btree_map::Entry; use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; +use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; @@ -60,8 +61,8 @@ impl TorrentTracker { self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed } - pub async fn generate_auth_key(&self, seconds_valid: u64) -> Result { - let auth_key = key::generate_auth_key(seconds_valid); + pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { + let auth_key = key::generate_auth_key(lifetime); self.database.add_key_to_keys(&auth_key).await?; self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); Ok(auth_key) diff --git a/src/udp/errors.rs b/src/udp/errors.rs index fb29e969e..8d7b04b4f 100644 --- a/src/udp/errors.rs +++ b/src/udp/errors.rs @@ -8,6 +8,9 @@ pub enum ServerError { #[error("info_hash is either missing or invalid")] InvalidInfoHash, + #[error("connection id could not be verified")] + InvalidConnectionId, + #[error("could not find remote address")] AddressNotFound, From 7ac49c8228985324042a86c4cfea7205283edff1 Mon 
Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 14 Sep 2022 20:26:20 +0200 Subject: [PATCH 069/435] refactor: renamed `SinceUnixEpoch` to `DurationSinceUnixEpoch` to be more descriptive --- src/databases/sqlite.rs | 6 +++--- src/protocol/clock/clock.rs | 38 ++++++++++++++++++------------------- src/protocol/utils.rs | 4 ++-- src/tracker/key.rs | 6 +++--- src/tracker/peer.rs | 4 ++-- 5 files changed, 29 insertions(+), 29 deletions(-) diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index ff080306d..a329b6bfc 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -7,7 +7,7 @@ use r2d2_sqlite::SqliteConnectionManager; use crate::databases::database; use crate::databases::database::{Database, Error}; -use crate::protocol::clock::clock::SinceUnixEpoch; +use crate::protocol::clock::clock::DurationSinceUnixEpoch; use crate::tracker::key::AuthKey; use crate::InfoHash; @@ -86,7 +86,7 @@ impl Database for SqliteDatabase { Ok(AuthKey { key, - valid_until: Some(SinceUnixEpoch::from_secs(valid_until as u64)), + valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until as u64)), }) })?; @@ -193,7 +193,7 @@ impl Database for SqliteDatabase { Ok(AuthKey { key, - valid_until: Some(SinceUnixEpoch::from_secs(valid_until_i64 as u64)), + valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until_i64 as u64)), }) } else { Err(database::Error::QueryReturnedNoRows) diff --git a/src/protocol/clock/clock.rs b/src/protocol/clock/clock.rs index db59170b3..538edcfed 100644 --- a/src/protocol/clock/clock.rs +++ b/src/protocol/clock/clock.rs @@ -1,7 +1,7 @@ use std::num::IntErrorKind; pub use std::time::Duration; -pub type SinceUnixEpoch = Duration; +pub type DurationSinceUnixEpoch = Duration; #[derive(Debug)] pub enum ClockType { @@ -22,14 +22,14 @@ pub type DefaultClock = WorkingClock; pub type DefaultClock = StoppedClock; pub trait Time: Sized { - fn now() -> SinceUnixEpoch; + fn now() -> DurationSinceUnixEpoch; } pub trait TimeNow: Time { - fn 
add(add_time: &Duration) -> Option { + fn add(add_time: &Duration) -> Option { Self::now().checked_add(*add_time) } - fn sub(sub_time: &Duration) -> Option { + fn sub(sub_time: &Duration) -> Option { Self::now().checked_sub(*sub_time) } } @@ -57,10 +57,10 @@ mod tests { mod working_clock { use std::time::SystemTime; - use super::{SinceUnixEpoch, Time, TimeNow, WorkingClock}; + use super::{DurationSinceUnixEpoch, Time, TimeNow, WorkingClock}; impl Time for WorkingClock { - fn now() -> SinceUnixEpoch { + fn now() -> DurationSinceUnixEpoch { SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap() } } @@ -69,9 +69,9 @@ mod working_clock { } pub trait StoppedTime: TimeNow { - fn local_set(unix_time: &SinceUnixEpoch); + fn local_set(unix_time: &DurationSinceUnixEpoch); fn local_set_to_unix_epoch() { - Self::local_set(&SinceUnixEpoch::ZERO) + Self::local_set(&DurationSinceUnixEpoch::ZERO) } fn local_set_to_app_start_time(); fn local_set_to_system_time_now(); @@ -84,10 +84,10 @@ mod stopped_clock { use std::num::IntErrorKind; use std::time::Duration; - use super::{SinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow}; + use super::{DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow}; impl Time for StoppedClock { - fn now() -> SinceUnixEpoch { + fn now() -> DurationSinceUnixEpoch { detail::FIXED_TIME.with(|time| { return *time.borrow(); }) @@ -97,7 +97,7 @@ mod stopped_clock { impl TimeNow for StoppedClock {} impl StoppedTime for StoppedClock { - fn local_set(unix_time: &SinceUnixEpoch) { + fn local_set(unix_time: &DurationSinceUnixEpoch) { detail::FIXED_TIME.with(|time| { *time.borrow_mut() = *unix_time; }) @@ -147,11 +147,11 @@ mod stopped_clock { use std::thread; use std::time::Duration; - use crate::protocol::clock::clock::{SinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow, WorkingClock}; + use crate::protocol::clock::clock::{DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow, WorkingClock}; #[test] fn 
it_should_default_to_zero_when_testing() { - assert_eq!(StoppedClock::now(), SinceUnixEpoch::ZERO) + assert_eq!(StoppedClock::now(), DurationSinceUnixEpoch::ZERO) } #[test] @@ -206,26 +206,26 @@ mod stopped_clock { use std::cell::RefCell; use std::time::SystemTime; - use crate::protocol::clock::clock::SinceUnixEpoch; + use crate::protocol::clock::clock::DurationSinceUnixEpoch; use crate::static_time; - pub fn get_app_start_time() -> SinceUnixEpoch { + pub fn get_app_start_time() -> DurationSinceUnixEpoch { (*static_time::TIME_AT_APP_START) .duration_since(SystemTime::UNIX_EPOCH) .unwrap() } #[cfg(not(test))] - pub fn get_default_fixed_time() -> SinceUnixEpoch { + pub fn get_default_fixed_time() -> DurationSinceUnixEpoch { get_app_start_time() } #[cfg(test)] - pub fn get_default_fixed_time() -> SinceUnixEpoch { - SinceUnixEpoch::ZERO + pub fn get_default_fixed_time() -> DurationSinceUnixEpoch { + DurationSinceUnixEpoch::ZERO } - thread_local!(pub static FIXED_TIME: RefCell = RefCell::new(get_default_fixed_time())); + thread_local!(pub static FIXED_TIME: RefCell = RefCell::new(get_default_fixed_time())); #[cfg(test)] mod tests { diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index f2a68fdb3..127baa4eb 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -2,7 +2,7 @@ use std::net::SocketAddr; use aquatic_udp_protocol::ConnectionId; -use super::clock::clock::{DefaultClock, SinceUnixEpoch, Time}; +use super::clock::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; pub fn get_connection_id(remote_address: &SocketAddr) -> ConnectionId { ConnectionId(((current_time() / 3600) | ((remote_address.port() as u64) << 36)) as i64) @@ -12,6 +12,6 @@ pub fn current_time() -> u64 { DefaultClock::now().as_secs() } -pub fn ser_unix_time_value(unix_time_value: &SinceUnixEpoch, ser: S) -> Result { +pub fn ser_unix_time_value(unix_time_value: &DurationSinceUnixEpoch, ser: S) -> Result { ser.serialize_u64(unix_time_value.as_millis() as u64) } diff --git 
a/src/tracker/key.rs b/src/tracker/key.rs index 8ba19ab12..76ac21527 100644 --- a/src/tracker/key.rs +++ b/src/tracker/key.rs @@ -6,7 +6,7 @@ use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use serde::Serialize; -use crate::protocol::clock::clock::{DefaultClock, SinceUnixEpoch, Time, TimeNow}; +use crate::protocol::clock::clock::{DefaultClock, DurationSinceUnixEpoch, Time, TimeNow}; use crate::AUTH_KEY_LENGTH; pub fn generate_auth_key(lifetime: Duration) -> AuthKey { @@ -25,7 +25,7 @@ pub fn generate_auth_key(lifetime: Duration) -> AuthKey { } pub fn verify_auth_key(auth_key: &AuthKey) -> Result<(), Error> { - let current_time: SinceUnixEpoch = DefaultClock::now(); + let current_time: DurationSinceUnixEpoch = DefaultClock::now(); if auth_key.valid_until.is_none() { return Err(Error::KeyInvalid); } @@ -39,7 +39,7 @@ pub fn verify_auth_key(auth_key: &AuthKey) -> Result<(), Error> { #[derive(Serialize, Debug, Eq, PartialEq, Clone)] pub struct AuthKey { pub key: String, - pub valid_until: Option, + pub valid_until: Option, } impl AuthKey { diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index b37090b8e..b10c97bcc 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -5,7 +5,7 @@ use serde; use serde::Serialize; use crate::http::AnnounceRequest; -use crate::protocol::clock::clock::{DefaultClock, SinceUnixEpoch, Time}; +use crate::protocol::clock::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; use crate::protocol::utils::ser_unix_time_value; use crate::PeerId; @@ -15,7 +15,7 @@ pub struct TorrentPeer { pub peer_id: PeerId, pub peer_addr: SocketAddr, #[serde(serialize_with = "ser_unix_time_value")] - pub updated: SinceUnixEpoch, + pub updated: DurationSinceUnixEpoch, #[serde(with = "NumberOfBytesDef")] pub uploaded: NumberOfBytes, #[serde(with = "NumberOfBytesDef")] From 20f099eb0763a2d4625b788af63b2cd808440dd8 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: 
Mon, 19 Sep 2022 08:06:55 +0200 Subject: [PATCH 070/435] refactor: move clock/clock.rs into clock.rs (clippy) --- src/databases/sqlite.rs | 2 +- src/protocol/{clock => }/clock.rs | 8 ++++---- src/protocol/clock/mod.rs | 1 - src/protocol/utils.rs | 2 +- src/tracker/key.rs | 4 ++-- src/tracker/peer.rs | 5 ++--- src/tracker/torrent.rs | 2 +- 7 files changed, 11 insertions(+), 13 deletions(-) rename src/protocol/{clock => }/clock.rs (94%) delete mode 100644 src/protocol/clock/mod.rs diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index a329b6bfc..fb66c0b94 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -7,7 +7,7 @@ use r2d2_sqlite::SqliteConnectionManager; use crate::databases::database; use crate::databases::database::{Database, Error}; -use crate::protocol::clock::clock::DurationSinceUnixEpoch; +use crate::protocol::clock::DurationSinceUnixEpoch; use crate::tracker::key::AuthKey; use crate::InfoHash; diff --git a/src/protocol/clock/clock.rs b/src/protocol/clock.rs similarity index 94% rename from src/protocol/clock/clock.rs rename to src/protocol/clock.rs index 538edcfed..a72f3699e 100644 --- a/src/protocol/clock/clock.rs +++ b/src/protocol/clock.rs @@ -38,7 +38,7 @@ pub trait TimeNow: Time { mod tests { use std::any::TypeId; - use crate::protocol::clock::clock::{DefaultClock, StoppedClock, Time, WorkingClock}; + use crate::protocol::clock::{DefaultClock, StoppedClock, Time, WorkingClock}; #[test] fn it_should_be_the_stopped_clock_as_default_when_testing() { @@ -147,7 +147,7 @@ mod stopped_clock { use std::thread; use std::time::Duration; - use crate::protocol::clock::clock::{DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow, WorkingClock}; + use crate::protocol::clock::{DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow, WorkingClock}; #[test] fn it_should_default_to_zero_when_testing() { @@ -206,7 +206,7 @@ mod stopped_clock { use std::cell::RefCell; use std::time::SystemTime; - use 
crate::protocol::clock::clock::DurationSinceUnixEpoch; + use crate::protocol::clock::DurationSinceUnixEpoch; use crate::static_time; pub fn get_app_start_time() -> DurationSinceUnixEpoch { @@ -231,7 +231,7 @@ mod stopped_clock { mod tests { use std::time::Duration; - use crate::protocol::clock::clock::stopped_clock::detail::{get_app_start_time, get_default_fixed_time}; + use crate::protocol::clock::stopped_clock::detail::{get_app_start_time, get_default_fixed_time}; #[test] fn it_should_get_the_zero_start_time_when_testing() { diff --git a/src/protocol/clock/mod.rs b/src/protocol/clock/mod.rs deleted file mode 100644 index 159730d2b..000000000 --- a/src/protocol/clock/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod clock; diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index 127baa4eb..48fe4eb17 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -2,7 +2,7 @@ use std::net::SocketAddr; use aquatic_udp_protocol::ConnectionId; -use super::clock::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; +use super::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; pub fn get_connection_id(remote_address: &SocketAddr) -> ConnectionId { ConnectionId(((current_time() / 3600) | ((remote_address.port() as u64) << 36)) as i64) diff --git a/src/tracker/key.rs b/src/tracker/key.rs index 76ac21527..c513b48da 100644 --- a/src/tracker/key.rs +++ b/src/tracker/key.rs @@ -6,7 +6,7 @@ use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use serde::Serialize; -use crate::protocol::clock::clock::{DefaultClock, DurationSinceUnixEpoch, Time, TimeNow}; +use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, Time, TimeNow}; use crate::AUTH_KEY_LENGTH; pub fn generate_auth_key(lifetime: Duration) -> AuthKey { @@ -85,7 +85,7 @@ impl From for Error { mod tests { use std::time::Duration; - use crate::protocol::clock::clock::{DefaultClock, StoppedTime}; + use crate::protocol::clock::{DefaultClock, StoppedTime}; use crate::tracker::key; #[test] diff 
--git a/src/tracker/peer.rs b/src/tracker/peer.rs index b10c97bcc..71c411b9b 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -5,10 +5,9 @@ use serde; use serde::Serialize; use crate::http::AnnounceRequest; -use crate::protocol::clock::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; -use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; +use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; +use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef, PeerId}; use crate::protocol::utils::ser_unix_time_value; -use crate::PeerId; #[derive(PartialEq, Eq, Debug, Clone, Serialize)] pub struct TorrentPeer { diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index b08f03266..7404f63af 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -5,7 +5,7 @@ use aquatic_udp_protocol::AnnounceEvent; use serde::{Deserialize, Serialize}; use crate::peer::TorrentPeer; -use crate::protocol::clock::clock::{DefaultClock, TimeNow}; +use crate::protocol::clock::{DefaultClock, TimeNow}; use crate::{PeerId, MAX_SCRAPE_TORRENTS}; #[derive(Serialize, Deserialize, Clone)] From b60a0bcc1f3ba08b5a667f05e9d5127276f07e30 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 9 Sep 2022 19:05:05 +0200 Subject: [PATCH 071/435] env: workspace vscode settings and extension recommendations --- .vscode/extensions.json | 6 ++++++ .vscode/settings.json | 17 +++++++++++++++++ 2 files changed, 23 insertions(+) create mode 100644 .vscode/extensions.json create mode 100644 .vscode/settings.json diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 000000000..b55ef8bf6 --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,6 @@ +{ + "recommendations": [ + "streetsidesoftware.code-spell-checker", + "matklad.rust-analyzer" + ] +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000..7d04b248f --- /dev/null +++ 
b/.vscode/settings.json @@ -0,0 +1,17 @@ +{ + "cSpell.words": [ + "byteorder", + "hasher", + "leechers", + "nanos", + "rngs", + "Seedable", + "thiserror", + "torrust", + "typenum" + ], + "[rust]": { + "editor.defaultFormatter": "matklad.rust-analyzer", + "editor.formatOnSave": true + }, +} \ No newline at end of file From 2b294dd1773d06967e9f6134dfc65101c91dc8fe Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 21 Sep 2022 15:23:33 +0200 Subject: [PATCH 072/435] env: vscode workspace settings, do clippy check on save. Co-authored-by: Jose Celano --- .vscode/settings.json | 1 + 1 file changed, 1 insertion(+) diff --git a/.vscode/settings.json b/.vscode/settings.json index 7d04b248f..d87732d93 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -14,4 +14,5 @@ "editor.defaultFormatter": "matklad.rust-analyzer", "editor.formatOnSave": true }, + "rust-analyzer.checkOnSave.command": "clippy", } \ No newline at end of file From 028644dd82e5fdb02741a6ee6e82a4ba6fdac5d8 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 20 Sep 2022 18:25:27 +0200 Subject: [PATCH 073/435] clock: time extent, maker, and associated traits `TimeExtent` is a simple structure that contains base increment (duration), and amount (a multiplier for the base increment). The resulting product of the base and the multiplier is the total duration of the time extent. `TimeExtentMaker` is a helper that generates time extents based upon the increment length and the time of the clock, this clock is specialised according to the `test` predicate with the `DefaultClockTimeExtentMaker` type.
--- src/protocol/{clock.rs => clock/mod.rs} | 6 +- src/protocol/clock/timeextent.rs | 185 ++++++++++++++++++++++++ 2 files changed, 189 insertions(+), 2 deletions(-) rename src/protocol/{clock.rs => clock/mod.rs} (99%) create mode 100644 src/protocol/clock/timeextent.rs diff --git a/src/protocol/clock.rs b/src/protocol/clock/mod.rs similarity index 99% rename from src/protocol/clock.rs rename to src/protocol/clock/mod.rs index a72f3699e..cab7290e3 100644 --- a/src/protocol/clock.rs +++ b/src/protocol/clock/mod.rs @@ -1,5 +1,5 @@ use std::num::IntErrorKind; -pub use std::time::Duration; +use std::time::Duration; pub type DurationSinceUnixEpoch = Duration; @@ -240,9 +240,11 @@ mod stopped_clock { #[test] fn it_should_get_app_start_time() { - const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1662983731, 000022312); + const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1662983731, 22312); assert!(get_app_start_time() > TIME_AT_WRITING_THIS_TEST); } } } } + +pub mod timeextent; diff --git a/src/protocol/clock/timeextent.rs b/src/protocol/clock/timeextent.rs new file mode 100644 index 000000000..eb050179e --- /dev/null +++ b/src/protocol/clock/timeextent.rs @@ -0,0 +1,185 @@ +use std::num::{IntErrorKind, TryFromIntError}; +use std::time::Duration; + +use super::{ClockType, StoppedClock, TimeNow, WorkingClock}; + +pub trait Extent: Sized + Default { + type Base; + type Multiplier; + type Product; + + fn new(unit: &Self::Base, count: &Self::Multiplier) -> Self; + + fn add(&self, add: Self::Multiplier) -> Result; + fn sub(&self, sub: Self::Multiplier) -> Result; + + fn total(&self) -> Result, TryFromIntError>; + fn total_next(&self) -> Result, TryFromIntError>; +} + +pub type TimeExtentBase = Duration; +pub type TimeExtentMultiplier = u64; +pub type TimeExtentProduct = TimeExtentBase; + +#[derive(Debug, Default, Hash, PartialEq, Eq)] +pub struct TimeExtent { + pub increment: TimeExtentBase, + pub amount: TimeExtentMultiplier, +} + +impl TimeExtent { + pub 
const fn from_sec(seconds: u64, amount: &TimeExtentMultiplier) -> Self { + Self { + increment: TimeExtentBase::from_secs(seconds), + amount: *amount, + } + } +} + +impl Extent for TimeExtent { + type Base = TimeExtentBase; + type Multiplier = TimeExtentMultiplier; + type Product = TimeExtentProduct; + + fn new(increment: &Self::Base, amount: &Self::Multiplier) -> Self { + Self { + increment: *increment, + amount: *amount, + } + } + + fn add(&self, add: Self::Multiplier) -> Result { + match self.amount.checked_add(add) { + None => Err(IntErrorKind::PosOverflow), + Some(amount) => Ok(Self { + increment: self.increment, + amount, + }), + } + } + + fn sub(&self, sub: Self::Multiplier) -> Result { + match self.amount.checked_sub(sub) { + None => Err(IntErrorKind::NegOverflow), + Some(amount) => Ok(Self { + increment: self.increment, + amount, + }), + } + } + + fn total(&self) -> Result, TryFromIntError> { + match u32::try_from(self.amount) { + Err(error) => Err(error), + Ok(amount) => Ok(self.increment.checked_mul(amount)), + } + } + + fn total_next(&self) -> Result, TryFromIntError> { + match u32::try_from(self.amount) { + Err(e) => Err(e), + Ok(amount) => match amount.checked_add(1) { + None => Ok(None), + Some(amount) => match self.increment.checked_mul(amount) { + None => Ok(None), + Some(extent) => Ok(Some(extent)), + }, + }, + } + } +} + +pub trait MakeTimeExtent: Sized +where + Clock: TimeNow, +{ + fn now(increment: &TimeExtentBase) -> Option> { + Clock::now() + .as_nanos() + .checked_div((*increment).as_nanos()) + .map(|amount| match TimeExtentMultiplier::try_from(amount) { + Err(error) => Err(error), + Ok(amount) => Ok(TimeExtent::new(increment, &amount)), + }) + } + + fn now_add(increment: &TimeExtentBase, add_time: &Duration) -> Option> { + match Clock::add(add_time) { + None => None, + Some(time) => { + time.as_nanos() + .checked_div(increment.as_nanos()) + .map(|amount| match TimeExtentMultiplier::try_from(amount) { + Err(error) => Err(error), + Ok(amount) 
=> Ok(TimeExtent::new(increment, &amount)), + }) + } + } + } + fn now_sub(increment: &TimeExtentBase, sub_time: &Duration) -> Option> { + match Clock::sub(sub_time) { + None => None, + Some(time) => { + time.as_nanos() + .checked_div(increment.as_nanos()) + .map(|amount| match TimeExtentMultiplier::try_from(amount) { + Err(error) => Err(error), + Ok(amount) => Ok(TimeExtent::new(increment, &amount)), + }) + } + } + } +} + +#[derive(Debug)] +pub struct TimeExtentMaker {} + +pub type WorkingClockTimeExtentMaker = TimeExtentMaker<{ ClockType::WorkingClock as usize }>; +pub type StoppedClockTimeExtentMaker = TimeExtentMaker<{ ClockType::StoppedClock as usize }>; + +impl MakeTimeExtent for WorkingClockTimeExtentMaker {} +impl MakeTimeExtent for StoppedClockTimeExtentMaker {} + +#[cfg(not(test))] +pub type DefaultClockTimeExtentMaker = WorkingClockTimeExtentMaker; + +#[cfg(test)] +pub type DefaultClockTimeExtentMaker = StoppedClockTimeExtentMaker; + +#[cfg(test)] +mod test { + + use std::time::Duration; + + use crate::protocol::clock::timeextent::{DefaultClockTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent}; + use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedTime}; + + #[test] + fn it_should_get_the_total_duration() { + assert_eq!(TimeExtent::default().total().unwrap().unwrap(), Duration::ZERO); + + assert_eq!( + TimeExtent::from_sec(12, &12).total().unwrap().unwrap(), + Duration::from_secs(144) + ); + assert_eq!( + TimeExtent::from_sec(12, &12).total_next().unwrap().unwrap(), + Duration::from_secs(156) + ); + } + + #[test] + fn it_should_make_the_current_extent() { + assert_eq!( + DefaultClockTimeExtentMaker::now(&Duration::from_secs(2)).unwrap().unwrap(), + TimeExtent::from_sec(2, &0) + ); + + DefaultClock::local_set(&DurationSinceUnixEpoch::from_secs(12387687123)); + + assert_eq!( + DefaultClockTimeExtentMaker::now(&Duration::from_secs(2)).unwrap().unwrap(), + TimeExtent::from_sec(2, &6193843561) + ); + } +} From 
0cab30cd2f60d1bf6e2d2d9296a75451f4319d53 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 21 Sep 2022 14:48:34 +0200 Subject: [PATCH 074/435] time extent: add tests and some corrections * Write tests for all functions. * Rename `now_add` to `now_after` and `now_sub` to `now_before`. * Rework time extent totals calculations to work with larger numbers. --- src/protocol/clock/timeextent.rs | 437 +++++++++++++++++++++++++++---- 1 file changed, 383 insertions(+), 54 deletions(-) diff --git a/src/protocol/clock/timeextent.rs b/src/protocol/clock/timeextent.rs index eb050179e..dfd00efe7 100644 --- a/src/protocol/clock/timeextent.rs +++ b/src/protocol/clock/timeextent.rs @@ -10,11 +10,11 @@ pub trait Extent: Sized + Default { fn new(unit: &Self::Base, count: &Self::Multiplier) -> Self; - fn add(&self, add: Self::Multiplier) -> Result; - fn sub(&self, sub: Self::Multiplier) -> Result; + fn increase(&self, add: Self::Multiplier) -> Result; + fn decrease(&self, sub: Self::Multiplier) -> Result; - fn total(&self) -> Result, TryFromIntError>; - fn total_next(&self) -> Result, TryFromIntError>; + fn total(&self) -> Option>; + fn total_next(&self) -> Option>; } pub type TimeExtentBase = Duration; @@ -36,6 +36,20 @@ impl TimeExtent { } } +fn checked_duration_from_nanos(time: u128) -> Result { + const NANOS_PER_SEC: u32 = 1_000_000_000; + + let secs = time.div_euclid(NANOS_PER_SEC as u128); + let nanos = time.rem_euclid(NANOS_PER_SEC as u128); + + assert!(nanos < NANOS_PER_SEC as u128); + + match u64::try_from(secs) { + Err(error) => Err(error), + Ok(secs) => Ok(Duration::new(secs, nanos.try_into().unwrap())), + } +} + impl Extent for TimeExtent { type Base = TimeExtentBase; type Multiplier = TimeExtentMultiplier; @@ -48,7 +62,7 @@ impl Extent for TimeExtent { } } - fn add(&self, add: Self::Multiplier) -> Result { + fn increase(&self, add: Self::Multiplier) -> Result { match self.amount.checked_add(add) { None => Err(IntErrorKind::PosOverflow), Some(amount) => Ok(Self { 
@@ -58,7 +72,7 @@ impl Extent for TimeExtent { } } - fn sub(&self, sub: Self::Multiplier) -> Result { + fn decrease(&self, sub: Self::Multiplier) -> Result { match self.amount.checked_sub(sub) { None => Err(IntErrorKind::NegOverflow), Some(amount) => Ok(Self { @@ -68,24 +82,18 @@ impl Extent for TimeExtent { } } - fn total(&self) -> Result, TryFromIntError> { - match u32::try_from(self.amount) { - Err(error) => Err(error), - Ok(amount) => Ok(self.increment.checked_mul(amount)), - } + fn total(&self) -> Option> { + self.increment + .as_nanos() + .checked_mul(self.amount as u128) + .map(checked_duration_from_nanos) } - fn total_next(&self) -> Result, TryFromIntError> { - match u32::try_from(self.amount) { - Err(e) => Err(e), - Ok(amount) => match amount.checked_add(1) { - None => Ok(None), - Some(amount) => match self.increment.checked_mul(amount) { - None => Ok(None), - Some(extent) => Ok(Some(extent)), - }, - }, - } + fn total_next(&self) -> Option> { + self.increment + .as_nanos() + .checked_mul((self.amount as u128) + 1) + .map(checked_duration_from_nanos) } } @@ -103,7 +111,7 @@ where }) } - fn now_add(increment: &TimeExtentBase, add_time: &Duration) -> Option> { + fn now_after(increment: &TimeExtentBase, add_time: &Duration) -> Option> { match Clock::add(add_time) { None => None, Some(time) => { @@ -116,7 +124,7 @@ where } } } - fn now_sub(increment: &TimeExtentBase, sub_time: &Duration) -> Option> { + fn now_before(increment: &TimeExtentBase, sub_time: &Duration) -> Option> { match Clock::sub(sub_time) { None => None, Some(time) => { @@ -149,37 +157,358 @@ pub type DefaultClockTimeExtentMaker = StoppedClockTimeExtentMaker; #[cfg(test)] mod test { - use std::time::Duration; - - use crate::protocol::clock::timeextent::{DefaultClockTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent}; + use crate::protocol::clock::timeextent::{ + checked_duration_from_nanos, DefaultClockTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent, TimeExtentBase, + TimeExtentProduct, + }; 
use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedTime}; - #[test] - fn it_should_get_the_total_duration() { - assert_eq!(TimeExtent::default().total().unwrap().unwrap(), Duration::ZERO); - - assert_eq!( - TimeExtent::from_sec(12, &12).total().unwrap().unwrap(), - Duration::from_secs(144) - ); - assert_eq!( - TimeExtent::from_sec(12, &12).total_next().unwrap().unwrap(), - Duration::from_secs(156) - ); - } - - #[test] - fn it_should_make_the_current_extent() { - assert_eq!( - DefaultClockTimeExtentMaker::now(&Duration::from_secs(2)).unwrap().unwrap(), - TimeExtent::from_sec(2, &0) - ); - - DefaultClock::local_set(&DurationSinceUnixEpoch::from_secs(12387687123)); - - assert_eq!( - DefaultClockTimeExtentMaker::now(&Duration::from_secs(2)).unwrap().unwrap(), - TimeExtent::from_sec(2, &6193843561) - ); + const TIME_EXTENT_VAL: TimeExtent = TimeExtent::from_sec(2, &239812388723); + + mod fn_checked_duration_from_nanos { + use std::time::Duration; + + use super::*; + + const NANOS_PER_SEC: u32 = 1_000_000_000; + + #[test] + fn it_should_return_a_duration() { + assert_eq!(checked_duration_from_nanos(0).unwrap(), Duration::from_micros(0)); + assert_eq!( + checked_duration_from_nanos(1232143214343432).unwrap(), + Duration::from_nanos(1232143214343432) + ); + assert_eq!( + checked_duration_from_nanos(u64::MAX as u128).unwrap(), + Duration::from_nanos(u64::MAX) + ); + assert_eq!( + checked_duration_from_nanos(TIME_EXTENT_VAL.amount as u128 * NANOS_PER_SEC as u128).unwrap(), + Duration::from_secs(TIME_EXTENT_VAL.amount) + ); + } + + #[test] + fn it_should_return_tryfrom_int_error() { + assert_eq!( + checked_duration_from_nanos(u128::MAX).unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } + } + + mod time_extent_from_sec { + use super::*; + + #[test] + fn it_should_make_time_extent() { + assert_eq!(TIME_EXTENT_VAL.increment, TimeExtentBase::from_secs(2)); + assert_eq!(TIME_EXTENT_VAL.amount, 239812388723); + } + } + + mod time_extent_default { 
+ use super::*; + + #[test] + fn it_should_make_time_extent() { + let time_extent_default = TimeExtent::default(); + assert_eq!(time_extent_default.increment, TimeExtentBase::ZERO); + assert_eq!(time_extent_default.amount, 0); + } + } + + mod time_extent_new { + use super::*; + + #[test] + fn it_should_make_time_extent() { + let time_extent = TimeExtent::new(&TimeExtentBase::from_millis(2), &TIME_EXTENT_VAL.amount); + assert_eq!(time_extent.increment, TimeExtentBase::from_millis(2)); + assert_eq!(time_extent.amount, TIME_EXTENT_VAL.amount); + } + } + + mod time_extent_increase { + use std::num::IntErrorKind; + + use super::*; + + #[test] + fn it_should_return_increased() { + let time_extent_default = TimeExtent::default(); + let time_extent = TimeExtent::new(&TimeExtentBase::from_millis(2), &TIME_EXTENT_VAL.amount); + + let time_extent_default_increase = TimeExtent { + increment: TimeExtentBase::ZERO, + amount: 50, + }; + let time_extent_increase = TimeExtent { + increment: TimeExtentBase::from_millis(2), + amount: TIME_EXTENT_VAL.amount + 50, + }; + let time_extent_from_sec_increase = TimeExtent { + increment: TIME_EXTENT_VAL.increment, + amount: TIME_EXTENT_VAL.amount + 50, + }; + + assert_eq!(time_extent_default.increase(50).unwrap(), time_extent_default_increase); + assert_eq!(time_extent.increase(50).unwrap(), time_extent_increase); + assert_eq!(TIME_EXTENT_VAL.increase(50).unwrap(), time_extent_from_sec_increase); + } + + #[test] + fn it_should_postive_overflow() { + assert_eq!(TIME_EXTENT_VAL.increase(u64::MAX), Err(IntErrorKind::PosOverflow)); + } + } + + mod time_extent_decrease { + use std::num::IntErrorKind; + + use super::*; + + #[test] + fn it_should_return_decreased() { + let time_extent_default = TimeExtent::default(); + let time_extent = TimeExtent::new(&TimeExtentBase::from_millis(2), &TIME_EXTENT_VAL.amount); + + let time_extent_default_decrease = TimeExtent { + increment: TimeExtentBase::ZERO, + amount: 0, + }; + let time_extent_decrease = 
TimeExtent { + increment: TimeExtentBase::from_millis(2), + amount: TIME_EXTENT_VAL.amount - 50, + }; + let time_extent_from_sec_decrease = TimeExtent { + increment: TIME_EXTENT_VAL.increment, + amount: TIME_EXTENT_VAL.amount - 50, + }; + + assert_eq!(time_extent_default.decrease(0).unwrap(), time_extent_default_decrease); + assert_eq!(time_extent.decrease(50).unwrap(), time_extent_decrease); + assert_eq!(TIME_EXTENT_VAL.decrease(50).unwrap(), time_extent_from_sec_decrease); + } + + #[test] + fn it_should_return_an_negitive_overflow() { + assert_eq!(TIME_EXTENT_VAL.decrease(u64::MAX), Err(IntErrorKind::NegOverflow)); + } + } + + mod time_extent_total { + use super::*; + + #[test] + fn it_should_return_total() { + let time_extent_default = TimeExtent::default(); + let time_extent = TimeExtent::new(&TimeExtentBase::from_millis(2), &(TIME_EXTENT_VAL.amount / 1000)); + + assert_eq!(time_extent_default.total().unwrap().unwrap(), TimeExtentProduct::ZERO); + assert_eq!( + time_extent.total().unwrap().unwrap(), + TimeExtentProduct::new(479624, 776000000) + ); + assert_eq!( + TIME_EXTENT_VAL.total().unwrap().unwrap(), + TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) + ); + } + + #[test] + fn it_should_return_none() { + let time_extent_max = TimeExtent { + increment: TimeExtentBase::MAX, + amount: u64::MAX as u64, + }; + assert_eq!(time_extent_max.total(), None); + } + + #[test] + fn it_should_return_tryfrom_int_error() { + let time_extent_max = TimeExtent { + increment: TimeExtentBase::MAX, + amount: 2, + }; + assert_eq!( + time_extent_max.total().unwrap().unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } + } + + mod time_extent_total_next { + use super::*; + + #[test] + fn it_should_get_the_time_extent_total_next() { + let time_extent_default = TimeExtent::default(); + let time_extent = TimeExtent::new(&TimeExtentBase::from_millis(2), &TIME_EXTENT_VAL.amount); + + assert_eq!( + 
time_extent_default.total_next().unwrap().unwrap(), + TimeExtentProduct::from_secs(0) + ); + assert_eq!( + time_extent.total_next().unwrap().unwrap(), + TimeExtentProduct::new(479624777, 448000000) + ); + assert_eq!( + TIME_EXTENT_VAL.total_next().unwrap().unwrap(), + TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount + 1)) + ); + } + + #[test] + fn it_should_return_none() { + let time_extent_max = TimeExtent { + increment: TimeExtentBase::MAX, + amount: u64::MAX as u64, + }; + assert_eq!(time_extent_max.total_next(), None); + } + + #[test] + fn it_should_return_tryfrom_int_error() { + let time_extent_max = TimeExtent { + increment: TimeExtentBase::MAX, + amount: 2, + }; + assert_eq!( + time_extent_max.total_next().unwrap().unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } + } + + mod make_time_extent_now { + use super::*; + + #[test] + fn it_should_return_a_time_extent() { + assert_eq!( + DefaultClockTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), + TimeExtent { + increment: TIME_EXTENT_VAL.increment, + amount: 0 + } + ); + + DefaultClock::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); + + assert_eq!( + DefaultClockTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), + TIME_EXTENT_VAL + ); + } + + #[test] + fn it_should_return_none() { + assert_eq!(DefaultClockTimeExtentMaker::now(&TimeExtentBase::ZERO), None); + } + + #[test] + fn it_should_return_tryfrom_int_error() { + DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!( + DefaultClockTimeExtentMaker::now(&TimeExtentBase::from_millis(1)) + .unwrap() + .unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } + } + + mod make_time_extent_now_after { + use std::time::Duration; + + use super::*; + + #[test] + fn it_should_return_a_time_extent() { + assert_eq!( + DefaultClockTimeExtentMaker::now_after( + &TIME_EXTENT_VAL.increment, + &Duration::from_secs(TIME_EXTENT_VAL.amount * 
2) + ) + .unwrap() + .unwrap(), + TIME_EXTENT_VAL + ); + } + + #[test] + fn it_should_return_none() { + assert_eq!( + DefaultClockTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::ZERO), + None + ); + + DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!( + DefaultClockTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::MAX), + None + ); + } + + #[test] + fn it_should_return_tryfrom_int_error() { + DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!( + DefaultClockTimeExtentMaker::now_after(&TimeExtentBase::from_millis(1), &Duration::ZERO) + .unwrap() + .unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } + } + mod make_time_extent_now_before { + use std::time::Duration; + + use super::*; + + #[test] + fn it_should_return_a_time_extent() { + DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + + assert_eq!( + DefaultClockTimeExtentMaker::now_before( + &TimeExtentBase::from_secs(u32::MAX as u64), + &Duration::from_secs(u32::MAX as u64) + ) + .unwrap() + .unwrap(), + TimeExtent { + increment: TimeExtentBase::from_secs(u32::MAX as u64), + amount: 4294967296 + } + ); + } + + #[test] + fn it_should_return_none() { + assert_eq!( + DefaultClockTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::ZERO), + None + ); + + assert_eq!( + DefaultClockTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::MAX), + None + ); + } + + #[test] + fn it_should_return_tryfrom_int_error() { + DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!( + DefaultClockTimeExtentMaker::now_before(&TimeExtentBase::from_millis(1), &Duration::ZERO) + .unwrap() + .unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } } } From 8653f9a509431433c4aabd004cdf419ffd0ef62d Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 21 Sep 2022 15:02:11 +0200 Subject: [PATCH 075/435] time extent: rename `TimeExtentMaker` for clarity Rename: from `DefaultClockTimeExtentMaker` to `DefaultTimeExtentMaker`, and 
the associated types. Co-authored-by: Jose Celano --- src/protocol/clock/timeextent.rs | 48 ++++++++++++++------------------ 1 file changed, 21 insertions(+), 27 deletions(-) diff --git a/src/protocol/clock/timeextent.rs b/src/protocol/clock/timeextent.rs index dfd00efe7..0356b7f39 100644 --- a/src/protocol/clock/timeextent.rs +++ b/src/protocol/clock/timeextent.rs @@ -142,23 +142,23 @@ where #[derive(Debug)] pub struct TimeExtentMaker {} -pub type WorkingClockTimeExtentMaker = TimeExtentMaker<{ ClockType::WorkingClock as usize }>; -pub type StoppedClockTimeExtentMaker = TimeExtentMaker<{ ClockType::StoppedClock as usize }>; +pub type WorkingTimeExtentMaker = TimeExtentMaker<{ ClockType::WorkingClock as usize }>; +pub type StoppedTimeExtentMaker = TimeExtentMaker<{ ClockType::StoppedClock as usize }>; -impl MakeTimeExtent for WorkingClockTimeExtentMaker {} -impl MakeTimeExtent for StoppedClockTimeExtentMaker {} +impl MakeTimeExtent for WorkingTimeExtentMaker {} +impl MakeTimeExtent for StoppedTimeExtentMaker {} #[cfg(not(test))] -pub type DefaultClockTimeExtentMaker = WorkingClockTimeExtentMaker; +pub type DefaultTimeExtentMaker = WorkingTimeExtentMaker; #[cfg(test)] -pub type DefaultClockTimeExtentMaker = StoppedClockTimeExtentMaker; +pub type DefaultTimeExtentMaker = StoppedTimeExtentMaker; #[cfg(test)] mod test { use crate::protocol::clock::timeextent::{ - checked_duration_from_nanos, DefaultClockTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent, TimeExtentBase, + checked_duration_from_nanos, DefaultTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent, TimeExtentBase, TimeExtentProduct, }; use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedTime}; @@ -389,7 +389,7 @@ mod test { #[test] fn it_should_return_a_time_extent() { assert_eq!( - DefaultClockTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), + DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), TimeExtent { increment: 
TIME_EXTENT_VAL.increment, amount: 0 @@ -399,21 +399,21 @@ mod test { DefaultClock::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); assert_eq!( - DefaultClockTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), + DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), TIME_EXTENT_VAL ); } #[test] fn it_should_return_none() { - assert_eq!(DefaultClockTimeExtentMaker::now(&TimeExtentBase::ZERO), None); + assert_eq!(DefaultTimeExtentMaker::now(&TimeExtentBase::ZERO), None); } #[test] fn it_should_return_tryfrom_int_error() { DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( - DefaultClockTimeExtentMaker::now(&TimeExtentBase::from_millis(1)) + DefaultTimeExtentMaker::now(&TimeExtentBase::from_millis(1)) .unwrap() .unwrap_err(), u64::try_from(u128::MAX).unwrap_err() @@ -429,12 +429,9 @@ mod test { #[test] fn it_should_return_a_time_extent() { assert_eq!( - DefaultClockTimeExtentMaker::now_after( - &TIME_EXTENT_VAL.increment, - &Duration::from_secs(TIME_EXTENT_VAL.amount * 2) - ) - .unwrap() - .unwrap(), + DefaultTimeExtentMaker::now_after(&TIME_EXTENT_VAL.increment, &Duration::from_secs(TIME_EXTENT_VAL.amount * 2)) + .unwrap() + .unwrap(), TIME_EXTENT_VAL ); } @@ -442,22 +439,19 @@ mod test { #[test] fn it_should_return_none() { assert_eq!( - DefaultClockTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::ZERO), + DefaultTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::ZERO), None ); DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); - assert_eq!( - DefaultClockTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::MAX), - None - ); + assert_eq!(DefaultTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::MAX), None); } #[test] fn it_should_return_tryfrom_int_error() { DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( - DefaultClockTimeExtentMaker::now_after(&TimeExtentBase::from_millis(1), &Duration::ZERO) + 
DefaultTimeExtentMaker::now_after(&TimeExtentBase::from_millis(1), &Duration::ZERO) .unwrap() .unwrap_err(), u64::try_from(u128::MAX).unwrap_err() @@ -474,7 +468,7 @@ mod test { DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( - DefaultClockTimeExtentMaker::now_before( + DefaultTimeExtentMaker::now_before( &TimeExtentBase::from_secs(u32::MAX as u64), &Duration::from_secs(u32::MAX as u64) ) @@ -490,12 +484,12 @@ mod test { #[test] fn it_should_return_none() { assert_eq!( - DefaultClockTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::ZERO), + DefaultTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::ZERO), None ); assert_eq!( - DefaultClockTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::MAX), + DefaultTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::MAX), None ); } @@ -504,7 +498,7 @@ mod test { fn it_should_return_tryfrom_int_error() { DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( - DefaultClockTimeExtentMaker::now_before(&TimeExtentBase::from_millis(1), &Duration::ZERO) + DefaultTimeExtentMaker::now_before(&TimeExtentBase::from_millis(1), &Duration::ZERO) .unwrap() .unwrap_err(), u64::try_from(u128::MAX).unwrap_err() From 3d743edec410f7d0e96db37d66e01cab8b248ec4 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 21 Sep 2022 15:28:32 +0200 Subject: [PATCH 076/435] time extent: use snake_case for module name Rename: from timeextent to time_extent. 
Co-authored-by: Jose Celano --- src/protocol/clock/mod.rs | 2 +- src/protocol/clock/{timeextent.rs => time_extent.rs} | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) rename src/protocol/clock/{timeextent.rs => time_extent.rs} (99%) diff --git a/src/protocol/clock/mod.rs b/src/protocol/clock/mod.rs index cab7290e3..4e15950e6 100644 --- a/src/protocol/clock/mod.rs +++ b/src/protocol/clock/mod.rs @@ -247,4 +247,4 @@ mod stopped_clock { } } -pub mod timeextent; +pub mod time_extent; diff --git a/src/protocol/clock/timeextent.rs b/src/protocol/clock/time_extent.rs similarity index 99% rename from src/protocol/clock/timeextent.rs rename to src/protocol/clock/time_extent.rs index 0356b7f39..85b5257ad 100644 --- a/src/protocol/clock/timeextent.rs +++ b/src/protocol/clock/time_extent.rs @@ -157,7 +157,7 @@ pub type DefaultTimeExtentMaker = StoppedTimeExtentMaker; #[cfg(test)] mod test { - use crate::protocol::clock::timeextent::{ + use crate::protocol::clock::time_extent::{ checked_duration_from_nanos, DefaultTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent, TimeExtentBase, TimeExtentProduct, }; From e785a7fd8e0d0a393b1d312d07178ebef43dda25 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 21 Sep 2022 18:08:13 +0200 Subject: [PATCH 077/435] time extent: overhaul tests naming scheme --- src/protocol/clock/time_extent.rs | 558 ++++++++++++++++-------------- 1 file changed, 302 insertions(+), 256 deletions(-) diff --git a/src/protocol/clock/time_extent.rs b/src/protocol/clock/time_extent.rs index 85b5257ad..d0713645b 100644 --- a/src/protocol/clock/time_extent.rs +++ b/src/protocol/clock/time_extent.rs @@ -27,6 +27,15 @@ pub struct TimeExtent { pub amount: TimeExtentMultiplier, } +pub const ZERO: TimeExtent = TimeExtent { + increment: TimeExtentBase::ZERO, + amount: TimeExtentMultiplier::MIN, +}; +pub const MAX: TimeExtent = TimeExtent { + increment: TimeExtentBase::MAX, + amount: TimeExtentMultiplier::MAX, +}; + impl TimeExtent { pub const fn 
from_sec(seconds: u64, amount: &TimeExtentMultiplier) -> Self { Self { @@ -159,7 +168,7 @@ mod test { use crate::protocol::clock::time_extent::{ checked_duration_from_nanos, DefaultTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent, TimeExtentBase, - TimeExtentProduct, + TimeExtentMultiplier, TimeExtentProduct, MAX, ZERO, }; use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedTime}; @@ -173,8 +182,12 @@ mod test { const NANOS_PER_SEC: u32 = 1_000_000_000; #[test] - fn it_should_return_a_duration() { - assert_eq!(checked_duration_from_nanos(0).unwrap(), Duration::from_micros(0)); + fn it_should_give_zero_for_zero_input() { + assert_eq!(checked_duration_from_nanos(0).unwrap(), Duration::ZERO); + } + + #[test] + fn it_should_be_the_same_as_duration_implementation_for_u64_numbers() { assert_eq!( checked_duration_from_nanos(1232143214343432).unwrap(), Duration::from_nanos(1232143214343432) @@ -183,6 +196,10 @@ mod test { checked_duration_from_nanos(u64::MAX as u128).unwrap(), Duration::from_nanos(u64::MAX) ); + } + + #[test] + fn it_should_work_for_some_numbers_larger_than_u64() { assert_eq!( checked_duration_from_nanos(TIME_EXTENT_VAL.amount as u128 * NANOS_PER_SEC as u128).unwrap(), Duration::from_secs(TIME_EXTENT_VAL.amount) @@ -190,7 +207,7 @@ mod test { } #[test] - fn it_should_return_tryfrom_int_error() { + fn it_should_fail_for_numbers_that_are_too_large() { assert_eq!( checked_duration_from_nanos(u128::MAX).unwrap_err(), u64::try_from(u128::MAX).unwrap_err() @@ -198,311 +215,340 @@ mod test { } } - mod time_extent_from_sec { + mod time_extent { use super::*; - #[test] - fn it_should_make_time_extent() { - assert_eq!(TIME_EXTENT_VAL.increment, TimeExtentBase::from_secs(2)); - assert_eq!(TIME_EXTENT_VAL.amount, 239812388723); - } - } + mod fn_default { - mod time_extent_default { - use super::*; + use super::*; - #[test] - fn it_should_make_time_extent() { - let time_extent_default = TimeExtent::default(); - 
assert_eq!(time_extent_default.increment, TimeExtentBase::ZERO); - assert_eq!(time_extent_default.amount, 0); + #[test] + fn it_should_default_initialize_to_zero() { + assert_eq!(TimeExtent::default(), ZERO); + } } - } - mod time_extent_new { - use super::*; + mod fn_from_sec { + use super::*; - #[test] - fn it_should_make_time_extent() { - let time_extent = TimeExtent::new(&TimeExtentBase::from_millis(2), &TIME_EXTENT_VAL.amount); - assert_eq!(time_extent.increment, TimeExtentBase::from_millis(2)); - assert_eq!(time_extent.amount, TIME_EXTENT_VAL.amount); + #[test] + fn it_should_make_empty_for_zero() { + assert_eq!(TimeExtent::from_sec(u64::MIN, &TimeExtentMultiplier::MIN), ZERO); + } + #[test] + fn it_should_make_from_seconds() { + assert_eq!( + TimeExtent::from_sec(TIME_EXTENT_VAL.increment.as_secs(), &TIME_EXTENT_VAL.amount), + TIME_EXTENT_VAL + ); + } } - } - mod time_extent_increase { - use std::num::IntErrorKind; + mod fn_new { + use super::*; - use super::*; + #[test] + fn it_should_make_empty_for_zero() { + assert_eq!(TimeExtent::new(&TimeExtentBase::ZERO, &TimeExtentMultiplier::MIN), ZERO); + } - #[test] - fn it_should_return_increased() { - let time_extent_default = TimeExtent::default(); - let time_extent = TimeExtent::new(&TimeExtentBase::from_millis(2), &TIME_EXTENT_VAL.amount); - - let time_extent_default_increase = TimeExtent { - increment: TimeExtentBase::ZERO, - amount: 50, - }; - let time_extent_increase = TimeExtent { - increment: TimeExtentBase::from_millis(2), - amount: TIME_EXTENT_VAL.amount + 50, - }; - let time_extent_from_sec_increase = TimeExtent { - increment: TIME_EXTENT_VAL.increment, - amount: TIME_EXTENT_VAL.amount + 50, - }; - - assert_eq!(time_extent_default.increase(50).unwrap(), time_extent_default_increase); - assert_eq!(time_extent.increase(50).unwrap(), time_extent_increase); - assert_eq!(TIME_EXTENT_VAL.increase(50).unwrap(), time_extent_from_sec_increase); + #[test] + fn it_should_make_new() { + assert_eq!( + 
TimeExtent::new(&TimeExtentBase::from_millis(2), &TIME_EXTENT_VAL.amount), + TimeExtent { + increment: TimeExtentBase::from_millis(2), + amount: TIME_EXTENT_VAL.amount + } + ); + } } - #[test] - fn it_should_postive_overflow() { - assert_eq!(TIME_EXTENT_VAL.increase(u64::MAX), Err(IntErrorKind::PosOverflow)); - } - } + mod fn_increase { + use std::num::IntErrorKind; - mod time_extent_decrease { - use std::num::IntErrorKind; + use super::*; - use super::*; + #[test] + fn it_should_not_increase_for_zero() { + assert_eq!(ZERO.increase(0).unwrap(), ZERO); + } - #[test] - fn it_should_return_decreased() { - let time_extent_default = TimeExtent::default(); - let time_extent = TimeExtent::new(&TimeExtentBase::from_millis(2), &TIME_EXTENT_VAL.amount); - - let time_extent_default_decrease = TimeExtent { - increment: TimeExtentBase::ZERO, - amount: 0, - }; - let time_extent_decrease = TimeExtent { - increment: TimeExtentBase::from_millis(2), - amount: TIME_EXTENT_VAL.amount - 50, - }; - let time_extent_from_sec_decrease = TimeExtent { - increment: TIME_EXTENT_VAL.increment, - amount: TIME_EXTENT_VAL.amount - 50, - }; - - assert_eq!(time_extent_default.decrease(0).unwrap(), time_extent_default_decrease); - assert_eq!(time_extent.decrease(50).unwrap(), time_extent_decrease); - assert_eq!(TIME_EXTENT_VAL.decrease(50).unwrap(), time_extent_from_sec_decrease); - } + #[test] + fn it_should_increase() { + assert_eq!( + TIME_EXTENT_VAL.increase(50).unwrap(), + TimeExtent { + increment: TIME_EXTENT_VAL.increment, + amount: TIME_EXTENT_VAL.amount + 50, + } + ); + } - #[test] - fn it_should_return_an_negitive_overflow() { - assert_eq!(TIME_EXTENT_VAL.decrease(u64::MAX), Err(IntErrorKind::NegOverflow)); + #[test] + fn it_should_fail_when_attempting_to_increase_beyond_bounds() { + assert_eq!(TIME_EXTENT_VAL.increase(u64::MAX), Err(IntErrorKind::PosOverflow)); + } } - } - mod time_extent_total { - use super::*; + mod fn_decrease { + use std::num::IntErrorKind; - #[test] - fn 
it_should_return_total() { - let time_extent_default = TimeExtent::default(); - let time_extent = TimeExtent::new(&TimeExtentBase::from_millis(2), &(TIME_EXTENT_VAL.amount / 1000)); + use super::*; - assert_eq!(time_extent_default.total().unwrap().unwrap(), TimeExtentProduct::ZERO); - assert_eq!( - time_extent.total().unwrap().unwrap(), - TimeExtentProduct::new(479624, 776000000) - ); - assert_eq!( - TIME_EXTENT_VAL.total().unwrap().unwrap(), - TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) - ); - } + #[test] + fn it_should_not_decrease_for_zero() { + assert_eq!(ZERO.decrease(0).unwrap(), ZERO); + } - #[test] - fn it_should_return_none() { - let time_extent_max = TimeExtent { - increment: TimeExtentBase::MAX, - amount: u64::MAX as u64, - }; - assert_eq!(time_extent_max.total(), None); - } + #[test] + fn it_should_decrease() { + assert_eq!( + TIME_EXTENT_VAL.decrease(50).unwrap(), + TimeExtent { + increment: TIME_EXTENT_VAL.increment, + amount: TIME_EXTENT_VAL.amount - 50, + } + ); + } - #[test] - fn it_should_return_tryfrom_int_error() { - let time_extent_max = TimeExtent { - increment: TimeExtentBase::MAX, - amount: 2, - }; - assert_eq!( - time_extent_max.total().unwrap().unwrap_err(), - u64::try_from(u128::MAX).unwrap_err() - ); + #[test] + fn it_should_fail_when_attempting_to_decrease_beyond_bounds() { + assert_eq!(TIME_EXTENT_VAL.decrease(u64::MAX), Err(IntErrorKind::NegOverflow)); + } } - } - mod time_extent_total_next { - use super::*; + mod fn_total { + use super::*; - #[test] - fn it_should_get_the_time_extent_total_next() { - let time_extent_default = TimeExtent::default(); - let time_extent = TimeExtent::new(&TimeExtentBase::from_millis(2), &TIME_EXTENT_VAL.amount); + #[test] + fn it_should_be_zero_for_zero() { + assert_eq!(ZERO.total().unwrap().unwrap(), TimeExtentProduct::ZERO); + } - assert_eq!( - time_extent_default.total_next().unwrap().unwrap(), - TimeExtentProduct::from_secs(0) - ); - assert_eq!( - 
time_extent.total_next().unwrap().unwrap(), - TimeExtentProduct::new(479624777, 448000000) - ); - assert_eq!( - TIME_EXTENT_VAL.total_next().unwrap().unwrap(), - TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount + 1)) - ); - } + #[test] + fn it_should_give_a_total() { + assert_eq!( + TIME_EXTENT_VAL.total().unwrap().unwrap(), + TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) + ); + + assert_eq!( + TimeExtent::new(&TimeExtentBase::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) + .total() + .unwrap() + .unwrap(), + TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) + ); + + assert_eq!( + TimeExtent::new(&TimeExtentBase::from_secs(1), &(u64::MAX)) + .total() + .unwrap() + .unwrap(), + TimeExtentProduct::from_secs(u64::MAX) + ); + } - #[test] - fn it_should_return_none() { - let time_extent_max = TimeExtent { - increment: TimeExtentBase::MAX, - amount: u64::MAX as u64, - }; - assert_eq!(time_extent_max.total_next(), None); - } + #[test] + fn it_should_fail_when_too_large() { + assert_eq!(MAX.total(), None); + } - #[test] - fn it_should_return_tryfrom_int_error() { - let time_extent_max = TimeExtent { - increment: TimeExtentBase::MAX, - amount: 2, - }; - assert_eq!( - time_extent_max.total_next().unwrap().unwrap_err(), - u64::try_from(u128::MAX).unwrap_err() - ); + #[test] + fn it_should_fail_when_product_is_too_large() { + let time_extent = TimeExtent { + increment: MAX.increment, + amount: 2, + }; + assert_eq!( + time_extent.total().unwrap().unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } } - } - mod make_time_extent_now { - use super::*; + mod fn_total_next { + use super::*; - #[test] - fn it_should_return_a_time_extent() { - assert_eq!( - DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), - TimeExtent { - increment: TIME_EXTENT_VAL.increment, - amount: 0 - } - ); - - 
DefaultClock::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); + #[test] + fn it_should_be_zero_for_zero() { + assert_eq!(ZERO.total_next().unwrap().unwrap(), TimeExtentProduct::ZERO); + } - assert_eq!( - DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), - TIME_EXTENT_VAL - ); - } + #[test] + fn it_should_give_a_total() { + assert_eq!( + TIME_EXTENT_VAL.total_next().unwrap().unwrap(), + TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount + 1)) + ); + + assert_eq!( + TimeExtent::new(&TimeExtentBase::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) + .total_next() + .unwrap() + .unwrap(), + TimeExtentProduct::new( + TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount), + TimeExtentBase::from_millis(2).as_nanos().try_into().unwrap() + ) + ); + + assert_eq!( + TimeExtent::new(&TimeExtentBase::from_secs(1), &(u64::MAX - 1)) + .total_next() + .unwrap() + .unwrap(), + TimeExtentProduct::from_secs(u64::MAX) + ); + } - #[test] - fn it_should_return_none() { - assert_eq!(DefaultTimeExtentMaker::now(&TimeExtentBase::ZERO), None); - } + #[test] + fn it_should_fail_when_too_large() { + assert_eq!(MAX.total_next(), None); + } - #[test] - fn it_should_return_tryfrom_int_error() { - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); - assert_eq!( - DefaultTimeExtentMaker::now(&TimeExtentBase::from_millis(1)) - .unwrap() - .unwrap_err(), - u64::try_from(u128::MAX).unwrap_err() - ); + #[test] + fn it_should_fail_when_product_is_too_large() { + let time_extent = TimeExtent { + increment: MAX.increment, + amount: 2, + }; + assert_eq!( + time_extent.total_next().unwrap().unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } } } - mod make_time_extent_now_after { - use std::time::Duration; - + mod make_time_extent { use super::*; - #[test] - fn it_should_return_a_time_extent() { - assert_eq!( - DefaultTimeExtentMaker::now_after(&TIME_EXTENT_VAL.increment, 
&Duration::from_secs(TIME_EXTENT_VAL.amount * 2)) - .unwrap() - .unwrap(), - TIME_EXTENT_VAL - ); - } + mod fn_now { + use super::*; + + #[test] + fn it_should_give_a_time_extent() { + assert_eq!( + DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), + TimeExtent { + increment: TIME_EXTENT_VAL.increment, + amount: 0 + } + ); + + DefaultClock::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); + + assert_eq!( + DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), + TIME_EXTENT_VAL + ); + } - #[test] - fn it_should_return_none() { - assert_eq!( - DefaultTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::ZERO), - None - ); + #[test] + fn it_should_fail_for_zero() { + assert_eq!(DefaultTimeExtentMaker::now(&TimeExtentBase::ZERO), None); + } - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); - assert_eq!(DefaultTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::MAX), None); + #[test] + fn it_should_fail_if_amount_exceeds_bounds() { + DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!( + DefaultTimeExtentMaker::now(&TimeExtentBase::from_millis(1)) + .unwrap() + .unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } } - #[test] - fn it_should_return_tryfrom_int_error() { - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); - assert_eq!( - DefaultTimeExtentMaker::now_after(&TimeExtentBase::from_millis(1), &Duration::ZERO) + mod fn_now_after { + use std::time::Duration; + + use super::*; + + #[test] + fn it_should_give_a_time_extent() { + assert_eq!( + DefaultTimeExtentMaker::now_after( + &TIME_EXTENT_VAL.increment, + &Duration::from_secs(TIME_EXTENT_VAL.amount * 2) + ) .unwrap() - .unwrap_err(), - u64::try_from(u128::MAX).unwrap_err() - ); - } - } - mod make_time_extent_now_before { - use std::time::Duration; + .unwrap(), + TIME_EXTENT_VAL + ); + } - use super::*; + #[test] + fn it_should_fail_for_zero() { + assert_eq!( + 
DefaultTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::ZERO), + None + ); - #[test] - fn it_should_return_a_time_extent() { - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!(DefaultTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::MAX), None); + } - assert_eq!( - DefaultTimeExtentMaker::now_before( - &TimeExtentBase::from_secs(u32::MAX as u64), - &Duration::from_secs(u32::MAX as u64) - ) - .unwrap() - .unwrap(), - TimeExtent { - increment: TimeExtentBase::from_secs(u32::MAX as u64), - amount: 4294967296 - } - ); + #[test] + fn it_should_fail_if_amount_exceeds_bounds() { + DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!( + DefaultTimeExtentMaker::now_after(&TimeExtentBase::from_millis(1), &Duration::ZERO) + .unwrap() + .unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } } + mod fn_now_before { + use std::time::Duration; - #[test] - fn it_should_return_none() { - assert_eq!( - DefaultTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::ZERO), - None - ); + use super::*; - assert_eq!( - DefaultTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::MAX), - None - ); - } + #[test] + fn it_should_give_a_time_extent() { + DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); - #[test] - fn it_should_return_tryfrom_int_error() { - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); - assert_eq!( - DefaultTimeExtentMaker::now_before(&TimeExtentBase::from_millis(1), &Duration::ZERO) + assert_eq!( + DefaultTimeExtentMaker::now_before( + &TimeExtentBase::from_secs(u32::MAX as u64), + &Duration::from_secs(u32::MAX as u64) + ) .unwrap() - .unwrap_err(), - u64::try_from(u128::MAX).unwrap_err() - ); + .unwrap(), + TimeExtent { + increment: TimeExtentBase::from_secs(u32::MAX as u64), + amount: 4294967296 + } + ); + } + + #[test] + fn it_should_fail_for_zero() { + assert_eq!( + 
DefaultTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::ZERO), + None + ); + + assert_eq!( + DefaultTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::MAX), + None + ); + } + + #[test] + fn it_should_fail_if_amount_exceeds_bounds() { + DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!( + DefaultTimeExtentMaker::now_before(&TimeExtentBase::from_millis(1), &Duration::ZERO) + .unwrap() + .unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } } } } From 7abe0f5bde1e209553d1a1e2d6fe644cd46a9395 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 12 Sep 2022 16:56:51 +0100 Subject: [PATCH 078/435] test: add test for udp::handlers --- src/main.rs | 6 +- src/tracker/statistics.rs | 54 +- src/tracker/torrent.rs | 2 +- src/tracker/tracker.rs | 36 +- src/udp/handlers.rs | 1048 +++++++++++++++++++++++++++++++++++++ 5 files changed, 1119 insertions(+), 27 deletions(-) diff --git a/src/main.rs b/src/main.rs index 01121052a..47896ff43 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use log::info; +use torrust_tracker::tracker::statistics::StatsTracker; use torrust_tracker::tracker::tracker::TorrentTracker; use torrust_tracker::{logging, setup, static_time, Configuration}; @@ -19,8 +20,11 @@ async fn main() { } }; + // Initialize stats tracker + let stats_tracker = StatsTracker::new_running_instance(); + // Initialize Torrust tracker - let tracker = match TorrentTracker::new(config.clone()) { + let tracker = match TorrentTracker::new(config.clone(), Box::new(stats_tracker)) { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 85a2dbae9..cf801e1df 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -1,12 +1,12 @@ +use async_trait::async_trait; use std::sync::Arc; - use tokio::sync::mpsc::error::SendError; use tokio::sync::mpsc::Sender; use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; 
const CHANNEL_BUFFER_SIZE: usize = 65_535; -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub enum TrackerStatisticsEvent { Tcp4Announce, Tcp4Scrape, @@ -61,6 +61,12 @@ pub struct StatsTracker { } impl StatsTracker { + pub fn new_running_instance() -> Self { + let mut stats_tracker = Self::new(); + stats_tracker.run_worker(); + stats_tracker + } + pub fn new() -> Self { Self { channel_sender: None, @@ -68,18 +74,6 @@ impl StatsTracker { } } - pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { - self.stats.read().await - } - - pub async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>> { - if let Some(tx) = &self.channel_sender { - Some(tx.send(event).await) - } else { - None - } - } - pub fn run_worker(&mut self) { let (tx, mut rx) = mpsc::channel::(CHANNEL_BUFFER_SIZE); @@ -134,3 +128,35 @@ impl StatsTracker { }); } } + +#[async_trait] +pub trait TrackerStatisticsEventSender: Sync + Send { + async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>>; +} + +#[async_trait] +impl TrackerStatisticsEventSender for StatsTracker { + async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>> { + if let Some(tx) = &self.channel_sender { + Some(tx.send(event).await) + } else { + None + } + } +} + +#[async_trait] +pub trait TrackerStatisticsRepository: Sync + Send { + async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics>; +} + +#[async_trait] +impl TrackerStatisticsRepository for StatsTracker { + async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { + self.stats.read().await + } +} + +pub trait TrackerStatsService: TrackerStatisticsEventSender + TrackerStatisticsRepository {} + +impl TrackerStatsService for StatsTracker {} diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 7404f63af..f12f0a622 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -8,7 +8,7 @@ use crate::peer::TorrentPeer; use crate::protocol::clock::{DefaultClock, TimeNow}; use 
crate::{PeerId, MAX_SCRAPE_TORRENTS}; -#[derive(Serialize, Deserialize, Clone)] +#[derive(Serialize, Deserialize, Clone, Debug)] pub struct TorrentEntry { #[serde(skip)] pub peers: std::collections::BTreeMap, diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index 9a242e41a..5499eebeb 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -12,7 +12,7 @@ use crate::databases::database::Database; use crate::mode::TrackerMode; use crate::peer::TorrentPeer; use crate::protocol::common::InfoHash; -use crate::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsEvent}; +use crate::statistics::{TrackerStatistics, TrackerStatisticsEvent, TrackerStatsService}; use crate::tracker::key; use crate::tracker::key::AuthKey; use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; @@ -24,19 +24,13 @@ pub struct TorrentTracker { keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, - stats_tracker: StatsTracker, + stats_tracker: Box, database: Box, } impl TorrentTracker { - pub fn new(config: Arc) -> Result { + pub fn new(config: Arc, stats_tracker: Box) -> Result { let database = database::connect_database(&config.db_driver, &config.db_path)?; - let mut stats_tracker = StatsTracker::new(); - - // starts a thread for updating tracker stats - if config.tracker_usage_statistics { - stats_tracker.run_worker(); - } Ok(TorrentTracker { config: config.clone(), @@ -96,11 +90,20 @@ impl TorrentTracker { // Adding torrents is not relevant to public trackers. 
pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { - self.database.add_info_hash_to_whitelist(info_hash.clone()).await?; - self.whitelist.write().await.insert(info_hash.clone()); + self.add_torrent_to_database_whitelist(info_hash).await?; + self.add_torrent_to_memory_whitelist(info_hash).await; Ok(()) } + async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + self.database.add_info_hash_to_whitelist(*info_hash).await?; + Ok(()) + } + + pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { + self.whitelist.write().await.insert(*info_hash) + } + // Removing torrents is not relevant to public trackers. pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { self.database.remove_info_hash_from_whitelist(info_hash.clone()).await?; @@ -177,6 +180,7 @@ impl TorrentTracker { Ok(()) } + /// Get all torrent peers for a given torrent filtering out the peer with the client address pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { let read_lock = self.torrents.read().await; @@ -186,6 +190,16 @@ impl TorrentTracker { } } + /// Get all torrent peers for a given torrent + pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { + let read_lock = self.torrents.read().await; + + match read_lock.get(info_hash) { + None => vec![], + Some(entry) => entry.get_peers(None).into_iter().cloned().collect(), + } + } + pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &TorrentPeer) -> TorrentStats { let mut torrents = self.torrents.write().await; diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 907dac0bc..3c4074eae 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -236,3 +236,1051 @@ fn handle_error(e: ServerError, transaction_id: TransactionId) -> Response { message: message.into(), }) } + 
+#[cfg(test)] +mod tests { + use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::Arc, + }; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use async_trait::async_trait; + use tokio::sync::{mpsc::error::SendError, RwLock, RwLockReadGuard}; + + use crate::{ + mode::TrackerMode, + peer::TorrentPeer, + protocol::clock::{DefaultClock, Time}, + statistics::{ + StatsTracker, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender, TrackerStatisticsRepository, + TrackerStatsService, + }, + tracker::tracker::TorrentTracker, + Configuration, PeerId, + }; + + fn default_tracker_config() -> Arc { + Arc::new(Configuration::default()) + } + + fn initialized_public_tracker() -> Arc { + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Public).into()); + Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_running_instance())).unwrap()) + } + + fn initialized_private_tracker() -> Arc { + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Private).into()); + Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_running_instance())).unwrap()) + } + + fn initialized_whitelisted_tracker() -> Arc { + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Listed).into()); + Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_running_instance())).unwrap()) + } + + fn sample_ipv4_remote_addr() -> SocketAddr { + sample_ipv4_socket_address() + } + + fn sample_ipv6_remote_addr() -> SocketAddr { + sample_ipv6_socket_address() + } + + fn sample_ipv4_socket_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + } + + fn sample_ipv6_socket_address() -> SocketAddr { + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + } + + struct TorrentPeerBuilder { + peer: TorrentPeer, + } + + impl TorrentPeerBuilder { + pub fn 
default() -> TorrentPeerBuilder { + let default_peer = TorrentPeer { + peer_id: PeerId([255u8; 20]), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DefaultClock::now(), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + TorrentPeerBuilder { peer: default_peer } + } + + pub fn with_peer_id(mut self, peer_id: PeerId) -> Self { + self.peer.peer_id = peer_id; + self + } + + pub fn with_peer_addr(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; + self + } + + pub fn with_bytes_left(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes(left); + self + } + + pub fn into(self) -> TorrentPeer { + self.peer + } + } + + struct TrackerStatsServiceMock { + stats: Arc>, + expected_event: Option, + } + + impl TrackerStatsServiceMock { + fn new() -> Self { + Self { + stats: Arc::new(RwLock::new(TrackerStatistics::new())), + expected_event: None, + } + } + + fn should_throw_event(&mut self, expected_event: TrackerStatisticsEvent) { + self.expected_event = Some(expected_event); + } + } + + #[async_trait] + impl TrackerStatisticsEventSender for TrackerStatsServiceMock { + async fn send_event(&self, _event: TrackerStatisticsEvent) -> Option>> { + if self.expected_event.is_some() { + assert_eq!(_event, *self.expected_event.as_ref().unwrap()); + } + None + } + } + + #[async_trait] + impl TrackerStatisticsRepository for TrackerStatsServiceMock { + async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { + self.stats.read().await + } + } + + impl TrackerStatsService for TrackerStatsServiceMock {} + + struct TrackerConfigurationBuilder { + configuration: Configuration, + } + + impl TrackerConfigurationBuilder { + pub fn default() -> TrackerConfigurationBuilder { + let default_configuration = Configuration::default(); + TrackerConfigurationBuilder { + configuration: default_configuration, + } + } + + pub fn 
with_external_ip(mut self, external_ip: &str) -> Self { + self.configuration.external_ip = Some(external_ip.to_owned()); + self + } + + pub fn with_mode(mut self, mode: TrackerMode) -> Self { + self.configuration.mode = mode; + self + } + + pub fn into(self) -> Configuration { + self.configuration + } + } + + mod connect_request { + + use std::sync::Arc; + + use crate::{ + protocol::utils::get_connection_id, + statistics::TrackerStatisticsEvent, + tracker::tracker::TorrentTracker, + udp::{ + handle_connect, + handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}, + }, + }; + use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; + + use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr, TrackerStatsServiceMock}; + + fn sample_connect_request() -> ConnectRequest { + ConnectRequest { + transaction_id: TransactionId(0i32), + } + } + + #[tokio::test] + async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { + let request = ConnectRequest { + transaction_id: TransactionId(0i32), + }; + + let response = handle_connect(sample_ipv4_remote_addr(), &request, initialized_public_tracker()) + .await + .unwrap(); + + assert_eq!( + response, + Response::Connect(ConnectResponse { + connection_id: get_connection_id(&sample_ipv4_remote_addr()), + transaction_id: request.transaction_id + }) + ); + } + + #[tokio::test] + async fn a_connect_response_should_contain_a_new_connection_id() { + let request = ConnectRequest { + transaction_id: TransactionId(0i32), + }; + + let response = handle_connect(sample_ipv4_remote_addr(), &request, initialized_public_tracker()) + .await + .unwrap(); + + assert_eq!( + response, + Response::Connect(ConnectResponse { + connection_id: get_connection_id(&sample_ipv4_remote_addr()), + transaction_id: request.transaction_id + }) + ); + } + + #[tokio::test] + async fn 
it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { + let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + + let client_socket_address = sample_ipv4_socket_address(); + tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp4Connect); + + let torrent_tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + handle_connect(client_socket_address, &sample_connect_request(), torrent_tracker) + .await + .unwrap(); + } + + #[tokio::test] + async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { + let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + + tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp6Connect); + + let torrent_tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), torrent_tracker) + .await + .unwrap(); + } + } + + mod announce_request { + + use std::net::Ipv4Addr; + + use aquatic_udp_protocol::{ + AnnounceEvent, AnnounceRequest, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, TransactionId, + }; + + use crate::{protocol::utils::get_connection_id, udp::handlers::tests::sample_ipv4_remote_addr}; + + struct AnnounceRequestBuilder { + request: AnnounceRequest, + } + + impl AnnounceRequestBuilder { + pub fn default() -> AnnounceRequestBuilder { + let client_ip = Ipv4Addr::new(126, 0, 0, 1); + let client_port = 8080; + let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); + + let default_request = AnnounceRequest { + connection_id: get_connection_id(&sample_ipv4_remote_addr()), + transaction_id: TransactionId(0i32), + info_hash: info_hash_aquatic, + peer_id: AquaticPeerId([255u8; 20]), + bytes_downloaded: NumberOfBytes(0i64), + bytes_uploaded: NumberOfBytes(0i64), + bytes_left: 
NumberOfBytes(0i64), + event: AnnounceEvent::Started, + ip_address: Some(client_ip), + key: PeerKey(0u32), + peers_wanted: NumberOfPeers(1i32), + port: Port(client_port), + }; + AnnounceRequestBuilder { + request: default_request, + } + } + + pub fn with_info_hash(mut self, info_hash: aquatic_udp_protocol::InfoHash) -> Self { + self.request.info_hash = info_hash; + self + } + + pub fn with_peer_id(mut self, peer_id: AquaticPeerId) -> Self { + self.request.peer_id = peer_id; + self + } + + pub fn with_ip_address(mut self, ip_address: Ipv4Addr) -> Self { + self.request.ip_address = Some(ip_address); + self + } + + pub fn with_port(mut self, port: u16) -> Self { + self.request.port = Port(port); + self + } + + pub fn into(self) -> AnnounceRequest { + self.request + } + } + + mod using_ipv4 { + + use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::Arc, + }; + + use aquatic_udp_protocol::{ + AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, NumberOfPeers, PeerId as AquaticPeerId, + Response, ResponsePeer, + }; + + use crate::{ + statistics::TrackerStatisticsEvent, + tracker::tracker::TorrentTracker, + udp::{ + handle_announce, + handlers::tests::{ + announce_request::AnnounceRequestBuilder, default_tracker_config, initialized_public_tracker, + sample_ipv4_socket_address, TorrentPeerBuilder, TrackerStatsServiceMock, + }, + }, + PeerId, + }; + + #[tokio::test] + async fn an_announced_peer_should_be_added_to_the_tracker() { + let tracker = initialized_public_tracker(); + + let client_ip = Ipv4Addr::new(126, 0, 0, 1); + let client_port = 8080; + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let request = AnnounceRequestBuilder::default() + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip) + .with_port(client_port) + .into(); + + let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + handle_announce(remote_addr, &request, 
tracker.clone()).await.unwrap(); + + let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + + let expected_peer = TorrentPeerBuilder::default() + .with_peer_id(PeerId(peer_id.0)) + .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip), client_port)) + .into(); + + assert_eq!(peers[0], expected_peer); + } + + #[tokio::test] + async fn the_announced_peer_should_not_be_included_in_the_response() { + let request = AnnounceRequestBuilder::default().into(); + let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + + let response = handle_announce(remote_addr, &request, initialized_public_tracker()) + .await + .unwrap(); + + let empty_peer_vector: Vec> = vec![]; + assert_eq!( + response, + Response::from(AnnounceResponse { + transaction_id: request.transaction_id, + announce_interval: AnnounceInterval(120i32), + leechers: NumberOfPeers(0i32), + seeders: NumberOfPeers(1i32), + peers: empty_peer_vector + }) + ); + } + + #[tokio::test] + async fn the_tracker_should_always_use_the_remote_client_ip_but_not_the_port_in_the_udp_request_header_instead_of_the_peer_address_in_the_announce_request( + ) { + // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): + // "Do note that most trackers will only honor the IP address field under limited circumstances." 
+ + let tracker = initialized_public_tracker(); + + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + let client_port = 8080; + + let remote_client_ip = Ipv4Addr::new(126, 0, 0, 1); + let remote_client_port = 8081; + let peer_address = Ipv4Addr::new(126, 0, 0, 2); + + let request = AnnounceRequestBuilder::default() + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(peer_address) + .with_port(client_port) + .into(); + + let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); + handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + + let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + + assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); + } + + async fn add_a_torrent_peer_using_ipv6(tracker: Arc) { + let info_hash = AquaticInfoHash([0u8; 20]); + + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let client_port = 8080; + let peer_id = AquaticPeerId([255u8; 20]); + + let peer_using_ipv6 = TorrentPeerBuilder::default() + .with_peer_id(PeerId(peer_id.0)) + .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) + .into(); + + tracker + .update_torrent_with_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv6) + .await; + } + + async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { + let request = AnnounceRequestBuilder::default().into(); + let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + let response = handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + response + } + + #[tokio::test] + async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { + let tracker = initialized_public_tracker(); + + add_a_torrent_peer_using_ipv6(tracker.clone()).await; + + let response = 
announce_a_new_peer_using_ipv4(tracker.clone()).await; + + // The response should not contain the peer using IPV6 + let peers: Option>> = match response { + Response::AnnounceIpv6(announce_response) => Some(announce_response.peers), + _ => None, + }; + let no_ipv6_peers = peers.is_none(); + assert!(no_ipv6_peers); + } + + #[tokio::test] + async fn should_send_the_upd4_announce_event() { + let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + + tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp4Announce); + + let tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + handle_announce( + sample_ipv4_socket_address(), + &AnnounceRequestBuilder::default().into(), + tracker.clone(), + ) + .await + .unwrap(); + } + + mod from_a_loopback_ip { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + + use crate::{ + udp::{ + handle_announce, + handlers::tests::{ + announce_request::AnnounceRequestBuilder, initialized_public_tracker, TorrentPeerBuilder, + }, + }, + PeerId, + }; + + #[tokio::test] + async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { + let tracker = initialized_public_tracker(); + + let client_ip = Ipv4Addr::new(127, 0, 0, 1); + let client_port = 8080; + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let request = AnnounceRequestBuilder::default() + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip) + .with_port(client_port) + .into(); + + let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + + let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + + let external_ip_in_tracker_configuration = + tracker.config.external_ip.clone().unwrap().parse::().unwrap(); + + let 
expected_peer = TorrentPeerBuilder::default() + .with_peer_id(PeerId(peer_id.0)) + .with_peer_addr(SocketAddr::new(IpAddr::V4(external_ip_in_tracker_configuration), client_port)) + .into(); + + assert_eq!(peers[0], expected_peer); + } + } + } + + mod using_ipv6 { + + use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::Arc, + }; + + use aquatic_udp_protocol::{ + AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, NumberOfPeers, PeerId as AquaticPeerId, + Response, ResponsePeer, + }; + + use crate::{ + statistics::TrackerStatisticsEvent, + tracker::tracker::TorrentTracker, + udp::{ + handle_announce, + handlers::tests::{ + announce_request::AnnounceRequestBuilder, default_tracker_config, initialized_public_tracker, + sample_ipv6_remote_addr, TorrentPeerBuilder, TrackerStatsServiceMock, + }, + }, + PeerId, + }; + + #[tokio::test] + async fn an_announced_peer_should_be_added_to_the_tracker() { + let tracker = initialized_public_tracker(); + + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let client_port = 8080; + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let request = AnnounceRequestBuilder::default() + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip_v4) + .with_port(client_port) + .into(); + + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + + let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + + let expected_peer = TorrentPeerBuilder::default() + .with_peer_id(PeerId(peer_id.0)) + .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) + .into(); + + assert_eq!(peers[0], expected_peer); + } + + #[tokio::test] + async fn the_announced_peer_should_not_be_included_in_the_response() { + let request = AnnounceRequestBuilder::default().into(); + let client_ip_v4 = 
Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); + + let response = handle_announce(remote_addr, &request, initialized_public_tracker()) + .await + .unwrap(); + + let empty_peer_vector: Vec> = vec![]; + assert_eq!( + response, + Response::from(AnnounceResponse { + transaction_id: request.transaction_id, + announce_interval: AnnounceInterval(120i32), + leechers: NumberOfPeers(0i32), + seeders: NumberOfPeers(1i32), + peers: empty_peer_vector + }) + ); + } + + #[tokio::test] + async fn the_tracker_should_always_use_the_remote_client_ip_but_not_the_port_in_the_udp_request_header_instead_of_the_peer_address_in_the_announce_request( + ) { + // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): + // "Do note that most trackers will only honor the IP address field under limited circumstances." + + let tracker = initialized_public_tracker(); + + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + let client_port = 8080; + + let remote_client_ip = "::100".parse().unwrap(); // IPV4 ::0.0.1.0 -> IPV6 = ::100 = ::ffff:0:100 = 0:0:0:0:0:ffff:0:0100 + let remote_client_port = 8081; + let peer_address = "126.0.0.1".parse().unwrap(); + + let request = AnnounceRequestBuilder::default() + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(peer_address) + .with_port(client_port) + .into(); + + let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); + handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + + let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + + // When using IPv6 the tracker converts the remote client ip into a IPv4 address + assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); + } + + async fn add_a_torrent_peer_using_ipv4(tracker: Arc) { + let info_hash = AquaticInfoHash([0u8; 20]); + + 
let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_port = 8080; + let peer_id = AquaticPeerId([255u8; 20]); + + let peer_using_ipv4 = TorrentPeerBuilder::default() + .with_peer_id(PeerId(peer_id.0)) + .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) + .into(); + + tracker + .update_torrent_with_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv4) + .await; + } + + async fn announce_a_new_peer_using_ipv6(tracker: Arc) -> Response { + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let client_port = 8080; + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + let request = AnnounceRequestBuilder::default().into(); + let response = handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + response + } + + #[tokio::test] + async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { + let tracker = initialized_public_tracker(); + + add_a_torrent_peer_using_ipv4(tracker.clone()).await; + + let response = announce_a_new_peer_using_ipv6(tracker.clone()).await; + + // The response should not contain the peer using IPV4 + let peers: Option>> = match response { + Response::AnnounceIpv4(announce_response) => Some(announce_response.peers), + _ => None, + }; + let no_ipv4_peers = peers.is_none(); + assert!(no_ipv4_peers); + } + + #[tokio::test] + async fn should_send_the_upd6_announce_event() { + let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + + tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp6Announce); + + let tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + handle_announce( + sample_ipv6_remote_addr(), + &AnnounceRequestBuilder::default().into(), + tracker.clone(), + ) + .await + .unwrap(); + } + + mod from_a_loopback_ip { + use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + 
sync::Arc, + }; + + use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + + use crate::{ + statistics::StatsTracker, + tracker::tracker::TorrentTracker, + udp::{ + handle_announce, + handlers::tests::{announce_request::AnnounceRequestBuilder, TrackerConfigurationBuilder}, + }, + }; + + #[tokio::test] + async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); + let tracker = + Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_running_instance())).unwrap()); + + let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); + let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + + let client_ip_v4 = loopback_ipv4; + let client_ip_v6 = loopback_ipv6; + let client_port = 8080; + + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let request = AnnounceRequestBuilder::default() + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip_v4) + .with_port(client_port) + .into(); + + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + + let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + + let _external_ip_in_tracker_configuration = + tracker.config.external_ip.clone().unwrap().parse::().unwrap(); + + // There's a special type of IPv6 addresses that provide compatibility with IPv4. + // The last 32 bits of these addresses represent an IPv4, and are represented like this: + // 1111:2222:3333:4444:5555:6666:1.2.3.4 + // + // ::127.0.0.1 is the IPV6 representation for the IPV4 address 127.0.0.1. 
+ assert_eq!(Ok(peers[0].peer_addr.ip()), "::126.0.0.1".parse()); + } + } + } + } + + mod scrape_request { + use std::{net::SocketAddr, sync::Arc}; + + use aquatic_udp_protocol::{ + InfoHash, NumberOfDownloads, NumberOfPeers, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, + TransactionId, + }; + + use crate::{ + protocol::utils::get_connection_id, + tracker::tracker::TorrentTracker, + udp::{ + handle_scrape, + handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}, + }, + PeerId, + }; + + use super::TorrentPeerBuilder; + + fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { + TorrentScrapeStatistics { + seeders: NumberOfPeers(0), + completed: NumberOfDownloads(0), + leechers: NumberOfPeers(0), + } + } + + #[tokio::test] + async fn should_return_no_stats_when_the_tracker_does_not_have_any_torrent() { + let remote_addr = sample_ipv4_remote_addr(); + + let info_hash = InfoHash([0u8; 20]); + let info_hashes = vec![info_hash]; + + let request = ScrapeRequest { + connection_id: get_connection_id(&remote_addr), + transaction_id: TransactionId(0i32), + info_hashes, + }; + + let response = handle_scrape(remote_addr, &request, initialized_public_tracker()) + .await + .unwrap(); + + let expected_torrent_stats = vec![zeroed_torrent_statistics()]; + + assert_eq!( + response, + Response::from(ScrapeResponse { + transaction_id: request.transaction_id, + torrent_stats: expected_torrent_stats + }) + ); + } + + async fn add_a_seeder(tracker: Arc, remote_addr: &SocketAddr, info_hash: &InfoHash) { + let peer_id = PeerId([255u8; 20]); + + let peer = TorrentPeerBuilder::default() + .with_peer_id(PeerId(peer_id.0)) + .with_peer_addr(*remote_addr) + .with_bytes_left(0) + .into(); + + tracker + .update_torrent_with_peer_and_get_stats(&info_hash.0.into(), &peer) + .await; + } + + fn build_scrape_request(remote_addr: &SocketAddr, info_hash: &InfoHash) -> ScrapeRequest { + let info_hashes = vec![*info_hash]; + + ScrapeRequest { + connection_id: 
get_connection_id(remote_addr), + transaction_id: TransactionId(0i32), + info_hashes, + } + } + + async fn add_a_sample_seeder_and_scrape(tracker: Arc) -> Response { + let remote_addr = sample_ipv4_remote_addr(); + let info_hash = InfoHash([0u8; 20]); + + add_a_seeder(tracker.clone(), &remote_addr, &info_hash).await; + + let request = build_scrape_request(&remote_addr, &info_hash); + + handle_scrape(remote_addr, &request, tracker.clone()).await.unwrap() + } + + fn match_scrape_response(response: Response) -> Option { + match response { + Response::Scrape(scrape_response) => Some(scrape_response), + _ => None, + } + } + + mod with_a_public_tracker { + use aquatic_udp_protocol::{NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; + + use crate::udp::handlers::tests::{ + initialized_public_tracker, + scrape_request::{add_a_sample_seeder_and_scrape, match_scrape_response}, + }; + + #[tokio::test] + async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { + let tracker = initialized_public_tracker(); + + let torrent_stats = match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone()).await); + + let expected_torrent_stats = vec![TorrentScrapeStatistics { + seeders: NumberOfPeers(1), + completed: NumberOfDownloads(0), + leechers: NumberOfPeers(0), + }]; + + assert_eq!(torrent_stats.unwrap().torrent_stats, expected_torrent_stats); + } + } + + mod with_a_private_tracker { + + use aquatic_udp_protocol::InfoHash; + + use crate::udp::{ + handle_scrape, + handlers::tests::{ + initialized_private_tracker, sample_ipv4_remote_addr, + scrape_request::{ + add_a_sample_seeder_and_scrape, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, + }, + }, + }; + + #[tokio::test] + async fn should_return_zeroed_statistics_when_the_tracker_does_not_have_the_requested_torrent() { + let tracker = initialized_private_tracker(); + + let remote_addr = sample_ipv4_remote_addr(); + let non_existing_info_hash = InfoHash([0u8; 20]); 
+ + let request = build_scrape_request(&remote_addr, &non_existing_info_hash); + + let torrent_stats = + match_scrape_response(handle_scrape(remote_addr, &request, tracker.clone()).await.unwrap()).unwrap(); + + let expected_torrent_stats = vec![zeroed_torrent_statistics()]; + + assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); + } + + #[tokio::test] + async fn should_return_zeroed_statistics_when_the_tracker_has_the_requested_torrent_because_authenticated_requests_are_not_supported_in_udp_tracker( + ) { + let tracker = initialized_private_tracker(); + + let torrent_stats = match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone()).await).unwrap(); + + let expected_torrent_stats = vec![zeroed_torrent_statistics()]; + + assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); + } + } + + mod with_a_whitelisted_tracker { + use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; + + use crate::udp::{ + handle_scrape, + handlers::tests::{ + initialized_whitelisted_tracker, sample_ipv4_remote_addr, + scrape_request::{add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics}, + }, + }; + + #[tokio::test] + async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { + let tracker = initialized_whitelisted_tracker(); + + let remote_addr = sample_ipv4_remote_addr(); + let info_hash = InfoHash([0u8; 20]); + + add_a_seeder(tracker.clone(), &remote_addr, &info_hash).await; + + tracker.add_torrent_to_memory_whitelist(&info_hash.0.into()).await; + + let request = build_scrape_request(&remote_addr, &info_hash); + + let torrent_stats = + match_scrape_response(handle_scrape(remote_addr, &request, tracker.clone()).await.unwrap()).unwrap(); + + let expected_torrent_stats = vec![TorrentScrapeStatistics { + seeders: NumberOfPeers(1), + completed: NumberOfDownloads(0), + leechers: NumberOfPeers(0), + }]; + + assert_eq!(torrent_stats.torrent_stats, 
expected_torrent_stats); + } + + #[tokio::test] + async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { + let tracker = initialized_whitelisted_tracker(); + + let remote_addr = sample_ipv4_remote_addr(); + let info_hash = InfoHash([0u8; 20]); + + add_a_seeder(tracker.clone(), &remote_addr, &info_hash).await; + + let request = build_scrape_request(&remote_addr, &info_hash); + + let torrent_stats = + match_scrape_response(handle_scrape(remote_addr, &request, tracker.clone()).await.unwrap()).unwrap(); + + let expected_torrent_stats = vec![zeroed_torrent_statistics()]; + + assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); + } + } + + fn sample_scrape_request(remote_addr: &SocketAddr) -> ScrapeRequest { + let info_hash = InfoHash([0u8; 20]); + let info_hashes = vec![info_hash]; + + ScrapeRequest { + connection_id: get_connection_id(remote_addr), + transaction_id: TransactionId(0i32), + info_hashes, + } + } + + mod using_ipv4 { + use std::sync::Arc; + + use crate::{ + statistics::TrackerStatisticsEvent, + tracker::tracker::TorrentTracker, + udp::handlers::{ + handle_scrape, + tests::{default_tracker_config, sample_ipv4_remote_addr, TrackerStatsServiceMock}, + }, + }; + + use super::sample_scrape_request; + + #[tokio::test] + async fn should_send_the_upd4_scrape_event() { + let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + + tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp4Scrape); + + let remote_addr = sample_ipv4_remote_addr(); + let tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + + handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) + .await + .unwrap(); + } + } + + mod using_ipv6 { + use std::sync::Arc; + + use crate::{ + statistics::TrackerStatisticsEvent, + tracker::tracker::TorrentTracker, + udp::handlers::{ + handle_scrape, + tests::{default_tracker_config, sample_ipv6_remote_addr, 
TrackerStatsServiceMock}, + }, + }; + + use super::sample_scrape_request; + + #[tokio::test] + async fn should_send_the_upd6_scrape_event() { + let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + + tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp6Scrape); + + let remote_addr = sample_ipv6_remote_addr(); + let tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + + handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) + .await + .unwrap(); + } + } + } +} From 6efbb64df1d79b22f6c56ff33215814178b3a89b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 21 Sep 2022 17:05:29 +0100 Subject: [PATCH 079/435] test: add test for tracker::torrent::TorrentPeer --- src/tracker/peer.rs | 254 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 254 insertions(+) diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 71c411b9b..712c65017 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -86,3 +86,257 @@ impl TorrentPeer { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } } + +#[cfg(test)] +mod test { + mod torrent_peer { + + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + + use crate::{ + peer::TorrentPeer, + protocol::clock::{DefaultClock, Time}, + PeerId, + }; + + #[test] + fn it_should_be_serializable() { + let torrent_peer = TorrentPeer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DefaultClock::now(), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + + let json_serialized_value = serde_json::to_string(&torrent_peer).unwrap(); + + assert_eq!( + json_serialized_value, + // todo: compare using pretty json format to improve readability + 
r#"{"peer_id":{"id":"2d71423030303030303030303030303030303030","client":"qBittorrent"},"peer_addr":"126.0.0.1:8080","updated":0,"uploaded":0,"downloaded":0,"left":0,"event":"Started"}"# + ); + } + } + + mod torrent_peer_constructor_from_udp_requests { + + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use aquatic_udp_protocol::{ + AnnounceEvent, AnnounceRequest, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, TransactionId, + }; + + use crate::protocol::utils::get_connection_id; + + use crate::peer::TorrentPeer; + + // todo: duplicate functions is PR 82. Remove duplication once both PR are merged. + + fn sample_ipv4_remote_addr() -> SocketAddr { + sample_ipv4_socket_address() + } + + fn sample_ipv4_socket_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + } + + struct AnnounceRequestBuilder { + request: AnnounceRequest, + } + + impl AnnounceRequestBuilder { + pub fn default() -> AnnounceRequestBuilder { + let client_ip = Ipv4Addr::new(126, 0, 0, 1); + let client_port = 8080; + let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); + + let default_request = AnnounceRequest { + connection_id: get_connection_id(&sample_ipv4_remote_addr()), + transaction_id: TransactionId(0i32), + info_hash: info_hash_aquatic, + peer_id: AquaticPeerId(*b"-qB00000000000000000"), + bytes_downloaded: NumberOfBytes(0i64), + bytes_uploaded: NumberOfBytes(0i64), + bytes_left: NumberOfBytes(0i64), + event: AnnounceEvent::Started, + ip_address: Some(client_ip), + key: PeerKey(0u32), + peers_wanted: NumberOfPeers(1i32), + port: Port(client_port), + }; + AnnounceRequestBuilder { + request: default_request, + } + } + + pub fn into(self) -> AnnounceRequest { + self.request + } + } + + #[test] + fn it_should_use_the_udp_source_ip_as_the_peer_ip_address_instead_of_the_ip_in_the_announce_request() { + let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); + let announce_request = AnnounceRequestBuilder::default().into(); 
+ + let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, None); + + assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); + } + + #[test] + fn it_should_always_use_the_port_in_the_announce_request_for_the_peer_port() { + let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); + let announce_request = AnnounceRequestBuilder::default().into(); + + let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, None); + + assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); + } + + mod when_source_udp_ip_is_a_ipv_4_loopback_ip { + + use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + str::FromStr, + }; + + use crate::peer::{test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder, TorrentPeer}; + + #[test] + fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + let announce_request = AnnounceRequestBuilder::default().into(); + + let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, None); + + assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); + } + + #[test] + fn it_should_use_the_external_host_ip_in_tracker_configuration_if_defined() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + let announce_request = AnnounceRequestBuilder::default().into(); + + let host_opt_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + + assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); + } + + #[test] + fn it_should_use_the_external_ip_in_tracker_configuration_if_defined_even_if_the_external_ip_is_an_ipv6_ip() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + let announce_request = 
AnnounceRequestBuilder::default().into(); + + let host_opt_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + + assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); + } + } + + mod when_source_udp_ip_is_a_ipv6_loopback_ip { + + use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + str::FromStr, + }; + + use crate::peer::{test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder, TorrentPeer}; + + #[test] + fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + let announce_request = AnnounceRequestBuilder::default().into(); + + let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, None); + + assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); + } + + #[test] + fn it_should_use_the_external_host_ip_in_tracker_configuration_if_defined() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + let announce_request = AnnounceRequestBuilder::default().into(); + + let host_opt_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + + assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); + } + + #[test] + fn it_should_use_the_external_ip_in_tracker_configuration_if_defined_even_if_the_external_ip_is_an_ipv4_ip() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + let announce_request = AnnounceRequestBuilder::default().into(); + + let host_opt_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + + 
assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); + } + } + } + + mod torrent_peer_constructor_from_for_http_requests { + use crate::{http::AnnounceRequest, peer::TorrentPeer, InfoHash, PeerId}; + + use std::net::{IpAddr, Ipv4Addr}; + + fn sample_http_announce_request(peer_addr: IpAddr, port: u16) -> AnnounceRequest { + AnnounceRequest { + info_hash: InfoHash([0u8; 20]), + peer_addr, + downloaded: 0u64, + uploaded: 0u64, + peer_id: PeerId(*b"-qB00000000000000000"), + port, + left: 0u64, + event: None, + compact: None, + } + } + + #[test] + fn it_should_use_the_source_ip_in_the_udp_heder_as_the_peer_ip_address_ignoring_the_peer_ip_in_the_announce_request() { + let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); + + let ip_in_announce_request = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); + let announce_request = sample_http_announce_request(ip_in_announce_request, 8080); + + let torrent_peer = TorrentPeer::from_http_announce_request(&announce_request, remote_ip, None); + + assert_eq!(torrent_peer.peer_addr.ip(), remote_ip); + assert_ne!(torrent_peer.peer_addr.ip(), ip_in_announce_request); + } + + #[test] + fn it_should_always_use_the_port_in_the_announce_request_for_the_peer_port_ignoring_the_port_in_the_udp_header() { + let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); + let remote_port = 8080; + + let port_in_announce_request = 8081; + let announce_request = + sample_http_announce_request(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), port_in_announce_request); + + let torrent_peer = TorrentPeer::from_http_announce_request(&announce_request, remote_ip, None); + + assert_eq!(torrent_peer.peer_addr.port(), announce_request.port); + assert_ne!(torrent_peer.peer_addr.port(), remote_port); + } + + // todo: other cases are already covered by UDP cases. + // Code review: + // We should extract the method "peer_addr_from_ip_and_port_and_opt_host_ip" from TorrentPeer. 
+ // It could be another service responsible for assigning the IP to the peer. + // So we can test that behavior independently from where you use it. + // We could also build the peer with the IP in the announce request and let the tracker decide + // wether it has to change it or not depending on tracker configuration. + } +} From 3f617ebf44f82886c9f0cbf9fa691039f4f13dc1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 Sep 2022 19:20:21 +0100 Subject: [PATCH 080/435] test: add test for InfoHash --- src/protocol/common.rs | 95 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) diff --git a/src/protocol/common.rs b/src/protocol/common.rs index 92a3ed51c..f1bd6a99c 100644 --- a/src/protocol/common.rs +++ b/src/protocol/common.rs @@ -94,6 +94,101 @@ impl<'de> serde::de::Deserialize<'de> for InfoHash { } } +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use serde::{Deserialize, Serialize}; + use serde_json::json; + + use crate::InfoHash; + + #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] + struct ContainingInfoHash { + pub info_hash: InfoHash, + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_40_utf8_char_string_representing_an_hexadecimal_value() { + let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"); + assert!(info_hash.is_ok()); + } + + #[test] + fn an_info_hash_can_not_be_created_from_a_utf8_string_representing_a_not_valid_hexadecimal_value() { + let info_hash = InfoHash::from_str("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"); + assert!(info_hash.is_err()); + } + + #[test] + fn an_info_hash_can_only_be_created_from_a_40_utf8_char_string() { + let info_hash = InfoHash::from_str(&"F".repeat(39)); + assert!(info_hash.is_err()); + + let info_hash = InfoHash::from_str(&"F".repeat(41)); + assert!(info_hash.is_err()); + } + + #[test] + fn an_info_hash_should_by_displayed_like_a_40_utf8_lowercased_char_hex_string() { + let info_hash = 
InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + + let output = format!("{}", info_hash); + + assert_eq!(output, "ffffffffffffffffffffffffffffffffffffffff"); + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_20_byte_array_slice() { + let info_hash: InfoHash = [255u8; 20].as_slice().into(); + + assert_eq!( + info_hash, + InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + ); + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_20_byte_array() { + let info_hash: InfoHash = [255u8; 20].into(); + + assert_eq!( + info_hash, + InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + ); + } + + #[test] + fn an_info_hash_can_be_serialized() { + let s = ContainingInfoHash { + info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(), + }; + + let json_serialized_value = serde_json::to_string(&s).unwrap(); + + assert_eq!( + json_serialized_value, + r#"{"info_hash":"ffffffffffffffffffffffffffffffffffffffff"}"# + ); + } + + #[test] + fn an_info_hash_can_be_deserialized() { + let json = json!({ + "info_hash": "ffffffffffffffffffffffffffffffffffffffff", + }); + + let s: ContainingInfoHash = serde_json::from_value(json).unwrap(); + + assert_eq!( + s, + ContainingInfoHash { + info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + } + ); + } +} + struct InfoHashVisitor; impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { From 8d268b02a4df052440ddf0f4f4b14be282f5f04d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 13 Sep 2022 18:18:37 +0100 Subject: [PATCH 081/435] test: add tests to TorrentEntry --- src/tracker/torrent.rs | 269 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 269 insertions(+) diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index f12f0a622..ff257fb90 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -98,3 +98,272 @@ pub enum TorrentError { CouldNotSendResponse, InvalidInfoHash, } 
+ +#[cfg(test)] +mod tests { + use std::{ + net::{IpAddr, Ipv4Addr, SocketAddr}, + ops::Sub, + time::Duration, + }; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + + use crate::{ + peer::TorrentPeer, + protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, WorkingClock}, + torrent::TorrentEntry, + PeerId, + }; + + struct TorrentPeerBuilder { + peer: TorrentPeer, + } + + impl TorrentPeerBuilder { + pub fn default() -> TorrentPeerBuilder { + let default_peer = TorrentPeer { + peer_id: PeerId([0u8; 20]), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + updated: DefaultClock::now(), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + TorrentPeerBuilder { peer: default_peer } + } + + pub fn with_event_completed(mut self) -> Self { + self.peer.event = AnnounceEvent::Completed; + self + } + + pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; + self + } + + pub fn with_peer_id(mut self, peer_id: PeerId) -> Self { + self.peer.peer_id = peer_id; + self + } + + pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes(left); + self + } + + pub fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { + self.peer.updated = updated; + self + } + + pub fn into(self) -> TorrentPeer { + self.peer + } + } + + /// A torrent seeder is a peer with 0 bytes left to download which + /// has not announced it has stopped + fn a_torrent_seeder() -> TorrentPeer { + TorrentPeerBuilder::default() + .with_number_of_bytes_left(0) + .with_event_completed() + .into() + } + + /// A torrent leecher is a peer that is not a seeder. 
+ /// Leecher: left > 0 OR event = Stopped + fn a_torrent_leecher() -> TorrentPeer { + TorrentPeerBuilder::default() + .with_number_of_bytes_left(1) + .with_event_completed() + .into() + } + + #[test] + fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { + let torrent_entry = TorrentEntry::new(); + + assert_eq!(torrent_entry.get_peers(None).len(), 0); + } + + #[test] + fn a_new_peer_can_be_added_to_a_torrent_entry() { + let mut torrent_entry = TorrentEntry::new(); + let torrent_peer = TorrentPeerBuilder::default().into(); + + torrent_entry.update_peer(&torrent_peer); // Add the peer + + assert_eq!(*torrent_entry.get_peers(None)[0], torrent_peer); + assert_eq!(torrent_entry.get_peers(None).len(), 1); + } + + #[test] + fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { + let mut torrent_entry = TorrentEntry::new(); + let torrent_peer = TorrentPeerBuilder::default().into(); + + torrent_entry.update_peer(&torrent_peer); // Add the peer + + assert_eq!(torrent_entry.get_peers(None), vec![&torrent_peer]); + } + + #[test] + fn a_peer_can_be_updated_in_a_torrent_entry() { + let mut torrent_entry = TorrentEntry::new(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.update_peer(&torrent_peer); // Add the peer + + torrent_peer.event = AnnounceEvent::Completed; // Update the peer + torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry + + assert_eq!(torrent_entry.get_peers(None)[0].event, AnnounceEvent::Completed); + } + + #[test] + fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { + let mut torrent_entry = TorrentEntry::new(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.update_peer(&torrent_peer); // Add the peer + + torrent_peer.event = AnnounceEvent::Stopped; // Update the peer + torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry + + 
assert_eq!(torrent_entry.get_peers(None).len(), 0); + } + + #[test] + fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { + let mut torrent_entry = TorrentEntry::new(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + + torrent_entry.update_peer(&torrent_peer); // Add the peer + + torrent_peer.event = AnnounceEvent::Completed; // Update the peer + let stats_have_changed = torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry + + assert!(stats_have_changed); + } + + #[test] + fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( + ) { + let mut torrent_entry = TorrentEntry::new(); + let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); + + // Add a peer that did not exist before in the entry + let torrent_stats_have_not_changed = !torrent_entry.update_peer(&torrent_peer_announcing_complete_event); + + assert!(torrent_stats_have_not_changed); + } + + #[test] + fn a_torrent_entry_could_filter_out_peers_with_a_given_socket_address() { + let mut torrent_entry = TorrentEntry::new(); + let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); + torrent_entry.update_peer(&torrent_peer); // Add peer + + // Get peers excluding the one we have just added + let peers = torrent_entry.get_peers(Some(&peer_socket_address)); + + assert_eq!(peers.len(), 0); + } + + fn peer_id_from_i32(number: i32) -> PeerId { + let peer_id = number.to_le_bytes(); + PeerId([ + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], peer_id[2], + peer_id[3], + ]) + } + + #[test] + fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { + let mut torrent_entry = TorrentEntry::new(); + + 
// We add one more peer than the scrape limit + for peer_number in 1..=74 + 1 { + let torrent_peer = TorrentPeerBuilder::default() + .with_peer_id(peer_id_from_i32(peer_number)) + .into(); + torrent_entry.update_peer(&torrent_peer); + } + + let peers = torrent_entry.get_peers(None); + + assert_eq!(peers.len(), 74) + } + + #[test] + fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { + let mut torrent_entry = TorrentEntry::new(); + let torrent_seeder = a_torrent_seeder(); + + torrent_entry.update_peer(&torrent_seeder); // Add seeder + + assert_eq!(torrent_entry.get_stats().0, 1); + } + + #[test] + fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { + let mut torrent_entry = TorrentEntry::new(); + let torrent_leecher = a_torrent_leecher(); + + torrent_entry.update_peer(&torrent_leecher); // Add leecher + + assert_eq!(torrent_entry.get_stats().2, 1); + } + + #[test] + fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( + ) { + let mut torrent_entry = TorrentEntry::new(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.update_peer(&torrent_peer); // Add the peer + + // Announce "Completed" torrent download event. + torrent_peer.event = AnnounceEvent::Completed; + torrent_entry.update_peer(&torrent_peer); // Update the peer + + let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().1; + + assert_eq!(number_of_previously_known_peers_with_completed_torrent, 1); + } + + #[test] + fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { + let mut torrent_entry = TorrentEntry::new(); + let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); + + // Announce "Completed" torrent download event. + // It's the first event announced from this peer. 
+ torrent_entry.update_peer(&torrent_peer_announcing_complete_event); // Add the peer + + let number_of_peers_with_completed_torrent = torrent_entry.get_stats().1; + + assert_eq!(number_of_peers_with_completed_torrent, 0); + } + + #[test] + fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { + let mut torrent_entry = TorrentEntry::new(); + + let timeout = 120u32; + + let now = WorkingClock::now(); + StoppedClock::local_set(&now); + + let timeout_seconds_before_now = now.sub(Duration::from_secs(timeout as u64)); + let inactive_peer = TorrentPeerBuilder::default() + .updated_at(timeout_seconds_before_now.sub(Duration::from_secs(1))) + .into(); + torrent_entry.update_peer(&inactive_peer); // Add the peer + + torrent_entry.remove_inactive_peers(timeout); + + assert_eq!(torrent_entry.peers.len(), 0); + } +} From a9df618910492ae86358d94871d406997345781f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 22 Sep 2022 17:06:04 +0100 Subject: [PATCH 082/435] fix: vscode setting, remove default formatter Co-authored-by: da2ce7 --- .vscode/settings.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index d87732d93..72e8db7e0 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -11,8 +11,7 @@ "typenum" ], "[rust]": { - "editor.defaultFormatter": "matklad.rust-analyzer", "editor.formatOnSave": true }, "rust-analyzer.checkOnSave.command": "clippy", -} \ No newline at end of file +} From 5e2bef3ce9c2c7924de2fcc93d65debf39c784de Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 17 Oct 2022 17:03:06 +0200 Subject: [PATCH 083/435] format: re-apply rust-format --- src/tracker/peer.rs | 35 +++---- src/tracker/statistics.rs | 3 +- src/tracker/torrent.rs | 18 ++-- src/udp/handlers.rs | 200 +++++++++++++++----------------------- 4 files changed, 101 insertions(+), 155 deletions(-) diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 712c65017..09509e50f 100644 
--- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -95,11 +95,9 @@ mod test { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::{ - peer::TorrentPeer, - protocol::clock::{DefaultClock, Time}, - PeerId, - }; + use crate::peer::TorrentPeer; + use crate::protocol::clock::{DefaultClock, Time}; + use crate::PeerId; #[test] fn it_should_be_serializable() { @@ -131,9 +129,8 @@ mod test { AnnounceEvent, AnnounceRequest, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, TransactionId, }; - use crate::protocol::utils::get_connection_id; - use crate::peer::TorrentPeer; + use crate::protocol::utils::get_connection_id; // todo: duplicate functions is PR 82. Remove duplication once both PR are merged. @@ -201,12 +198,11 @@ mod test { mod when_source_udp_ip_is_a_ipv_4_loopback_ip { - use std::{ - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, - str::FromStr, - }; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::str::FromStr; - use crate::peer::{test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder, TorrentPeer}; + use crate::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; + use crate::peer::TorrentPeer; #[test] fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { @@ -243,12 +239,11 @@ mod test { mod when_source_udp_ip_is_a_ipv6_loopback_ip { - use std::{ - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, - str::FromStr, - }; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::str::FromStr; - use crate::peer::{test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder, TorrentPeer}; + use crate::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; + use crate::peer::TorrentPeer; #[test] fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { @@ -285,10 +280,12 @@ mod test { } mod torrent_peer_constructor_from_for_http_requests { - 
use crate::{http::AnnounceRequest, peer::TorrentPeer, InfoHash, PeerId}; - use std::net::{IpAddr, Ipv4Addr}; + use crate::http::AnnounceRequest; + use crate::peer::TorrentPeer; + use crate::{InfoHash, PeerId}; + fn sample_http_announce_request(peer_addr: IpAddr, port: u16) -> AnnounceRequest { AnnounceRequest { info_hash: InfoHash([0u8; 20]), diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index cf801e1df..a2a0de99b 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -1,5 +1,6 @@ -use async_trait::async_trait; use std::sync::Arc; + +use async_trait::async_trait; use tokio::sync::mpsc::error::SendError; use tokio::sync::mpsc::Sender; use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index ff257fb90..65eaa0a40 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -101,20 +101,16 @@ pub enum TorrentError { #[cfg(test)] mod tests { - use std::{ - net::{IpAddr, Ipv4Addr, SocketAddr}, - ops::Sub, - time::Duration, - }; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::ops::Sub; + use std::time::Duration; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::{ - peer::TorrentPeer, - protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, WorkingClock}, - torrent::TorrentEntry, - PeerId, - }; + use crate::peer::TorrentPeer; + use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, WorkingClock}; + use crate::torrent::TorrentEntry; + use crate::PeerId; struct TorrentPeerBuilder { peer: TorrentPeer, diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 3c4074eae..5e286b9f7 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -239,26 +239,23 @@ fn handle_error(e: ServerError, transaction_id: TransactionId) -> Response { #[cfg(test)] mod tests { - use std::{ - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, - sync::Arc, - }; + use 
std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use async_trait::async_trait; - use tokio::sync::{mpsc::error::SendError, RwLock, RwLockReadGuard}; - - use crate::{ - mode::TrackerMode, - peer::TorrentPeer, - protocol::clock::{DefaultClock, Time}, - statistics::{ - StatsTracker, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender, TrackerStatisticsRepository, - TrackerStatsService, - }, - tracker::tracker::TorrentTracker, - Configuration, PeerId, + use tokio::sync::mpsc::error::SendError; + use tokio::sync::{RwLock, RwLockReadGuard}; + + use crate::mode::TrackerMode; + use crate::peer::TorrentPeer; + use crate::protocol::clock::{DefaultClock, Time}; + use crate::statistics::{ + StatsTracker, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender, TrackerStatisticsRepository, + TrackerStatsService, }; + use crate::tracker::tracker::TorrentTracker; + use crate::{Configuration, PeerId}; fn default_tracker_config() -> Arc { Arc::new(Configuration::default()) @@ -401,18 +398,14 @@ mod tests { use std::sync::Arc; - use crate::{ - protocol::utils::get_connection_id, - statistics::TrackerStatisticsEvent, - tracker::tracker::TorrentTracker, - udp::{ - handle_connect, - handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}, - }, - }; use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr, TrackerStatsServiceMock}; + use crate::protocol::utils::get_connection_id; + use crate::statistics::TrackerStatisticsEvent; + use crate::tracker::tracker::TorrentTracker; + use crate::udp::handle_connect; + use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; fn sample_connect_request() -> ConnectRequest { ConnectRequest { @@ -492,7 +485,8 @@ mod tests { AnnounceEvent, AnnounceRequest, NumberOfBytes, 
NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, TransactionId, }; - use crate::{protocol::utils::get_connection_id, udp::handlers::tests::sample_ipv4_remote_addr}; + use crate::protocol::utils::get_connection_id; + use crate::udp::handlers::tests::sample_ipv4_remote_addr; struct AnnounceRequestBuilder { request: AnnounceRequest, @@ -550,28 +544,23 @@ mod tests { mod using_ipv4 { - use std::{ - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, - sync::Arc, - }; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; use aquatic_udp_protocol::{ AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, NumberOfPeers, PeerId as AquaticPeerId, Response, ResponsePeer, }; - use crate::{ - statistics::TrackerStatisticsEvent, - tracker::tracker::TorrentTracker, - udp::{ - handle_announce, - handlers::tests::{ - announce_request::AnnounceRequestBuilder, default_tracker_config, initialized_public_tracker, - sample_ipv4_socket_address, TorrentPeerBuilder, TrackerStatsServiceMock, - }, - }, - PeerId, + use crate::statistics::TrackerStatisticsEvent; + use crate::tracker::tracker::TorrentTracker; + use crate::udp::handle_announce; + use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::udp::handlers::tests::{ + default_tracker_config, initialized_public_tracker, sample_ipv4_socket_address, TorrentPeerBuilder, + TrackerStatsServiceMock, }; + use crate::PeerId; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -718,15 +707,10 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::{ - udp::{ - handle_announce, - handlers::tests::{ - announce_request::AnnounceRequestBuilder, initialized_public_tracker, TorrentPeerBuilder, - }, - }, - PeerId, - }; + use crate::udp::handle_announce; + use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::udp::handlers::tests::{initialized_public_tracker, 
TorrentPeerBuilder}; + use crate::PeerId; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { @@ -764,28 +748,23 @@ mod tests { mod using_ipv6 { - use std::{ - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, - sync::Arc, - }; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; use aquatic_udp_protocol::{ AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, NumberOfPeers, PeerId as AquaticPeerId, Response, ResponsePeer, }; - use crate::{ - statistics::TrackerStatisticsEvent, - tracker::tracker::TorrentTracker, - udp::{ - handle_announce, - handlers::tests::{ - announce_request::AnnounceRequestBuilder, default_tracker_config, initialized_public_tracker, - sample_ipv6_remote_addr, TorrentPeerBuilder, TrackerStatsServiceMock, - }, - }, - PeerId, + use crate::statistics::TrackerStatisticsEvent; + use crate::tracker::tracker::TorrentTracker; + use crate::udp::handle_announce; + use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::udp::handlers::tests::{ + default_tracker_config, initialized_public_tracker, sample_ipv6_remote_addr, TorrentPeerBuilder, + TrackerStatsServiceMock, }; + use crate::PeerId; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -934,21 +913,16 @@ mod tests { } mod from_a_loopback_ip { - use std::{ - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, - sync::Arc, - }; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::{ - statistics::StatsTracker, - tracker::tracker::TorrentTracker, - udp::{ - handle_announce, - handlers::tests::{announce_request::AnnounceRequestBuilder, TrackerConfigurationBuilder}, - }, - }; + use crate::statistics::StatsTracker; + use crate::tracker::tracker::TorrentTracker; + use crate::udp::handle_announce; + use 
crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::udp::handlers::tests::TrackerConfigurationBuilder; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { @@ -993,24 +967,20 @@ mod tests { } mod scrape_request { - use std::{net::SocketAddr, sync::Arc}; + use std::net::SocketAddr; + use std::sync::Arc; use aquatic_udp_protocol::{ InfoHash, NumberOfDownloads, NumberOfPeers, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; - use crate::{ - protocol::utils::get_connection_id, - tracker::tracker::TorrentTracker, - udp::{ - handle_scrape, - handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}, - }, - PeerId, - }; - use super::TorrentPeerBuilder; + use crate::protocol::utils::get_connection_id; + use crate::tracker::tracker::TorrentTracker; + use crate::udp::handle_scrape; + use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; + use crate::PeerId; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { TorrentScrapeStatistics { @@ -1093,10 +1063,8 @@ mod tests { mod with_a_public_tracker { use aquatic_udp_protocol::{NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; - use crate::udp::handlers::tests::{ - initialized_public_tracker, - scrape_request::{add_a_sample_seeder_and_scrape, match_scrape_response}, - }; + use crate::udp::handlers::tests::initialized_public_tracker; + use crate::udp::handlers::tests::scrape_request::{add_a_sample_seeder_and_scrape, match_scrape_response}; #[tokio::test] async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { @@ -1118,15 +1086,11 @@ mod tests { use aquatic_udp_protocol::InfoHash; - use crate::udp::{ - handle_scrape, - handlers::tests::{ - initialized_private_tracker, sample_ipv4_remote_addr, - scrape_request::{ - add_a_sample_seeder_and_scrape, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, - 
}, - }, + use crate::udp::handle_scrape; + use crate::udp::handlers::tests::scrape_request::{ + add_a_sample_seeder_and_scrape, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; + use crate::udp::handlers::tests::{initialized_private_tracker, sample_ipv4_remote_addr}; #[tokio::test] async fn should_return_zeroed_statistics_when_the_tracker_does_not_have_the_requested_torrent() { @@ -1161,13 +1125,11 @@ mod tests { mod with_a_whitelisted_tracker { use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; - use crate::udp::{ - handle_scrape, - handlers::tests::{ - initialized_whitelisted_tracker, sample_ipv4_remote_addr, - scrape_request::{add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics}, - }, + use crate::udp::handle_scrape; + use crate::udp::handlers::tests::scrape_request::{ + add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; + use crate::udp::handlers::tests::{initialized_whitelisted_tracker, sample_ipv4_remote_addr}; #[tokio::test] async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { @@ -1228,16 +1190,11 @@ mod tests { mod using_ipv4 { use std::sync::Arc; - use crate::{ - statistics::TrackerStatisticsEvent, - tracker::tracker::TorrentTracker, - udp::handlers::{ - handle_scrape, - tests::{default_tracker_config, sample_ipv4_remote_addr, TrackerStatsServiceMock}, - }, - }; - use super::sample_scrape_request; + use crate::statistics::TrackerStatisticsEvent; + use crate::tracker::tracker::TorrentTracker; + use crate::udp::handlers::handle_scrape; + use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr, TrackerStatsServiceMock}; #[tokio::test] async fn should_send_the_upd4_scrape_event() { @@ -1257,16 +1214,11 @@ mod tests { mod using_ipv6 { use std::sync::Arc; - use crate::{ - statistics::TrackerStatisticsEvent, - tracker::tracker::TorrentTracker, - 
udp::handlers::{ - handle_scrape, - tests::{default_tracker_config, sample_ipv6_remote_addr, TrackerStatsServiceMock}, - }, - }; - use super::sample_scrape_request; + use crate::statistics::TrackerStatisticsEvent; + use crate::tracker::tracker::TorrentTracker; + use crate::udp::handlers::handle_scrape; + use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr, TrackerStatsServiceMock}; #[tokio::test] async fn should_send_the_upd6_scrape_event() { From 873293a6f871ba6102d75df2379471b43bcf0096 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 21 Sep 2022 21:12:22 +0200 Subject: [PATCH 084/435] crypto: ephemeral instance seeds with keepers * Implement a lazy static that holds ephemeral seeds that are freshly generated on app load. * New `crypto` supporting module that provides a simple interface to accessing the seed. * Provide a 'default seed', that is zeroed-out when testing, and random when not testing. --- src/lib.rs | 11 +++++ src/main.rs | 5 ++- src/protocol/crypto.rs | 98 ++++++++++++++++++++++++++++++++++++++++++ src/protocol/mod.rs | 1 + 4 files changed, 114 insertions(+), 1 deletion(-) create mode 100644 src/protocol/crypto.rs diff --git a/src/lib.rs b/src/lib.rs index 882e126bc..5f003b5fd 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -27,3 +27,14 @@ pub mod static_time { pub static ref TIME_AT_APP_START: SystemTime = SystemTime::now(); } } + +pub mod ephemeral_instance_keys { + use rand::rngs::ThreadRng; + use rand::Rng; + + pub type Seed = [u8; 32]; + + lazy_static! 
{ + pub static ref RANDOM_SEED: Seed = Rng::gen(&mut ThreadRng::default()); + } +} diff --git a/src/main.rs b/src/main.rs index 47896ff43..bac7854bb 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use log::info; use torrust_tracker::tracker::statistics::StatsTracker; use torrust_tracker::tracker::tracker::TorrentTracker; -use torrust_tracker::{logging, setup, static_time, Configuration}; +use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time, Configuration}; #[tokio::main] async fn main() { @@ -12,6 +12,9 @@ async fn main() { // Set the time of Torrust app starting lazy_static::initialize(&static_time::TIME_AT_APP_START); + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + // Initialize Torrust config let config = match Configuration::load_from_file(CONFIG_PATH) { Ok(config) => Arc::new(config), diff --git a/src/protocol/crypto.rs b/src/protocol/crypto.rs new file mode 100644 index 000000000..18cfaf5e6 --- /dev/null +++ b/src/protocol/crypto.rs @@ -0,0 +1,98 @@ +pub mod keys { + + pub mod seeds { + use self::detail::DEFAULT_SEED; + use crate::ephemeral_instance_keys::{Seed, RANDOM_SEED}; + + pub trait SeedKeeper { + type Seed: Sized + Default + AsMut<[u8]>; + fn get_seed() -> &'static Self::Seed; + } + + pub struct InstanceSeed; + pub struct DefaultSeed; + + impl SeedKeeper for InstanceSeed { + type Seed = Seed; + + fn get_seed() -> &'static Self::Seed { + &RANDOM_SEED + } + } + + impl SeedKeeper for DefaultSeed { + type Seed = Seed; + + #[allow(clippy::needless_borrow)] + fn get_seed() -> &'static Self::Seed { + &DEFAULT_SEED + } + } + + #[cfg(test)] + mod tests { + use super::detail::ZEROED_TEST_SEED; + use super::{DefaultSeed, InstanceSeed, SeedKeeper}; + use crate::ephemeral_instance_keys::Seed; + + pub struct ZeroedTestSeed; + + impl SeedKeeper for ZeroedTestSeed { + type Seed = Seed; + + #[allow(clippy::needless_borrow)] + fn get_seed() -> 
&'static Self::Seed { + &ZEROED_TEST_SEED + } + } + + #[test] + fn the_default_seed_and_the_zeroed_seed_should_be_the_same_when_testing() { + assert_eq!(DefaultSeed::get_seed(), ZeroedTestSeed::get_seed()) + } + + #[test] + fn the_default_seed_and_the_instance_seed_should_be_different_when_testing() { + assert_ne!(DefaultSeed::get_seed(), InstanceSeed::get_seed()) + } + } + + mod detail { + use crate::ephemeral_instance_keys::Seed; + + #[allow(dead_code)] + pub const ZEROED_TEST_SEED: &Seed = &[0u8; 32]; + + #[cfg(test)] + pub use ZEROED_TEST_SEED as DEFAULT_SEED; + + #[cfg(not(test))] + pub use crate::ephemeral_instance_keys::RANDOM_SEED as DEFAULT_SEED; + + #[cfg(test)] + mod tests { + use std::convert::TryInto; + + use crate::ephemeral_instance_keys::RANDOM_SEED; + use crate::protocol::crypto::keys::seeds::detail::ZEROED_TEST_SEED; + use crate::protocol::crypto::keys::seeds::DEFAULT_SEED; + + #[test] + fn it_should_have_a_zero_test_seed() { + assert_eq!(*ZEROED_TEST_SEED, [0u8; 32]) + } + + #[test] + fn it_should_default_to_zeroed_seed_when_testing() { + assert_eq!(*DEFAULT_SEED, *ZEROED_TEST_SEED) + } + + #[test] + fn it_should_have_a_large_random_seed() { + assert!(u128::from_ne_bytes((*RANDOM_SEED)[..16].try_into().unwrap()) > u64::MAX as u128); + assert!(u128::from_ne_bytes((*RANDOM_SEED)[16..].try_into().unwrap()) > u64::MAX as u128); + } + } + } + } +} diff --git a/src/protocol/mod.rs b/src/protocol/mod.rs index fcb28b3b2..85e4f90ad 100644 --- a/src/protocol/mod.rs +++ b/src/protocol/mod.rs @@ -1,3 +1,4 @@ pub mod clock; pub mod common; +pub mod crypto; pub mod utils; From f2eaf9584cf8d396660f325594b835cbd70dedfa Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Oct 2022 13:51:41 +0100 Subject: [PATCH 085/435] feat: use nightly toolchain for formatting @da2ce7 started using the nightly build for rust formatting here: https://github.com/torrust/torrust-tracker/pull/99 And the job 'format' in the workflow did not work, showing these warnings: ``` 
Warning: can't set `imports_granularity = Module`, unstable features are only available in nightly channel. Warning: can't set `group_imports = StdExternalCrate`, unstable features are only available in nightly channel. Warning: can't set `imports_granularity = Module`, unstable features are only available in nightly channel. Warning: can't set `group_imports = StdExternalCrate`, unstable features are only available in nightly channel. ``` So we needed the nightly channel anyway. --- .github/workflows/test_build_release.yml | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index 87f6a9488..1266ae51f 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -13,11 +13,15 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: stable - components: rustfmt + toolchain: nightly + override: true + components: rustfmt, clippy - uses: Swatinem/rust-cache@v1 - name: Check Rust Formatting - run: cargo fmt --check + uses: actions-rs/cargo@v1 + with: + command: fmt + args: --check test: needs: format From c8fa30a509d8fadbe0f7271b4254e064729fafd9 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 18 Oct 2022 18:31:28 +0200 Subject: [PATCH 086/435] fix: prepare tests for connection cookie Co-authored-by: Jose Celano --- src/protocol/clock/time_extent.rs | 1 + src/tracker/peer.rs | 1 - src/udp/handlers.rs | 76 +++++++++++++++++++++++-------- 3 files changed, 57 insertions(+), 21 deletions(-) diff --git a/src/protocol/clock/time_extent.rs b/src/protocol/clock/time_extent.rs index d0713645b..3fa60de82 100644 --- a/src/protocol/clock/time_extent.rs +++ b/src/protocol/clock/time_extent.rs @@ -133,6 +133,7 @@ where } } } + fn now_before(increment: &TimeExtentBase, sub_time: &Duration) -> Option> { match Clock::sub(sub_time) { None => None, diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 
09509e50f..2d0985fc9 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -131,7 +131,6 @@ mod test { use crate::peer::TorrentPeer; use crate::protocol::utils::get_connection_id; - // todo: duplicate functions is PR 82. Remove duplication once both PR are merged. fn sample_ipv4_remote_addr() -> SocketAddr { diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 5e286b9f7..f22436bc1 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -482,7 +482,8 @@ mod tests { use std::net::Ipv4Addr; use aquatic_udp_protocol::{ - AnnounceEvent, AnnounceRequest, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, TransactionId, + AnnounceEvent, AnnounceRequest, ConnectionId, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, + TransactionId, }; use crate::protocol::utils::get_connection_id; @@ -517,6 +518,11 @@ mod tests { } } + pub fn with_connection_id(mut self, connection_id: ConnectionId) -> Self { + self.request.connection_id = connection_id; + self + } + pub fn with_info_hash(mut self, info_hash: aquatic_udp_protocol::InfoHash) -> Self { self.request.info_hash = info_hash; self @@ -552,6 +558,7 @@ mod tests { Response, ResponsePeer, }; + use crate::protocol::utils::get_connection_id; use crate::statistics::TrackerStatisticsEvent; use crate::tracker::tracker::TorrentTracker; use crate::udp::handle_announce; @@ -571,14 +578,16 @@ mod tests { let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); + let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + let request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) .with_port(client_port) .into(); - let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); let peers = 
tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -593,9 +602,12 @@ mod tests { #[tokio::test] async fn the_announced_peer_should_not_be_included_in_the_response() { - let request = AnnounceRequestBuilder::default().into(); let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + let request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) + .into(); + let response = handle_announce(remote_addr, &request, initialized_public_tracker()) .await .unwrap(); @@ -629,14 +641,16 @@ mod tests { let remote_client_port = 8081; let peer_address = Ipv4Addr::new(126, 0, 0, 2); + let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); + let request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) .with_port(client_port) .into(); - let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -663,8 +677,10 @@ mod tests { } async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { - let request = AnnounceRequestBuilder::default().into(); let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + let request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) + .into(); let response = handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); response } @@ -707,6 +723,7 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use crate::protocol::utils::get_connection_id; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{initialized_public_tracker, TorrentPeerBuilder}; @@ 
-721,14 +738,16 @@ mod tests { let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); + let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + let request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) .with_port(client_port) .into(); - let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -756,6 +775,7 @@ mod tests { Response, ResponsePeer, }; + use crate::protocol::utils::get_connection_id; use crate::statistics::TrackerStatisticsEvent; use crate::tracker::tracker::TorrentTracker; use crate::udp::handle_announce; @@ -776,14 +796,16 @@ mod tests { let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + let request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) .with_port(client_port) .into(); - let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -798,11 +820,15 @@ mod tests { #[tokio::test] async fn the_announced_peer_should_not_be_included_in_the_response() { - let request = AnnounceRequestBuilder::default().into(); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); + let request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) + .into(); + let response = handle_announce(remote_addr, 
&request, initialized_public_tracker()) .await .unwrap(); @@ -836,14 +862,16 @@ mod tests { let remote_client_port = 8081; let peer_address = "126.0.0.1".parse().unwrap(); + let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); + let request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) .with_port(client_port) .into(); - let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -874,7 +902,9 @@ mod tests { let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); let client_port = 8080; let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); - let request = AnnounceRequestBuilder::default().into(); + let request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) + .into(); let response = handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); response } @@ -903,13 +933,16 @@ mod tests { tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp6Announce); let tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); - handle_announce( - sample_ipv6_remote_addr(), - &AnnounceRequestBuilder::default().into(), - tracker.clone(), - ) - .await - .unwrap(); + + let remote_addr = sample_ipv6_remote_addr(); + + let announce_request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) + .into(); + + handle_announce(remote_addr, &announce_request, tracker.clone()) + .await + .unwrap(); } mod from_a_loopback_ip { @@ -918,6 +951,7 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use crate::protocol::utils::get_connection_id; use 
crate::statistics::StatsTracker; use crate::tracker::tracker::TorrentTracker; use crate::udp::handle_announce; @@ -940,14 +974,16 @@ mod tests { let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + let request = AnnounceRequestBuilder::default() + .with_connection_id(get_connection_id(&remote_addr)) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) .with_port(client_port) .into(); - let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -1036,7 +1072,7 @@ mod tests { let info_hashes = vec![*info_hash]; ScrapeRequest { - connection_id: get_connection_id(remote_addr), + connection_id: get_connection_id(&remote_addr), transaction_id: TransactionId(0i32), info_hashes, } @@ -1181,7 +1217,7 @@ mod tests { let info_hashes = vec![info_hash]; ScrapeRequest { - connection_id: get_connection_id(remote_addr), + connection_id: get_connection_id(&remote_addr), transaction_id: TransactionId(0i32), info_hashes, } From 43685b69004583f54e16e834b73122a7603483cb Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 18 Oct 2022 18:37:19 +0200 Subject: [PATCH 087/435] dev: connection cookie implmentation --- src/udp/connection_cookie.rs | 178 +++++++++++++++++++++++++++++++++++ src/udp/mod.rs | 1 + 2 files changed, 179 insertions(+) create mode 100644 src/udp/connection_cookie.rs diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs new file mode 100644 index 000000000..712c2ce61 --- /dev/null +++ b/src/udp/connection_cookie.rs @@ -0,0 +1,178 @@ +use std::net::SocketAddr; + +use aquatic_udp_protocol::ConnectionId; + +use crate::protocol::clock::time_extent::{Extent, TimeExtent}; +use crate::udp::ServerError; + +pub type Cookie = [u8; 8]; + +pub type 
SinceUnixEpochTimeExtent = TimeExtent; + +pub const COOKIE_LIFETIME: TimeExtent = TimeExtent::from_sec(2, &60); + +pub fn from_connection_id(connection_id: &ConnectionId) -> Cookie { + connection_id.0.to_le_bytes() +} + +pub fn into_connection_id(connection_cookie: &Cookie) -> ConnectionId { + ConnectionId(i64::from_le_bytes(*connection_cookie)) +} + +pub fn make_connection_cookie(remote_address: &SocketAddr) -> Cookie { + let time_extent = cookie_builder::get_last_time_extent(); + + let cookie = cookie_builder::build(remote_address, &time_extent); + //println!("remote_address: {remote_address:?}, time_extent: {time_extent:?}, cookie: {cookie:?}"); + cookie +} + +pub fn check_connection_cookie( + remote_address: &SocketAddr, + connection_cookie: &Cookie, +) -> Result { + // we loop backwards testing each time_extent until we find one that matches. + // (or the lifetime of time_extents is exhausted) + for offset in 0..=COOKIE_LIFETIME.amount { + let checking_time_extent = cookie_builder::get_last_time_extent().decrease(offset).unwrap(); + + let checking_cookie = cookie_builder::build(remote_address, &checking_time_extent); + //println!("remote_address: {remote_address:?}, time_extent: {checking_time_extent:?}, cookie: {checking_cookie:?}"); + + if *connection_cookie == checking_cookie { + return Ok(checking_time_extent); + } + } + Err(ServerError::InvalidConnectionId) +} + +mod cookie_builder { + use std::collections::hash_map::DefaultHasher; + use std::hash::{Hash, Hasher}; + use std::net::SocketAddr; + + use super::{Cookie, SinceUnixEpochTimeExtent, COOKIE_LIFETIME}; + use crate::protocol::clock::time_extent::{DefaultTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent}; + use crate::protocol::crypto::keys::seeds::{DefaultSeed, SeedKeeper}; + + pub(super) fn get_last_time_extent() -> SinceUnixEpochTimeExtent { + DefaultTimeExtentMaker::now(&COOKIE_LIFETIME.increment) + .unwrap() + .unwrap() + .increase(COOKIE_LIFETIME.amount) + .unwrap() + } + + pub(super) fn 
build(remote_address: &SocketAddr, time_extent: &TimeExtent) -> Cookie { + let seed = DefaultSeed::get_seed(); + + let mut hasher = DefaultHasher::new(); + + remote_address.hash(&mut hasher); + time_extent.hash(&mut hasher); + seed.hash(&mut hasher); + + hasher.finish().to_le_bytes() + } +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + + use super::cookie_builder::get_last_time_extent; + use crate::protocol::clock::time_extent::Extent; + use crate::protocol::clock::{StoppedClock, StoppedTime}; + use crate::udp::connection_cookie::{check_connection_cookie, make_connection_cookie, Cookie, COOKIE_LIFETIME}; + + fn make_test_socket_addr() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + } + + fn make_test_cookie(remote_address: Option<&SocketAddr>) -> Cookie { + make_connection_cookie(remote_address.unwrap_or(&make_test_socket_addr())) + } + + #[test] + fn it_should_make_a_connection_cookie() { + // remote_address: 127.0.0.1:8080, time_extent: 60, + // seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + const ID_COOKIE: Cookie = [45, 59, 50, 101, 97, 203, 48, 19]; + + let test_cookie = make_test_cookie(None); + //println!("{test_cookie:?}"); + + assert_eq!(test_cookie, ID_COOKIE) + } + + #[test] + fn it_should_make_different_connection_cookie_with_different_remote_addresses() { + let test_remote_address_1 = SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 1); + let test_remote_address_2 = SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 2); + let test_remote_address_3 = SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 1); + + assert_ne!( + make_test_cookie(Some(&test_remote_address_1)), + make_test_cookie(Some(&test_remote_address_2)) + ); + + assert_ne!( + make_test_cookie(Some(&test_remote_address_1)), + make_test_cookie(Some(&test_remote_address_3)) + ); + + assert_ne!( + make_test_cookie(Some(&test_remote_address_2)), + 
make_test_cookie(Some(&test_remote_address_3)) + ) + } + + #[test] + fn it_should_make_different_cookies_for_the_next_time_extent() { + let cookie_now = make_test_cookie(None); + + StoppedClock::local_add(&COOKIE_LIFETIME.increment).unwrap(); + + let cookie_next = make_test_cookie(None); + + assert_ne!(cookie_now, cookie_next) + } + + #[test] + fn it_should_be_valid_for_this_time_extent() { + let cookie_now = make_test_cookie(None); + + check_connection_cookie(&make_test_socket_addr(), &cookie_now).unwrap(); + } + + #[test] + fn it_should_be_valid_for_the_next_time_extent() { + let cookie_now = make_test_cookie(None); + + StoppedClock::local_add(&COOKIE_LIFETIME.increment).unwrap(); + + check_connection_cookie(&make_test_socket_addr(), &cookie_now).unwrap(); + } + + #[test] + fn it_cookies_should_be_valid_for_the_last_time_extent() { + let cookie_now = make_test_cookie(None); + + StoppedClock::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); + + check_connection_cookie(&make_test_socket_addr(), &cookie_now).unwrap(); + } + + #[test] + #[should_panic] + fn it_cookies_should_be_not_valid_after_their_last_time_extent() { + let cookie_now = make_test_cookie(None); + + let last_time_extent = get_last_time_extent().increase(COOKIE_LIFETIME.amount).unwrap(); + + StoppedClock::local_set(&last_time_extent.total_next().unwrap().unwrap()); + + check_connection_cookie(&make_test_socket_addr(), &cookie_now).unwrap(); + } +} diff --git a/src/udp/mod.rs b/src/udp/mod.rs index ae87973f1..4c98875c5 100644 --- a/src/udp/mod.rs +++ b/src/udp/mod.rs @@ -3,6 +3,7 @@ pub use self::handlers::*; pub use self::request::*; pub use self::server::*; +pub mod connection_cookie; pub mod errors; pub mod handlers; pub mod request; From 4949424b9158706205753667a5ed6c28ab5209c9 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 18 Oct 2022 18:40:18 +0200 Subject: [PATCH 088/435] dev: use the connection cookie implementation --- src/protocol/utils.rs | 14 +--------- src/tracker/peer.rs 
| 4 +-- src/udp/handlers.rs | 60 ++++++++++++++++++++++++------------------- 3 files changed, 37 insertions(+), 41 deletions(-) diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index 48fe4eb17..ac20aa41e 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -1,16 +1,4 @@ -use std::net::SocketAddr; - -use aquatic_udp_protocol::ConnectionId; - -use super::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; - -pub fn get_connection_id(remote_address: &SocketAddr) -> ConnectionId { - ConnectionId(((current_time() / 3600) | ((remote_address.port() as u64) << 36)) as i64) -} - -pub fn current_time() -> u64 { - DefaultClock::now().as_secs() -} +use super::clock::DurationSinceUnixEpoch; pub fn ser_unix_time_value(unix_time_value: &DurationSinceUnixEpoch, ser: S) -> Result { ser.serialize_u64(unix_time_value.as_millis() as u64) diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 2d0985fc9..7ac35179a 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -130,7 +130,7 @@ mod test { }; use crate::peer::TorrentPeer; - use crate::protocol::utils::get_connection_id; + use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; // todo: duplicate functions is PR 82. Remove duplication once both PR are merged. 
fn sample_ipv4_remote_addr() -> SocketAddr { @@ -152,7 +152,7 @@ mod test { let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); let default_request = AnnounceRequest { - connection_id: get_connection_id(&sample_ipv4_remote_addr()), + connection_id: into_connection_id(&make_connection_cookie(&sample_ipv4_remote_addr())), transaction_id: TransactionId(0i32), info_hash: info_hash_aquatic, peer_id: AquaticPeerId(*b"-qB00000000000000000"), diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index f22436bc1..d46cd9231 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -6,8 +6,8 @@ use aquatic_udp_protocol::{ NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; +use super::connection_cookie::{check_connection_cookie, from_connection_id, into_connection_id, make_connection_cookie}; use crate::peer::TorrentPeer; -use crate::protocol::utils::get_connection_id; use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::TorrentError; use crate::tracker::tracker::TorrentTracker; @@ -69,7 +69,8 @@ pub async fn handle_connect( request: &ConnectRequest, tracker: Arc, ) -> Result { - let connection_id = get_connection_id(&remote_addr); + let connection_cookie = make_connection_cookie(&remote_addr); + let connection_id = into_connection_id(&connection_cookie); let response = Response::from(ConnectResponse { transaction_id: request.transaction_id, @@ -94,6 +95,13 @@ pub async fn handle_announce( announce_request: &AnnounceRequest, tracker: Arc, ) -> Result { + match check_connection_cookie(&remote_addr, &from_connection_id(&announce_request.connection_id)) { + Ok(_) => {} + Err(e) => { + return Err(e); + } + } + let wrapped_announce_request = AnnounceRequestWrapper::new(announce_request.clone()); authenticate(&wrapped_announce_request.info_hash, tracker.clone()).await?; @@ -401,9 +409,9 @@ mod tests { use aquatic_udp_protocol::{ConnectRequest, 
ConnectResponse, Response, TransactionId}; use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr, TrackerStatsServiceMock}; - use crate::protocol::utils::get_connection_id; use crate::statistics::TrackerStatisticsEvent; use crate::tracker::tracker::TorrentTracker; + use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_connect; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; @@ -426,7 +434,7 @@ mod tests { assert_eq!( response, Response::Connect(ConnectResponse { - connection_id: get_connection_id(&sample_ipv4_remote_addr()), + connection_id: into_connection_id(&make_connection_cookie(&sample_ipv4_remote_addr())), transaction_id: request.transaction_id }) ); @@ -445,7 +453,7 @@ mod tests { assert_eq!( response, Response::Connect(ConnectResponse { - connection_id: get_connection_id(&sample_ipv4_remote_addr()), + connection_id: into_connection_id(&make_connection_cookie(&sample_ipv4_remote_addr())), transaction_id: request.transaction_id }) ); @@ -486,7 +494,7 @@ mod tests { TransactionId, }; - use crate::protocol::utils::get_connection_id; + use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handlers::tests::sample_ipv4_remote_addr; struct AnnounceRequestBuilder { @@ -500,7 +508,7 @@ mod tests { let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); let default_request = AnnounceRequest { - connection_id: get_connection_id(&sample_ipv4_remote_addr()), + connection_id: into_connection_id(&make_connection_cookie(&sample_ipv4_remote_addr())), transaction_id: TransactionId(0i32), info_hash: info_hash_aquatic, peer_id: AquaticPeerId([255u8; 20]), @@ -558,9 +566,9 @@ mod tests { Response, ResponsePeer, }; - use crate::protocol::utils::get_connection_id; use crate::statistics::TrackerStatisticsEvent; use crate::tracker::tracker::TorrentTracker; + use 
crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ @@ -581,7 +589,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) @@ -605,7 +613,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .into(); let response = handle_announce(remote_addr, &request, initialized_public_tracker()) @@ -644,7 +652,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) @@ -679,7 +687,7 @@ mod tests { async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .into(); let response = handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); response @@ -723,7 +731,7 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::protocol::utils::get_connection_id; + use 
crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{initialized_public_tracker, TorrentPeerBuilder}; @@ -741,7 +749,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) @@ -775,9 +783,9 @@ mod tests { Response, ResponsePeer, }; - use crate::protocol::utils::get_connection_id; use crate::statistics::TrackerStatisticsEvent; use crate::tracker::tracker::TorrentTracker; + use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ @@ -799,7 +807,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) @@ -826,7 +834,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .into(); let response = handle_announce(remote_addr, &request, initialized_public_tracker()) @@ -865,7 +873,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); let request = AnnounceRequestBuilder::default() - 
.with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) @@ -903,7 +911,7 @@ mod tests { let client_port = 8080; let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .into(); let response = handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); response @@ -937,7 +945,7 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); let announce_request = AnnounceRequestBuilder::default() - .with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .into(); handle_announce(remote_addr, &announce_request, tracker.clone()) @@ -951,9 +959,9 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::protocol::utils::get_connection_id; use crate::statistics::StatsTracker; use crate::tracker::tracker::TorrentTracker; + use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::TrackerConfigurationBuilder; @@ -977,7 +985,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(get_connection_id(&remote_addr)) + .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) @@ -1012,8 +1020,8 @@ mod tests { }; use super::TorrentPeerBuilder; - use crate::protocol::utils::get_connection_id; use 
crate::tracker::tracker::TorrentTracker; + use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_scrape; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; use crate::PeerId; @@ -1034,7 +1042,7 @@ mod tests { let info_hashes = vec![info_hash]; let request = ScrapeRequest { - connection_id: get_connection_id(&remote_addr), + connection_id: into_connection_id(&make_connection_cookie(&remote_addr)), transaction_id: TransactionId(0i32), info_hashes, }; @@ -1072,7 +1080,7 @@ mod tests { let info_hashes = vec![*info_hash]; ScrapeRequest { - connection_id: get_connection_id(&remote_addr), + connection_id: into_connection_id(&make_connection_cookie(&remote_addr)), transaction_id: TransactionId(0i32), info_hashes, } @@ -1217,7 +1225,7 @@ mod tests { let info_hashes = vec![info_hash]; ScrapeRequest { - connection_id: get_connection_id(&remote_addr), + connection_id: into_connection_id(&make_connection_cookie(&remote_addr)), transaction_id: TransactionId(0i32), info_hashes, } From 2911f3d05587bc0501979e76347c73279456b0fe Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 19 Oct 2022 14:08:05 +0200 Subject: [PATCH 089/435] tests: improve connection cookie tests Co-authored-by: Jose Celano --- src/udp/connection_cookie.rs | 161 +++++++++++++++++++++++++---------- 1 file changed, 114 insertions(+), 47 deletions(-) diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs index 712c2ce61..a17431b9c 100644 --- a/src/udp/connection_cookie.rs +++ b/src/udp/connection_cookie.rs @@ -80,99 +80,166 @@ mod cookie_builder { mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use super::cookie_builder::get_last_time_extent; - use crate::protocol::clock::time_extent::Extent; + use super::cookie_builder::{self}; + use crate::protocol::clock::time_extent::{self, Extent}; use crate::protocol::clock::{StoppedClock, StoppedTime}; use 
crate::udp::connection_cookie::{check_connection_cookie, make_connection_cookie, Cookie, COOKIE_LIFETIME}; - fn make_test_socket_addr() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + // #![feature(const_socketaddr)] + // const REMOTE_ADDRESS_IPV4_ZERO: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + #[test] + fn it_should_make_a_connection_cookie() { + let cookie = make_connection_cookie(&SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)); + + // Note: This constant may need to be updated in the future as the hash is not guaranteed to to be stable between versions. + const ID_COOKIE: Cookie = [23, 204, 198, 29, 48, 180, 62, 19]; + + assert_eq!(cookie, ID_COOKIE) } - fn make_test_cookie(remote_address: Option<&SocketAddr>) -> Cookie { - make_connection_cookie(remote_address.unwrap_or(&make_test_socket_addr())) + #[test] + fn it_should_make_the_same_connection_cookie_for_the_same_input_data() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + let time_extent_zero = time_extent::ZERO; + + let cookie = cookie_builder::build(&remote_address, &time_extent_zero); + let cookie_2 = cookie_builder::build(&remote_address, &time_extent_zero); + + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie_2:?}"); + + //remote_address: 127.0.0.1:8080, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [212, 9, 204, 223, 176, 190, 150, 153] + //remote_address: 127.0.0.1:8080, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [212, 9, 204, 223, 176, 190, 150, 153] + + assert_eq!(cookie, cookie_2) } #[test] - fn it_should_make_a_connection_cookie() { - // remote_address: 127.0.0.1:8080, time_extent: 60, - // seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + fn 
it_should_make_the_different_connection_cookie_for_different_ip() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + let remote_address_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::BROADCAST), 0); + let time_extent_zero = time_extent::ZERO; + + let cookie = cookie_builder::build(&remote_address, &time_extent_zero); + let cookie_2 = cookie_builder::build(&remote_address_2, &time_extent_zero); + + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); + println!("remote_address: {remote_address_2:?}, time_extent: {time_extent_zero:?}, cookie: {cookie_2:?}"); + + //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] + //remote_address: 255.255.255.255:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [217, 87, 239, 178, 182, 126, 66, 166] + + assert_ne!(cookie, cookie_2) + } + + #[test] + fn it_should_make_the_different_connection_cookie_for_different_ip_version() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + let remote_address_2 = SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0); + let time_extent_zero = time_extent::ZERO; + + let cookie = cookie_builder::build(&remote_address, &time_extent_zero); + let cookie_2 = cookie_builder::build(&remote_address_2, &time_extent_zero); - const ID_COOKIE: Cookie = [45, 59, 50, 101, 97, 203, 48, 19]; + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); + println!("remote_address: {remote_address_2:?}, time_extent: {time_extent_zero:?}, cookie: {cookie_2:?}"); - let test_cookie = make_test_cookie(None); - //println!("{test_cookie:?}"); + //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] + //remote_address: [::]:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [99, 119, 230, 177, 
20, 220, 163, 187] - assert_eq!(test_cookie, ID_COOKIE) + assert_ne!(cookie, cookie_2) } #[test] - fn it_should_make_different_connection_cookie_with_different_remote_addresses() { - let test_remote_address_1 = SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 1); - let test_remote_address_2 = SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 2); - let test_remote_address_3 = SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 1); - - assert_ne!( - make_test_cookie(Some(&test_remote_address_1)), - make_test_cookie(Some(&test_remote_address_2)) - ); - - assert_ne!( - make_test_cookie(Some(&test_remote_address_1)), - make_test_cookie(Some(&test_remote_address_3)) - ); - - assert_ne!( - make_test_cookie(Some(&test_remote_address_2)), - make_test_cookie(Some(&test_remote_address_3)) - ) + fn it_should_make_the_different_connection_cookie_for_different_socket() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + let remote_address_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 1); + let time_extent_zero = time_extent::ZERO; + + let cookie = cookie_builder::build(&remote_address, &time_extent_zero); + let cookie_2 = cookie_builder::build(&remote_address_2, &time_extent_zero); + + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); + println!("remote_address: {remote_address_2:?}, time_extent: {time_extent_zero:?}, cookie: {cookie_2:?}"); + + //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] + //remote_address: 0.0.0.0:1, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [38, 8, 0, 102, 92, 170, 220, 11] + + assert_ne!(cookie, cookie_2) + } + + #[test] + fn it_should_make_the_different_connection_cookie_for_different_time_extents() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + let time_extent_zero = time_extent::ZERO; + let time_extent_max = 
time_extent::MAX; + + let cookie = cookie_builder::build(&remote_address, &time_extent_zero); + let cookie_2 = cookie_builder::build(&remote_address, &time_extent_max); + + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); + println!("remote_address: {remote_address:?}, time_extent: {time_extent_max:?}, cookie: {cookie_2:?}"); + + //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] + //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 18446744073709551615.999999999s, amount: 18446744073709551615 }, cookie: [87, 111, 109, 125, 182, 206, 3, 201] + + assert_ne!(cookie, cookie_2) } #[test] fn it_should_make_different_cookies_for_the_next_time_extent() { - let cookie_now = make_test_cookie(None); + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + let cookie = make_connection_cookie(&remote_address); StoppedClock::local_add(&COOKIE_LIFETIME.increment).unwrap(); - let cookie_next = make_test_cookie(None); + let cookie_next = make_connection_cookie(&remote_address); - assert_ne!(cookie_now, cookie_next) + assert_ne!(cookie, cookie_next) } #[test] fn it_should_be_valid_for_this_time_extent() { - let cookie_now = make_test_cookie(None); + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + let cookie = make_connection_cookie(&remote_address); - check_connection_cookie(&make_test_socket_addr(), &cookie_now).unwrap(); + check_connection_cookie(&remote_address, &cookie).unwrap(); } #[test] fn it_should_be_valid_for_the_next_time_extent() { - let cookie_now = make_test_cookie(None); + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + let cookie = make_connection_cookie(&remote_address); StoppedClock::local_add(&COOKIE_LIFETIME.increment).unwrap(); - check_connection_cookie(&make_test_socket_addr(), &cookie_now).unwrap(); + 
check_connection_cookie(&remote_address, &cookie).unwrap(); } #[test] - fn it_cookies_should_be_valid_for_the_last_time_extent() { - let cookie_now = make_test_cookie(None); + fn it_should_be_valid_for_the_last_time_extent() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + let cookie = make_connection_cookie(&remote_address); StoppedClock::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); - check_connection_cookie(&make_test_socket_addr(), &cookie_now).unwrap(); + check_connection_cookie(&remote_address, &cookie).unwrap(); } #[test] #[should_panic] - fn it_cookies_should_be_not_valid_after_their_last_time_extent() { - let cookie_now = make_test_cookie(None); + fn it_should_be_not_valid_after_their_last_time_extent() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let last_time_extent = get_last_time_extent().increase(COOKIE_LIFETIME.amount).unwrap(); + let cookie = make_connection_cookie(&remote_address); - StoppedClock::local_set(&last_time_extent.total_next().unwrap().unwrap()); + StoppedClock::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap()); - check_connection_cookie(&make_test_socket_addr(), &cookie_now).unwrap(); + check_connection_cookie(&remote_address, &cookie).unwrap(); } } From adee3b5267386b642b7b09519eebf049a44f5eea Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Oct 2022 11:03:05 +0100 Subject: [PATCH 090/435] fix: [#97] make tracker statistics optional again Commit 7abe0f5bde1e209553d1a1e2d6fe644cd46a9395 introduced an unwanted change. Thread for statistics is always created regardless configuration. This commit reverts that change. The config option: config.tracker_usage_statistics defines wether the statistics should be enabled or not. 
--- src/main.rs | 6 +++++- src/tracker/statistics.rs | 6 ------ src/udp/handlers.rs | 18 +++++++++++------- 3 files changed, 16 insertions(+), 14 deletions(-) diff --git a/src/main.rs b/src/main.rs index bac7854bb..ffe080f9a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -24,7 +24,11 @@ async fn main() { }; // Initialize stats tracker - let stats_tracker = StatsTracker::new_running_instance(); + let mut stats_tracker = StatsTracker::new(); + + if config.tracker_usage_statistics { + stats_tracker.run_worker(); + } // Initialize Torrust tracker let tracker = match TorrentTracker::new(config.clone(), Box::new(stats_tracker)) { diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index a2a0de99b..2a216770e 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -62,12 +62,6 @@ pub struct StatsTracker { } impl StatsTracker { - pub fn new_running_instance() -> Self { - let mut stats_tracker = Self::new(); - stats_tracker.run_worker(); - stats_tracker - } - pub fn new() -> Self { Self { channel_sender: None, diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index d46cd9231..8117b6c89 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -271,17 +271,23 @@ mod tests { fn initialized_public_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Public).into()); - Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_running_instance())).unwrap()) + Arc::new(TorrentTracker::new(configuration, Box::new(initialized_stats_tracker())).unwrap()) } fn initialized_private_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Private).into()); - Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_running_instance())).unwrap()) + Arc::new(TorrentTracker::new(configuration, Box::new(initialized_stats_tracker())).unwrap()) } fn initialized_whitelisted_tracker() -> Arc { let configuration = 
Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Listed).into()); - Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_running_instance())).unwrap()) + Arc::new(TorrentTracker::new(configuration, Box::new(initialized_stats_tracker())).unwrap()) + } + + fn initialized_stats_tracker() -> StatsTracker { + let mut stats_tracker = StatsTracker::new(); + stats_tracker.run_worker(); + stats_tracker } fn sample_ipv4_remote_addr() -> SocketAddr { @@ -959,18 +965,16 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::statistics::StatsTracker; use crate::tracker::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::udp::handlers::tests::TrackerConfigurationBuilder; + use crate::udp::handlers::tests::{initialized_stats_tracker, TrackerConfigurationBuilder}; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let tracker = - Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_running_instance())).unwrap()); + let tracker = Arc::new(TorrentTracker::new(configuration, Box::new(initialized_stats_tracker())).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); From e12d8e6ec142883ae5734113920f7c8cdb80c30a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Oct 2022 18:29:13 +0100 Subject: [PATCH 091/435] test: [#97] add test for optional statistics Tracker statistics can be enabled or disabled using the configuration option `tracker_usage_statistics`. This commit adds tests for that behavior. 
--- src/main.rs | 6 +---- src/tracker/statistics.rs | 51 +++++++++++++++++++++++++++++++++++++++ src/udp/handlers.rs | 18 ++++++-------- 3 files changed, 59 insertions(+), 16 deletions(-) diff --git a/src/main.rs b/src/main.rs index ffe080f9a..dcb92acb8 100644 --- a/src/main.rs +++ b/src/main.rs @@ -24,11 +24,7 @@ async fn main() { }; // Initialize stats tracker - let mut stats_tracker = StatsTracker::new(); - - if config.tracker_usage_statistics { - stats_tracker.run_worker(); - } + let stats_tracker = StatsTracker::new_instance(config.tracker_usage_statistics); // Initialize Torrust tracker let tracker = match TorrentTracker::new(config.clone(), Box::new(stats_tracker)) { diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 2a216770e..fb4e4c0fe 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -62,6 +62,27 @@ pub struct StatsTracker { } impl StatsTracker { + pub fn new_active_instance() -> Self { + Self::new_instance(true) + } + + pub fn new_inactive_instance() -> Self { + Self::new_instance(false) + } + + pub fn new_instance(active: bool) -> Self { + let mut stats_tracker = Self { + channel_sender: None, + stats: Arc::new(RwLock::new(TrackerStatistics::new())), + }; + + if active { + stats_tracker.run_worker(); + } + + stats_tracker + } + pub fn new() -> Self { Self { channel_sender: None, @@ -155,3 +176,33 @@ impl TrackerStatisticsRepository for StatsTracker { pub trait TrackerStatsService: TrackerStatisticsEventSender + TrackerStatisticsRepository {} impl TrackerStatsService for StatsTracker {} + +#[cfg(test)] +mod test { + + mod event_sender { + use crate::statistics::{StatsTracker, TrackerStatisticsEvent, TrackerStatisticsEventSender}; + + #[tokio::test] + async fn should_not_send_any_event_when_statistics_are_disabled() { + let tracker_usage_statistics = false; + + let inactive_stats_tracker = StatsTracker::new_instance(tracker_usage_statistics); + + let result = 
inactive_stats_tracker.send_event(TrackerStatisticsEvent::Tcp4Announce).await; + + assert!(result.is_none()); + } + + #[tokio::test] + async fn should_send_events_when_statistics_are_enabled() { + let tracker_usage_statistics = true; + + let active_stats_tracker = StatsTracker::new_instance(tracker_usage_statistics); + + let result = active_stats_tracker.send_event(TrackerStatisticsEvent::Tcp4Announce).await; + + assert!(result.is_some()); + } + } +} diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 8117b6c89..845b860e9 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -271,23 +271,17 @@ mod tests { fn initialized_public_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Public).into()); - Arc::new(TorrentTracker::new(configuration, Box::new(initialized_stats_tracker())).unwrap()) + Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_active_instance())).unwrap()) } fn initialized_private_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Private).into()); - Arc::new(TorrentTracker::new(configuration, Box::new(initialized_stats_tracker())).unwrap()) + Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_active_instance())).unwrap()) } fn initialized_whitelisted_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Listed).into()); - Arc::new(TorrentTracker::new(configuration, Box::new(initialized_stats_tracker())).unwrap()) - } - - fn initialized_stats_tracker() -> StatsTracker { - let mut stats_tracker = StatsTracker::new(); - stats_tracker.run_worker(); - stats_tracker + Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_active_instance())).unwrap()) } fn sample_ipv4_remote_addr() -> SocketAddr { @@ -965,16 +959,18 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + 
use crate::statistics::StatsTracker; use crate::tracker::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::udp::handlers::tests::{initialized_stats_tracker, TrackerConfigurationBuilder}; + use crate::udp::handlers::tests::TrackerConfigurationBuilder; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let tracker = Arc::new(TorrentTracker::new(configuration, Box::new(initialized_stats_tracker())).unwrap()); + let tracker = + Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_active_instance())).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); From a27adf23d997663e6bf2f40c3021749faac0b7c1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 4 Oct 2022 18:27:18 +0100 Subject: [PATCH 092/435] test: integration tests for udp tracker --- tests/udp.rs | 293 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 293 insertions(+) create mode 100644 tests/udp.rs diff --git a/tests/udp.rs b/tests/udp.rs new file mode 100644 index 000000000..be4e42e41 --- /dev/null +++ b/tests/udp.rs @@ -0,0 +1,293 @@ +/// Integration tests for UDP tracker server +/// +/// cargo test udp_tracker_server -- --nocapture + +#[macro_use] +extern crate lazy_static; + +extern crate rand; + +mod udp_tracker_server { + + use core::panic; + use std::io::Cursor; + use std::net::IpAddr; + use std::sync::atomic::{AtomicBool, Ordering}; + use std::sync::Arc; + use std::sync::RwLock; + + use rand::{thread_rng, Rng}; + + use tokio::net::UdpSocket; + use tokio::task::JoinHandle; + + use torrust_tracker::jobs::udp_tracker; + use 
torrust_tracker::tracker::statistics::StatsTracker; + use torrust_tracker::tracker::tracker::TorrentTracker; + use torrust_tracker::udp::MAX_PACKET_SIZE; + use torrust_tracker::{logging, static_time, Configuration}; + + use aquatic_udp_protocol::{ + AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, + Port, Request, Response, TransactionId, + }; + + fn tracker_configuration() -> Arc { + let mut config = Configuration::default(); + //config.log_level = Some("debug".to_owned()); // Uncomment to enable logging + config.external_ip = Some("127.0.0.1".to_owned()); + config.udp_trackers[0].bind_address = "127.0.0.1:6969".to_owned(); + Arc::new(config) + } + + fn tracker_bind_address() -> String { + tracker_configuration().udp_trackers[0].bind_address.clone() + } + + pub struct UdpServer { + pub started: AtomicBool, + pub job: Option>, + } + + impl UdpServer { + pub fn new() -> Self { + Self { + started: AtomicBool::new(false), + job: None, + } + } + + pub async fn start(&mut self, configuration: Arc) { + if !self.started.load(Ordering::Relaxed) { + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize stats tracker + let stats_tracker = StatsTracker::new_running_instance(); + + // Initialize Torrust tracker + let tracker = match TorrentTracker::new(configuration.clone(), Box::new(stats_tracker)) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; + + // Initialize logging + logging::setup_logging(&configuration); + + // Start the UDP tracker job + self.job = Some(udp_tracker::start_job(&configuration.udp_trackers[0], tracker.clone())); + + self.started.store(true, Ordering::Relaxed); + } + } + } + + lazy_static! 
{ + static ref SERVER: RwLock = RwLock::new(UdpServer::new()); + } + + async fn start_udp_server(configuration: Arc) { + SERVER.write().unwrap().start(configuration.clone()).await; + } + + struct UdpClient { + socket: Arc, + } + + impl UdpClient { + async fn bind(local_address: &str) -> Self { + let socket = UdpSocket::bind(local_address).await.unwrap(); + Self { + socket: Arc::new(socket), + } + } + + async fn connect(&self, remote_address: &str) { + self.socket.connect(remote_address).await.unwrap() + } + + async fn send(&self, bytes: &[u8]) -> usize { + self.socket.writable().await.unwrap(); + self.socket.send(bytes).await.unwrap() + } + + async fn receive(&self, bytes: &mut [u8]) -> usize { + self.socket.readable().await.unwrap(); + self.socket.recv(bytes).await.unwrap() + } + } + + /// Creates a new UdpClient connected to a Udp server + async fn new_connected_udp_client(remote_address: &str) -> UdpClient { + let client = UdpClient::bind(&source_address(ephemeral_random_port())).await; + client.connect(remote_address).await; + client + } + + struct UdpTrackerClient { + pub udp_client: UdpClient, + } + + impl UdpTrackerClient { + async fn send(&self, request: Request) -> usize { + // Write request into a buffer + let request_buffer = vec![0u8; MAX_PACKET_SIZE]; + let mut cursor = Cursor::new(request_buffer); + + let request_data = match request.write(&mut cursor) { + Ok(_) => { + let position = cursor.position() as usize; + let inner_request_buffer = cursor.get_ref(); + // Return slice which contains written request data + &inner_request_buffer[..position] + } + Err(_) => panic!("could not write request to bytes."), + }; + + self.udp_client.send(&request_data).await + } + + async fn receive(&self) -> Response { + let mut response_buffer = [0u8; MAX_PACKET_SIZE]; + + let payload_size = self.udp_client.receive(&mut response_buffer).await; + + Response::from_bytes(&response_buffer[..payload_size], true).unwrap() + } + } + + /// Creates a new UdpTrackerClient 
connected to a Udp Tracker server + async fn new_connected_udp_tracker_client(remote_address: &str) -> UdpTrackerClient { + let udp_client = new_connected_udp_client(remote_address).await; + UdpTrackerClient { udp_client } + } + + fn empty_udp_request() -> [u8; MAX_PACKET_SIZE] { + [0; MAX_PACKET_SIZE] + } + + fn empty_buffer() -> [u8; MAX_PACKET_SIZE] { + [0; MAX_PACKET_SIZE] + } + + /// Generates a random ephemeral port for a client source address + fn ephemeral_random_port() -> u16 { + // todo: this may produce random test failures because two test can try to bind the same port. + // We could either use the same client for all tests (slower) or + // create a pool of available ports (with read/write lock) + let mut rng = thread_rng(); + rng.gen_range(49152..65535) + } + + /// Generates the source address for the UDP client + fn source_address(port: u16) -> String { + format!("127.0.0.1:{}", port) + } + + fn is_error_response(response: &Response, error_message: &str) -> bool { + match response { + Response::Error(error_response) => return error_response.message.starts_with(error_message), + _ => return false, + }; + } + + fn is_connect_response(response: &Response, transaction_id: TransactionId) -> bool { + match response { + Response::Connect(connect_response) => return connect_response.transaction_id == transaction_id, + _ => return false, + }; + } + + fn is_ipv4_announce_response(response: &Response) -> bool { + match response { + Response::AnnounceIpv4(_) => return true, + _ => return false, + }; + } + + // #[tokio::test] + // async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { + // start_udp_server(tracker_configuration().clone()).await; + + // let client = new_connected_udp_client(&tracker_bind_address()).await; + + // client.send(&empty_udp_request()).await; + + // let mut buffer = empty_buffer(); + // client.receive(&mut buffer).await; + // let response = Response::from_bytes(&buffer, true).unwrap(); + + // 
assert!(is_error_response(&response, "bad request")); + // } + + // #[tokio::test] + // async fn should_return_a_connect_response_when_the_client_sends_a_connection_request() { + // start_udp_server(tracker_configuration().clone()).await; + + // let client = new_connected_udp_tracker_client(&tracker_bind_address()).await; + + // let connect_request = ConnectRequest { + // transaction_id: TransactionId(123), + // }; + + // client.send(connect_request.into()).await; + + // let response = client.receive().await; + + // assert!(is_connect_response(&response, TransactionId(123))); + // } + + #[tokio::test] + async fn should_return_an_announce_response_when_the_client_sends_an_announce_request() { + start_udp_server(tracker_configuration().clone()).await; + + let client = new_connected_udp_tracker_client(&tracker_bind_address()).await; + + // todo: extract client.connect() -> ConnectionId + + // Get connection id before sending the announce request + + let connect_request = ConnectRequest { + transaction_id: TransactionId(123), + }; + + client.send(connect_request.into()).await; + + let response = client.receive().await; + + let connection_id = match response { + Response::Connect(connect_response) => connect_response.connection_id, + _ => panic!("error connecting to udp server {:?}", response), + }; + + // Send announce request + + let client_ip = match client.udp_client.socket.local_addr().unwrap().ip() { + IpAddr::V4(ip4) => ip4, + _ => panic!("error: IPV6 addresses cannot be used for the client ip in the announce request. 
Try to use IPV4."), + }; + + let announce_request = AnnounceRequest { + connection_id: ConnectionId(8724592475294857), + transaction_id: TransactionId(123i32), + info_hash: InfoHash([0u8; 20]), + peer_id: PeerId([255u8; 20]), + bytes_downloaded: NumberOfBytes(0i64), + bytes_uploaded: NumberOfBytes(0i64), + bytes_left: NumberOfBytes(0i64), + event: AnnounceEvent::Started, + ip_address: Some(client_ip), + key: PeerKey(0u32), + peers_wanted: NumberOfPeers(1i32), + port: Port(client.udp_client.socket.local_addr().unwrap().port()), + }; + + client.send(announce_request.into()).await; + + let response = client.receive().await; + + assert!(is_ipv4_announce_response(&response)); + } +} From 946e80a7a20e9cb31eb15eb04e2b89912ba7e6d6 Mon Sep 17 00:00:00 2001 From: Mick van Dijke Date: Fri, 7 Oct 2022 16:31:11 +0200 Subject: [PATCH 093/435] refactor: run tests with own udp tracker (#98) * refactor: run tests with own udp tracker * fixup! refactor: run tests with own udp tracker --- src/logging.rs | 4 +++ tests/udp.rs | 96 +++++++++++++++++++++++--------------------------- 2 files changed, 49 insertions(+), 51 deletions(-) diff --git a/src/logging.rs b/src/logging.rs index 209c9f848..7552a5459 100644 --- a/src/logging.rs +++ b/src/logging.rs @@ -18,6 +18,10 @@ pub fn setup_logging(cfg: &Configuration) { }, }; + if log_level == log::LevelFilter::Off { + return; + } + if let Err(_err) = fern::Dispatch::new() .format(|out, message, record| { out.finish(format_args!( diff --git a/tests/udp.rs b/tests/udp.rs index be4e42e41..ecfd879ce 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -1,20 +1,14 @@ /// Integration tests for UDP tracker server /// /// cargo test udp_tracker_server -- --nocapture - -#[macro_use] -extern crate lazy_static; - extern crate rand; mod udp_tracker_server { - use core::panic; use std::io::Cursor; - use std::net::IpAddr; + use std::net::Ipv4Addr; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; - use std::sync::RwLock; use rand::{thread_rng, 
Rng}; @@ -34,19 +28,15 @@ mod udp_tracker_server { fn tracker_configuration() -> Arc { let mut config = Configuration::default(); - //config.log_level = Some("debug".to_owned()); // Uncomment to enable logging - config.external_ip = Some("127.0.0.1".to_owned()); - config.udp_trackers[0].bind_address = "127.0.0.1:6969".to_owned(); + config.log_level = Some("off".to_owned()); // "off" is necessary when running multiple trackers + config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", ephemeral_random_port()); Arc::new(config) } - fn tracker_bind_address() -> String { - tracker_configuration().udp_trackers[0].bind_address.clone() - } - pub struct UdpServer { pub started: AtomicBool, pub job: Option>, + pub bind_address: Option, } impl UdpServer { @@ -54,6 +44,7 @@ mod udp_tracker_server { Self { started: AtomicBool::new(false), job: None, + bind_address: None, } } @@ -76,20 +67,22 @@ mod udp_tracker_server { // Initialize logging logging::setup_logging(&configuration); + let udp_tracker_config = &configuration.udp_trackers[0]; + // Start the UDP tracker job - self.job = Some(udp_tracker::start_job(&configuration.udp_trackers[0], tracker.clone())); + self.job = Some(udp_tracker::start_job(&udp_tracker_config, tracker.clone())); + + self.bind_address = Some(udp_tracker_config.bind_address.clone()); self.started.store(true, Ordering::Relaxed); } } } - lazy_static! 
{ - static ref SERVER: RwLock = RwLock::new(UdpServer::new()); - } - - async fn start_udp_server(configuration: Arc) { - SERVER.write().unwrap().start(configuration.clone()).await; + async fn new_running_udp_server(configuration: Arc) -> UdpServer { + let mut udp_server = UdpServer::new(); + udp_server.start(configuration).await; + udp_server } struct UdpClient { @@ -207,43 +200,49 @@ mod udp_tracker_server { }; } - // #[tokio::test] - // async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { - // start_udp_server(tracker_configuration().clone()).await; + #[tokio::test] + async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { + let configuration = tracker_configuration(); - // let client = new_connected_udp_client(&tracker_bind_address()).await; + let udp_server = new_running_udp_server(configuration).await; - // client.send(&empty_udp_request()).await; + let client = new_connected_udp_client(&udp_server.bind_address.unwrap()).await; - // let mut buffer = empty_buffer(); - // client.receive(&mut buffer).await; - // let response = Response::from_bytes(&buffer, true).unwrap(); + client.send(&empty_udp_request()).await; - // assert!(is_error_response(&response, "bad request")); - // } + let mut buffer = empty_buffer(); + client.receive(&mut buffer).await; + let response = Response::from_bytes(&buffer, true).unwrap(); - // #[tokio::test] - // async fn should_return_a_connect_response_when_the_client_sends_a_connection_request() { - // start_udp_server(tracker_configuration().clone()).await; + assert!(is_error_response(&response, "bad request")); + } - // let client = new_connected_udp_tracker_client(&tracker_bind_address()).await; + #[tokio::test] + async fn should_return_a_connect_response_when_the_client_sends_a_connection_request() { + let configuration = tracker_configuration(); - // let connect_request = ConnectRequest { - // transaction_id: TransactionId(123), - // }; + let udp_server = 
new_running_udp_server(configuration).await; - // client.send(connect_request.into()).await; + let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; - // let response = client.receive().await; + let connect_request = ConnectRequest { + transaction_id: TransactionId(123), + }; - // assert!(is_connect_response(&response, TransactionId(123))); - // } + client.send(connect_request.into()).await; + + let response = client.receive().await; + + assert!(is_connect_response(&response, TransactionId(123))); + } #[tokio::test] async fn should_return_an_announce_response_when_the_client_sends_an_announce_request() { - start_udp_server(tracker_configuration().clone()).await; + let configuration = tracker_configuration(); + + let udp_server = new_running_udp_server(configuration).await; - let client = new_connected_udp_tracker_client(&tracker_bind_address()).await; + let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; // todo: extract client.connect() -> ConnectionId @@ -264,13 +263,8 @@ mod udp_tracker_server { // Send announce request - let client_ip = match client.udp_client.socket.local_addr().unwrap().ip() { - IpAddr::V4(ip4) => ip4, - _ => panic!("error: IPV6 addresses cannot be used for the client ip in the announce request. 
Try to use IPV4."), - }; - let announce_request = AnnounceRequest { - connection_id: ConnectionId(8724592475294857), + connection_id: ConnectionId(connection_id.0), transaction_id: TransactionId(123i32), info_hash: InfoHash([0u8; 20]), peer_id: PeerId([255u8; 20]), @@ -278,7 +272,7 @@ mod udp_tracker_server { bytes_uploaded: NumberOfBytes(0i64), bytes_left: NumberOfBytes(0i64), event: AnnounceEvent::Started, - ip_address: Some(client_ip), + ip_address: Some(Ipv4Addr::new(0, 0, 0, 0)), key: PeerKey(0u32), peers_wanted: NumberOfPeers(1i32), port: Port(client.udp_client.socket.local_addr().unwrap().port()), From f5aee0325c0dc10e902287dc74056b7f4e870940 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Oct 2022 11:44:46 +0100 Subject: [PATCH 094/435] fix: re-format with rust nightly --- tests/udp.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/tests/udp.rs b/tests/udp.rs index ecfd879ce..49f8e7c16 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -10,22 +10,19 @@ mod udp_tracker_server { use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; + use aquatic_udp_protocol::{ + AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, + Port, Request, Response, TransactionId, + }; use rand::{thread_rng, Rng}; - use tokio::net::UdpSocket; use tokio::task::JoinHandle; - use torrust_tracker::jobs::udp_tracker; use torrust_tracker::tracker::statistics::StatsTracker; use torrust_tracker::tracker::tracker::TorrentTracker; use torrust_tracker::udp::MAX_PACKET_SIZE; use torrust_tracker::{logging, static_time, Configuration}; - use aquatic_udp_protocol::{ - AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, - Port, Request, Response, TransactionId, - }; - fn tracker_configuration() -> Arc { let mut config = Configuration::default(); config.log_level = Some("off".to_owned()); // "off" is necessary when 
running multiple trackers From 1f258c1cd01c75d003d0ab130cfa58d31fa4f1b7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Oct 2022 12:17:28 +0100 Subject: [PATCH 095/435] test: integration test for udp scrape request --- tests/udp.rs | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 52 insertions(+), 2 deletions(-) diff --git a/tests/udp.rs b/tests/udp.rs index 49f8e7c16..31f631fd6 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -12,7 +12,7 @@ mod udp_tracker_server { use aquatic_udp_protocol::{ AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, - Port, Request, Response, TransactionId, + Port, Request, Response, ScrapeRequest, TransactionId, }; use rand::{thread_rng, Rng}; use tokio::net::UdpSocket; @@ -164,7 +164,7 @@ mod udp_tracker_server { /// Generates a random ephemeral port for a client source address fn ephemeral_random_port() -> u16 { - // todo: this may produce random test failures because two test can try to bind the same port. + // todo: this may produce random test failures because two tests can try to bind the same port. 
// We could either use the same client for all tests (slower) or // create a pool of available ports (with read/write lock) let mut rng = thread_rng(); @@ -197,6 +197,13 @@ mod udp_tracker_server { }; } + fn is_scrape_response(response: &Response) -> bool { + match response { + Response::Scrape(_) => return true, + _ => return false, + }; + } + #[tokio::test] async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { let configuration = tracker_configuration(); @@ -281,4 +288,47 @@ mod udp_tracker_server { assert!(is_ipv4_announce_response(&response)); } + + #[tokio::test] + async fn should_return_a_scrape_response_when_the_client_sends_a_scrape_request() { + let configuration = tracker_configuration(); + + let udp_server = new_running_udp_server(configuration).await; + + let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; + + // todo: extract client.connect() -> ConnectionId + + // Get connection id before sending the announce request + + let connect_request = ConnectRequest { + transaction_id: TransactionId(123i32), + }; + + client.send(connect_request.into()).await; + + let response = client.receive().await; + + let connection_id = match response { + Response::Connect(connect_response) => connect_response.connection_id, + _ => panic!("error connecting to udp server {:?}", response), + }; + + // Send scrape request + + // Full scrapes are not allowed so it will return "bad request" error with empty vector + let info_hashes = vec![InfoHash([0u8; 20])]; + + let scrape_request = ScrapeRequest { + connection_id: ConnectionId(connection_id.0), + transaction_id: TransactionId(123i32), + info_hashes, + }; + + client.send(scrape_request.into()).await; + + let response = client.receive().await; + + assert!(is_scrape_response(&response)); + } } From 508803a73aaa651ee50afe99551fd18fcc15f141 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Oct 2022 12:36:35 +0100 Subject: [PATCH 096/435] refactor: 
extract function --- tests/udp.rs | 47 +++++++++++++++-------------------------------- 1 file changed, 15 insertions(+), 32 deletions(-) diff --git a/tests/udp.rs b/tests/udp.rs index 31f631fd6..83fec1fb3 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -240,6 +240,19 @@ mod udp_tracker_server { assert!(is_connect_response(&response, TransactionId(123))); } + async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { + let connect_request = ConnectRequest { transaction_id }; + + client.send(connect_request.into()).await; + + let response = client.receive().await; + + match response { + Response::Connect(connect_response) => connect_response.connection_id, + _ => panic!("error connecting to udp server {:?}", response), + } + } + #[tokio::test] async fn should_return_an_announce_response_when_the_client_sends_an_announce_request() { let configuration = tracker_configuration(); @@ -248,22 +261,7 @@ mod udp_tracker_server { let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; - // todo: extract client.connect() -> ConnectionId - - // Get connection id before sending the announce request - - let connect_request = ConnectRequest { - transaction_id: TransactionId(123), - }; - - client.send(connect_request.into()).await; - - let response = client.receive().await; - - let connection_id = match response { - Response::Connect(connect_response) => connect_response.connection_id, - _ => panic!("error connecting to udp server {:?}", response), - }; + let connection_id = send_connection_request(TransactionId(123), &client).await; // Send announce request @@ -297,22 +295,7 @@ mod udp_tracker_server { let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; - // todo: extract client.connect() -> ConnectionId - - // Get connection id before sending the announce request - - let connect_request = ConnectRequest { - transaction_id: TransactionId(123i32), - }; - - 
client.send(connect_request.into()).await; - - let response = client.receive().await; - - let connection_id = match response { - Response::Connect(connect_response) => connect_response.connection_id, - _ => panic!("error connecting to udp server {:?}", response), - }; + let connection_id = send_connection_request(TransactionId(123), &client).await; // Send scrape request From d2c69fa5f531520192de03e66335fa842e06ee7a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Oct 2022 09:24:58 +0100 Subject: [PATCH 097/435] fix: test tear up --- tests/udp.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/udp.rs b/tests/udp.rs index 83fec1fb3..00bb42366 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -51,7 +51,7 @@ mod udp_tracker_server { lazy_static::initialize(&static_time::TIME_AT_APP_START); // Initialize stats tracker - let stats_tracker = StatsTracker::new_running_instance(); + let stats_tracker = StatsTracker::new_active_instance(); // Initialize Torrust tracker let tracker = match TorrentTracker::new(configuration.clone(), Box::new(stats_tracker)) { From 5dcea43da6fe6e7590ff69f4ac04200d2394acde Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Oct 2022 13:44:22 +0100 Subject: [PATCH 098/435] fix: initialize loggin once `setup_logging` cannot be called twice becuase it panics. 
--- src/logging.rs | 42 ++++++++++++++++++++++++------------------ tests/udp.rs | 2 +- 2 files changed, 25 insertions(+), 19 deletions(-) diff --git a/src/logging.rs b/src/logging.rs index 7552a5459..5d0efa8a4 100644 --- a/src/logging.rs +++ b/src/logging.rs @@ -1,27 +1,32 @@ -use log::info; +use std::str::FromStr; +use std::sync::Once; + +use log::{info, LevelFilter}; use crate::Configuration; +static INIT: Once = Once::new(); + pub fn setup_logging(cfg: &Configuration) { - let log_level = match &cfg.log_level { - None => log::LevelFilter::Info, - Some(level) => match level.as_str() { - "off" => log::LevelFilter::Off, - "trace" => log::LevelFilter::Trace, - "debug" => log::LevelFilter::Debug, - "info" => log::LevelFilter::Info, - "warn" => log::LevelFilter::Warn, - "error" => log::LevelFilter::Error, - _ => { - panic!("Unknown log level encountered: '{}'", level.as_str()); - } - }, - }; - - if log_level == log::LevelFilter::Off { + let level = config_level_or_default(&cfg.log_level); + + if level == log::LevelFilter::Off { return; } + INIT.call_once(|| { + stdout_config(level); + }); +} + +fn config_level_or_default(log_level: &Option) -> LevelFilter { + match log_level { + None => log::LevelFilter::Info, + Some(level) => LevelFilter::from_str(level).unwrap(), + } +} + +fn stdout_config(level: LevelFilter) { if let Err(_err) = fern::Dispatch::new() .format(|out, message, record| { out.finish(format_args!( @@ -32,11 +37,12 @@ pub fn setup_logging(cfg: &Configuration) { message )) }) - .level(log_level) + .level(level) .chain(std::io::stdout()) .apply() { panic!("Failed to initialize logging.") } + info!("logging initialized."); } diff --git a/tests/udp.rs b/tests/udp.rs index 00bb42366..b391b922f 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -25,7 +25,7 @@ mod udp_tracker_server { fn tracker_configuration() -> Arc { let mut config = Configuration::default(); - config.log_level = Some("off".to_owned()); // "off" is necessary when running multiple trackers + 
config.log_level = Some("off".to_owned()); config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", ephemeral_random_port()); Arc::new(config) } From 971b34decc01c75b05f9dd0384fa9ad08d532d13 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 24 Oct 2022 15:45:18 +0200 Subject: [PATCH 099/435] chore: update project dependencies * minor api changes for config applied --- Cargo.lock | 984 +++++++++++++++++++++++++++++++------------------- Cargo.toml | 30 +- src/config.rs | 9 +- 3 files changed, 643 insertions(+), 380 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1a4fe8b4f..0a60397f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21,13 +21,22 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "0.7.18" +version = "0.7.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" dependencies = [ "memchr", ] +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + [[package]] name = "ansi_term" version = "0.12.1" @@ -61,9 +70,9 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "async-trait" -version = "0.1.53" +version = "0.1.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6aa3524a2dfcf9fe180c51eae2b58738348d819517ceadf95789c51fff7600" +checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" dependencies = [ "proc-macro2", "quote", @@ -89,15 +98,15 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "base-x" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dc19a4937b4fbd3fe3379793130e42060d10627a360f2127802b10b87e7baf74" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" [[package]] name = "base64" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "bigdecimal" @@ -107,8 +116,8 @@ checksum = "d1e50562e37200edf7c6c43e54a08e64a5553bfb59d9c297d5572512aa517256" dependencies = [ "num-bigint 0.3.3", "num-integer", - "num-traits 0.2.15", - "serde 1.0.137", + "num-traits", + "serde", ] [[package]] @@ -169,9 +178,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" +checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" dependencies = [ "generic-array", ] @@ -194,9 +203,9 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.9.1" +version = "3.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" +checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" [[package]] name = "byteorder" @@ -206,9 +215,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" [[package]] name = "cc" @@ -222,7 +231,7 @@ version = "0.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27" dependencies = [ - "nom", + "nom 5.1.2", ] [[package]] @@ -233,23 +242,25 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.19" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "bfd4d1b31faaa3a89d7934dbded3111da0d2ef28e3ebccdb4f0179f5929d1ef1" dependencies = [ - "libc", + "iana-time-zone", + "js-sys", "num-integer", - "num-traits 0.2.15", - "serde 1.0.137", + "num-traits", + "serde", "time 0.1.44", + "wasm-bindgen", "winapi", ] [[package]] name = "clang-sys" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cc00842eed744b858222c4c9faf7243aafc6d33f92f96935263ef4d8a41ce21" +checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" dependencies = [ "glob", "libc", @@ -280,17 +291,30 @@ dependencies = [ "cc", ] +[[package]] +name = "codespan-reporting" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" +dependencies = [ + "termcolor", + "unicode-width", +] + [[package]] name = "config" -version = "0.11.0" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1b9d958c2b1368a663f05538fc1b5975adce1e19f435acceae987aceeeb369" +checksum = "11f1667b8320afa80d69d8bbe40830df2c8a06003d86f73d8e003b2c48df416d" dependencies = [ + "async-trait", + "json5", "lazy_static", - "nom", + "nom 7.1.1", + "pathdiff", + "ron", "rust-ini", - "serde 1.0.137", - "serde-hjson", + "serde", "serde_json", "toml", "yaml-rust", @@ -326,9 +350,9 @@ checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" 
[[package]] name = "cpufeatures" -version = "0.2.2" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b" +checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" dependencies = [ "libc", ] @@ -344,14 +368,58 @@ dependencies = [ [[package]] name = "crypto-common" -version = "0.1.3" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", ] +[[package]] +name = "cxx" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b7d4e43b25d3c994662706a1d4fcfc32aaa6afd287502c111b237093bb23f3a" +dependencies = [ + "cc", + "cxxbridge-flags", + "cxxbridge-macro", + "link-cplusplus", +] + +[[package]] +name = "cxx-build" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84f8829ddc213e2c1368e51a2564c552b65a8cb6a28f31e576270ac81d5e5827" +dependencies = [ + "cc", + "codespan-reporting", + "once_cell", + "proc-macro2", + "quote", + "scratch", + "syn", +] + +[[package]] +name = "cxxbridge-flags" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e72537424b474af1460806647c41d4b6d35d09ef7fe031c5c2fa5766047cc56a" + +[[package]] +name = "cxxbridge-macro" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "309e4fb93eed90e1e14bea0da16b209f81813ba9fc7830c20ed151dd7bc0a4d7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "darling" version = "0.14.1" @@ -422,11 +490,11 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" +checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" dependencies = [ - "block-buffer 0.10.2", + "block-buffer 0.10.3", "crypto-common", ] @@ -436,11 +504,17 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" +[[package]] +name = "dlv-list" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" + [[package]] name = "either" -version = "1.6.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" [[package]] name = "env_logger" @@ -469,9 +543,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" dependencies = [ "instant", ] @@ -487,13 +561,11 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b39522e96686d38f4bc984b9198e3a0613264abaebaff2c5c918bfa6b6da09af" +checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" dependencies = [ - "cfg-if", "crc32fast", - "libc", "libz-sys", "miniz_oxide", ] @@ -521,11 +593,10 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.0.1" +version = "1.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" dependencies = [ - "matches", "percent-encoding", ] @@ -601,9 +672,9 @@ checksum = "1847abb9cb65d566acd5942e94aea9c8f547ad02c98e1649326fc0e8910b8b1e" [[package]] name = "futures" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" +checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" dependencies = [ "futures-channel", "futures-core", @@ -616,9 +687,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" dependencies = [ "futures-core", "futures-sink", @@ -626,15 +697,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" [[package]] name = "futures-executor" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" dependencies = [ "futures-core", "futures-task", @@ -643,15 +714,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" [[package]] name = "futures-macro" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" dependencies = [ "proc-macro2", "quote", @@ -660,21 +731,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" [[package]] name = "futures-task" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" [[package]] name = "futures-util" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" dependencies = [ "futures-channel", "futures-core", @@ -690,9 +761,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.5" +version = "0.14.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" +checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" dependencies = [ "typenum", "version_check", @@ -700,13 +771,13 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.6" +version = "0.2.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ "cfg-if", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", ] [[package]] @@ -717,9 +788,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" +checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" dependencies = [ "bytes", "fnv", @@ -730,7 +801,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.1", + "tokio-util", "tracing", ] @@ -754,18 +825,18 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d452c155cb93fecdfb02a73dd57b5d8e442c2063bd7aac72f1bc5e4263a43086" +checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa" dependencies = [ "hashbrown 0.12.3", ] [[package]] name = "headers" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cff78e5788be1e0ab65b04d306b2ed5092c815ec97ec70f4ebd5aee158aa55d" +checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ "base64", "bitflags", @@ -774,7 +845,7 @@ dependencies = [ "http", "httpdate", "mime", - "sha-1 0.10.0", + "sha1 0.10.5", ] [[package]] @@ -803,9 +874,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "http" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb" 
+checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", @@ -814,9 +885,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", @@ -825,9 +896,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -843,9 +914,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.18" +version = "0.14.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2" +checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" dependencies = [ "bytes", "futures-channel", @@ -865,6 +936,30 @@ dependencies = [ "want", ] +[[package]] +name = "iana-time-zone" +version = "0.1.51" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5a6ef98976b22b3b7f2f3a806f858cb862044cfa66805aa3ad84cb3d3b785ed" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "winapi", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +dependencies = [ + "cxx", + "cxx-build", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -873,24 +968,23 @@ 
checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.2.3" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" dependencies = [ - "matches", "unicode-bidi", "unicode-normalization", ] [[package]] name = "indexmap" -version = "1.8.1" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" +checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" dependencies = [ "autocfg", - "hashbrown 0.11.2", - "serde 1.0.137", + "hashbrown 0.12.3", + "serde", ] [[package]] @@ -916,19 +1010,30 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.1" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" +checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" [[package]] name = "js-sys" -version = "0.3.57" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "671a26f820db17c2a2750743f1dd03bafd15b98c9f30c7c2628c024c05d73397" +checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" dependencies = [ "wasm-bindgen", ] +[[package]] +name = "json5" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1" +dependencies = [ + "pest", + "pest_derive", + "serde", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -966,9 +1071,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.125" +version = "0.2.136" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5916d2ae698f6de9bfb891ad7a8d65c09d232dc58cc4ac433c7da3b2fd84bc2b" +checksum = "55edcf6c0bb319052dea84732cf99db461780fd5e8d3eb46ab6ff312ab31f197" [[package]] name = "libloading" @@ -993,26 +1098,35 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.6" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92e7e15d7610cce1d9752e137625f14e61a28cd45929b6e12e47b50fe154ee2e" +checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" dependencies = [ "cc", "pkg-config", "vcpkg", ] +[[package]] +name = "link-cplusplus" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369" +dependencies = [ + "cc", +] + [[package]] name = "linked-hash-map" -version = "0.5.4" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "lock_api" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" dependencies = [ "autocfg", "scopeguard", @@ -1036,12 +1150,6 @@ dependencies = [ "hashbrown 0.11.2", ] -[[package]] -name = "matches" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" - [[package]] name = "memchr" version = "2.5.0" @@ -1073,36 +1181,31 @@ dependencies = [ "unicase", ] +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] 
name = "miniz_oxide" -version = "0.5.1" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2b29bd4bc3f33391105ebee3589c19197c4271e3e5a9ec9bfe8127eeff8f082" +checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.2" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52da4364ffb0e4fe33a9841a98a3f3014fb964045ce4f7a45a398243c8d6b0c9" +checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" dependencies = [ "libc", "log", - "miow", - "ntapi", "wasi 0.11.0+wasi-snapshot-preview1", - "winapi", -] - -[[package]] -name = "miow" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" -dependencies = [ - "winapi", + "windows-sys 0.36.1", ] [[package]] @@ -1141,7 +1244,7 @@ dependencies = [ "once_cell", "pem", "percent-encoding", - "serde 1.0.137", + "serde", "serde_json", "socket2", "twox-hash", @@ -1170,14 +1273,14 @@ dependencies = [ "lazy_static", "lexical", "num-bigint 0.4.3", - "num-traits 0.2.15", + "num-traits", "rand", "regex", "rust_decimal", "saturating", - "serde 1.0.137", + "serde", "serde_json", - "sha1", + "sha1 0.6.1", "sha2", "smallvec", "subprocess", @@ -1232,18 +1335,18 @@ version = "5.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" dependencies = [ - "lexical-core", "memchr", "version_check", ] [[package]] -name = "ntapi" -version = "0.3.7" +name = "nom" +version = "7.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f" +checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" dependencies = [ - "winapi", + "memchr", + 
"minimal-lexical", ] [[package]] @@ -1254,7 +1357,7 @@ checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" dependencies = [ "autocfg", "num-integer", - "num-traits 0.2.15", + "num-traits", ] [[package]] @@ -1265,7 +1368,7 @@ checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" dependencies = [ "autocfg", "num-integer", - "num-traits 0.2.15", + "num-traits", ] [[package]] @@ -1275,16 +1378,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg", - "num-traits 0.2.15", -] - -[[package]] -name = "num-traits" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" -dependencies = [ - "num-traits 0.2.15", + "num-traits", ] [[package]] @@ -1317,9 +1411,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.10.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" +checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1" [[package]] name = "opaque-debug" @@ -1329,9 +1423,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.41" +version = "0.10.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "618febf65336490dfcf20b73f885f5651a0c89c64c2d4a8c3662585a70bf5bd0" +checksum = "12fc0523e3bd51a692c8850d075d74dc062ccf251c0110668cbd921917118a13" dependencies = [ "bitflags", "cfg-if", @@ -1370,9 +1464,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.75" +version = "0.9.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5f9bd0c2710541a3cda73d6f9ac4f1b240de4ae261065d309dbe73d9dceb42f" +checksum = 
"b03b84c3b2d099b81f0953422b4d4ad58761589d0229b5506356afca05a3670a" dependencies = [ "autocfg", "cc", @@ -1382,31 +1476,45 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "ordered-multimap" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccd746e37177e1711c20dd619a1620f34f5c8b569c53590a72dedd5344d8924a" +dependencies = [ + "dlv-list", + "hashbrown 0.12.3", +] + [[package]] name = "parking_lot" -version = "0.11.2" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ - "instant", "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" -version = "0.8.5" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" +checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" dependencies = [ "cfg-if", - "instant", "libc", "redox_syscall", "smallvec", - "winapi", + "windows-sys 0.42.0", ] +[[package]] +name = "pathdiff" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" + [[package]] name = "peeking_take_while" version = "0.1.2" @@ -1426,24 +1534,68 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.1.0" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" + +[[package]] +name = "pest" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc7bc69c062e492337d74d59b120c274fd3d261b6bf6d3207d499b4b379c41a" +dependencies = [ + "thiserror", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = 
"2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b75706b9642ebcb34dab3bc7750f811609a0eb1dd8b88c2d15bf628c1c65b2" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4f9272122f5979a6511a749af9db9bfc810393f63119970d7085fed1c4ea0db" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pest_meta" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "4c8717927f9b79515e565a64fe46c38b8cd0427e64c40680b14a7365ab09ac8d" +dependencies = [ + "once_cell", + "pest", + "sha1 0.10.5", +] [[package]] name = "pin-project" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", @@ -1482,11 +1634,11 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.38" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9027b48e9d4c9175fa2218adf3557f91c1137021739951d4932f5f8268ac48aa" +checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" dependencies = [ - "unicode-xid", + "unicode-ident", ] [[package]] @@ -1497,18 
+1649,18 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.18" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" +checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" dependencies = [ "proc-macro2", ] [[package]] name = "r2d2" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "545c5bc2b880973c9c10e4067418407a0ccaa3091781d1671d46eb35107cb26f" +checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" dependencies = [ "log", "parking_lot", @@ -1564,27 +1716,27 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] [[package]] name = "redox_syscall" -version = "0.2.13" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] [[package]] name = "regex" -version = "1.5.5" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" +checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" dependencies = [ "aho-corasick", "memchr", @@ -1593,9 +1745,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.25" +version = "0.6.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" +checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" [[package]] name = "remove_dir_all" @@ -1621,6 +1773,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "ron" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a" +dependencies = [ + "base64", + "bitflags", + "serde", +] + [[package]] name = "rusqlite" version = "0.28.0" @@ -1637,19 +1800,23 @@ dependencies = [ [[package]] name = "rust-ini" -version = "0.13.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e52c148ef37f8c375d49d5a73aa70713125b7f19095948a923f80afdeb22ec2" +checksum = "f6d5f2436026b4f6e79dc829837d467cc7e9a55ee40e750d716713540715a2df" +dependencies = [ + "cfg-if", + "ordered-multimap", +] [[package]] name = "rust_decimal" -version = "1.23.1" +version = "1.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22dc69eadbf0ee2110b8d20418c0c6edbaefec2811c4963dc17b6344e11fe0f8" +checksum = "ee9164faf726e4f3ece4978b25ca877ddc6802fa77f38cdccb32c7f805ecd70c" dependencies = [ "arrayvec 0.7.2", - "num-traits 0.2.15", - "serde 1.0.137", + "num-traits", + "serde", ] [[package]] @@ -1673,27 +1840,35 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.9", + "semver 1.0.14", ] [[package]] name = "rustls" -version = "0.19.1" +version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" +checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" dependencies = [ - "base64", "log", "ring", "sct", "webpki", ] +[[package]] +name = "rustls-pemfile" +version = "0.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" +dependencies = [ + "base64", +] + [[package]] name = "ryu" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" +checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" [[package]] name = "safemem" @@ -1709,19 +1884,19 @@ checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" [[package]] name = "schannel" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" dependencies = [ "lazy_static", - "winapi", + "windows-sys 0.36.1", ] [[package]] name = "scheduled-thread-pool" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f74fd1204073fa02d5d5d68bec8021be4c38690b61264b2fdb48083d0e7d7" +checksum = "977a7519bff143a44f842fd07e80ad1329295bd71686457f18e496736f4bf9bf" dependencies = [ "parking_lot", ] @@ -1738,11 +1913,17 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "scratch" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898" + [[package]] name = "sct" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ "ring", "untrusted", @@ 
-1750,9 +1931,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.6.1" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" +checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" dependencies = [ "bitflags", "core-foundation", @@ -1782,9 +1963,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.9" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cb243bdfdb5936c8dc3c45762a19d12ab4550cdc753bc247637d4ec35a040fd" +checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" [[package]] name = "semver-parser" @@ -1794,55 +1975,37 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "0.8.23" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dad3f759919b92c3068c696c15c3d17238234498bbdcc80f2c469606f948ac8" - -[[package]] -name = "serde" -version = "1.0.137" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" +checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" dependencies = [ "serde_derive", ] -[[package]] -name = "serde-hjson" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a3a4e0ea8a88553209f6cc6cfe8724ecad22e1acf372793c27d995290fe74f8" -dependencies = [ - "lazy_static", - "num-traits 0.1.43", - "regex", - "serde 0.8.23", -] - [[package]] name = "serde_bencode" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "934d8bdbaa0126dafaea9a8833424a211d9661897717846c6bb782349ca1c30d" dependencies = [ - "serde 1.0.137", + "serde", "serde_bytes", ] [[package]] name = "serde_bytes" -version = "0.11.6" +version = "0.11.7" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212e73464ebcde48d723aa02eb270ba62eff38a9b732df31f33f1b4e145f3a54" +checksum = "cfc50e8183eeeb6178dcb167ae34a8051d63535023ae38b5d8d12beae193d37b" dependencies = [ - "serde 1.0.137", + "serde", ] [[package]] name = "serde_derive" -version = "1.0.137" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" +checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" dependencies = [ "proc-macro2", "quote", @@ -1851,13 +2014,13 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.81" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" +checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" dependencies = [ "itoa", "ryu", - "serde 1.0.137", + "serde", ] [[package]] @@ -1869,30 +2032,30 @@ dependencies = [ "form_urlencoded", "itoa", "ryu", - "serde 1.0.137", + "serde", ] [[package]] name = "serde_with" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89df7a26519371a3cce44fbb914c2819c84d9b897890987fa3ab096491cc0ea8" +checksum = "368f2d60d049ea019a84dcd6687b0d1e0030fe663ae105039bdf967ed5e6a9a7" dependencies = [ "base64", "chrono", "hex", "indexmap", - "serde 1.0.137", + "serde", "serde_json", "serde_with_macros", - "time 0.3.13", + "time 0.3.16", ] [[package]] name = "serde_with_macros" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de337f322382fcdfbb21a014f7c224ee041a23785651db67b9827403178f698f" +checksum = "1ccadfacf6cf10faad22bbadf55986bdd0856edfb5d9210aa1dcf1f516e84e93" dependencies = [ "darling", "proc-macro2", @@ -1900,19 +2063,6 @@ dependencies = [ "syn", ] -[[package]] -name = 
"sha-1" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - [[package]] name = "sha-1" version = "0.10.0" @@ -1921,7 +2071,7 @@ checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.3", + "digest 0.10.5", ] [[package]] @@ -1933,6 +2083,17 @@ dependencies = [ "sha1_smol", ] +[[package]] +name = "sha1" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.5", +] + [[package]] name = "sha1_smol" version = "1.0.0" @@ -1969,21 +2130,24 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" +checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +dependencies = [ + "autocfg", +] [[package]] name = "smallvec" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.4.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" dependencies = [ "libc", "winapi", @@ -2032,7 +2196,7 @@ checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" dependencies = [ 
"proc-macro2", "quote", - "serde 1.0.137", + "serde", "serde_derive", "syn", ] @@ -2046,10 +2210,10 @@ dependencies = [ "base-x", "proc-macro2", "quote", - "serde 1.0.137", + "serde", "serde_derive", "serde_json", - "sha1", + "sha1 0.6.1", "syn", ] @@ -2073,9 +2237,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "subprocess" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "055cf3ebc2981ad8f0a5a17ef6652f652d87831f79fddcba2ac57bcb9a0aa407" +checksum = "0c2e86926081dda636c546d8c5e641661049d7562a68f5488be4a1f7f66f6086" dependencies = [ "libc", "winapi", @@ -2083,13 +2247,13 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.92" +version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff7c592601f11445996a06f8ad0c27f094a58857c2f89e97974ab9235b92c52" +checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d" dependencies = [ "proc-macro2", "quote", - "unicode-xid", + "unicode-ident", ] [[package]] @@ -2132,18 +2296,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.31" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" +checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.31" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" +checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" dependencies = [ "proc-macro2", "quote", @@ -2171,23 +2335,31 @@ dependencies = [ "libc", "standback", "stdweb", - "time-macros", + "time-macros 0.1.1", "version_check", "winapi", ] [[package]] name = "time" -version = "0.3.13" 
+version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db76ff9fa4b1458b3c7f077f3ff9887394058460d21e634355b273aaf11eea45" +checksum = "0fab5c8b9980850e06d92ddbe3ab839c062c801f3927c0fb8abd6fc8e918fbca" dependencies = [ "itoa", "libc", "num_threads", - "serde 1.0.137", + "serde", + "time-core", + "time-macros 0.2.5", ] +[[package]] +name = "time-core" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" + [[package]] name = "time-macros" version = "0.1.1" @@ -2198,6 +2370,15 @@ dependencies = [ "time-macros-impl", ] +[[package]] +name = "time-macros" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bb801831d812c562ae7d2bfb531f26e66e4e1f6b17307ba4149c5064710e5b" +dependencies = [ + "time-core", +] + [[package]] name = "time-macros-impl" version = "0.1.2" @@ -2228,16 +2409,16 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.18.1" +version = "1.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dce653fb475565de9f6fb0614b28bca8df2c430c0cf84bcd9c843f15de5414cc" +checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" dependencies = [ + "autocfg", "bytes", "libc", "memchr", "mio", "num_cpus", - "once_cell", "pin-project-lite", "signal-hook-registry", "socket2", @@ -2258,9 +2439,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.22.0" +version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ "rustls", "tokio", @@ -2269,9 +2450,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.8" +version = "0.1.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" +checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" dependencies = [ "futures-core", "pin-project-lite", @@ -2280,36 +2461,21 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.15.0" +version = "0.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "511de3f85caf1c98983545490c3d09685fa8eb634e57eec22bb4db271f46cbd8" +checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" dependencies = [ "futures-util", "log", - "pin-project", "tokio", "tungstenite", ] [[package]] name = "tokio-util" -version = "0.6.9" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "log", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "tokio-util" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0edfdeb067411dba2044da6d1cb2df793dd35add7888d73c16e3381ded401764" +checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" dependencies = [ "bytes", "futures-core", @@ -2325,7 +2491,7 @@ version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" dependencies = [ - "serde 1.0.137", + "serde", ] [[package]] @@ -2349,54 +2515,42 @@ dependencies = [ "r2d2_mysql", "r2d2_sqlite", "rand", - "serde 1.0.137", + "serde", "serde_bencode", "serde_json", "serde_with", "thiserror", "tokio", "toml", - "uuid 1.1.2", + "uuid 1.2.1", "warp", ] [[package]] name = "tower-service" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.34" +version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", "pin-project-lite", - "tracing-attributes", "tracing-core", ] -[[package]] -name = "tracing-attributes" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "tracing-core" -version = "0.1.26" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f" +checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] @@ -2407,9 +2561,9 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "tungstenite" -version = "0.14.0" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0b2d8558abd2e276b0a8df5c05a2ec762609344191e5fd23e292c910e9165b5" +checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ "base64", "byteorder", @@ -2418,7 +2572,7 @@ dependencies = [ "httparse", "log", "rand", - "sha-1 0.9.8", + "sha-1", "thiserror", "url", "utf-8", @@ -2450,6 +2604,12 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +[[package]] +name = "ucd-trie" +version = "0.1.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" + [[package]] name = "unicase" version = "2.6.0" @@ -2465,26 +2625,26 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +[[package]] +name = "unicode-ident" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" + [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "unicode-width" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" - -[[package]] -name = "unicode-xid" -version = "0.2.3" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "untrusted" @@ -2494,13 +2654,12 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.2.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" dependencies = [ "form_urlencoded", "idna", - "matches", "percent-encoding", ] @@ -2518,9 +2677,9 @@ checksum = 
"bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" [[package]] name = "uuid" -version = "1.1.2" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd6469f4314d5f1ffec476e05f17cc9a78bc7a27a6a857842170bdf8d6f98d2f" +checksum = "feb41e78f93363bb2df8b0e86a2ca30eed7806ea16ea0c790d757cf93f79be83" dependencies = [ "getrandom", ] @@ -2555,9 +2714,9 @@ dependencies = [ [[package]] name = "warp" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cef4e1e9114a4b7f1ac799f16ce71c14de5778500c5450ec6b7b920c55b587e" +checksum = "ed7b8be92646fc3d18b06147664ebc5f48d222686cb11a8755e561a735aacc6d" dependencies = [ "bytes", "futures-channel", @@ -2571,15 +2730,16 @@ dependencies = [ "multipart", "percent-encoding", "pin-project", + "rustls-pemfile", "scoped-tls", - "serde 1.0.137", + "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-rustls", "tokio-stream", "tokio-tungstenite", - "tokio-util 0.6.9", + "tokio-util", "tower-service", "tracing", ] @@ -2598,9 +2758,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.80" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27370197c907c55e3f1a9fbe26f44e937fe6451368324e009cba39e139dc08ad" +checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -2608,13 +2768,13 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.80" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53e04185bfa3a779273da532f5025e33398409573f348985af9a1cbf3774d3f4" +checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", "syn", @@ -2623,9 +2783,9 @@ dependencies 
= [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.80" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cae7ff784d7e83a2fe7611cfe766ecf034111b49deb850a3dc7699c08251f5" +checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2633,9 +2793,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.80" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99ec0dc7a4756fffc231aab1b9f2f578d23cd391390ab27f952ae0c9b3ece20b" +checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" dependencies = [ "proc-macro2", "quote", @@ -2646,15 +2806,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.80" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744" +checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" [[package]] name = "web-sys" -version = "0.3.57" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b17e741662c70c8bd24ac5c5b18de314a2c26c32bf8346ee1e6f53de919c283" +checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" dependencies = [ "js-sys", "wasm-bindgen", @@ -2662,9 +2822,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.21.4" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" dependencies = [ "ring", "untrusted", @@ -2710,6 +2870,106 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] 
+name = "windows-sys" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" +dependencies = [ + "windows_aarch64_msvc 0.36.1", + "windows_i686_gnu 0.36.1", + "windows_i686_msvc 0.36.1", + "windows_x86_64_gnu 0.36.1", + "windows_x86_64_msvc 0.36.1", +] + +[[package]] +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc 0.42.0", + "windows_i686_gnu 0.42.0", + "windows_i686_msvc 0.42.0", + "windows_x86_64_gnu 0.42.0", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc 0.42.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" + +[[package]] +name = "windows_i686_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" + +[[package]] +name = "windows_i686_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" + [[package]] name = "wyz" version = "0.4.0" diff --git a/Cargo.toml b/Cargo.toml index 89fdffa99..c7e3790bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ lto = "fat" strip = true [dependencies] -tokio = { version = "1.7", features = [ +tokio = { version = "1", features = [ "rt-multi-thread", "net", "sync", @@ -29,33 +29,33 @@ tokio = { version = "1.7", features = [ serde = { version = "1.0", features = ["derive"] } serde_bencode = "^0.2.3" -serde_json = "1.0.72" -serde_with = "2.0.0" +serde_json = "1.0" +serde_with = "2.0" hex = "0.4.3" -percent-encoding = "2.1.0" +percent-encoding = "2" binascii = "0.1" -lazy_static = "1.4.0" +lazy_static = "1.4" 
-openssl = { version = "0.10.41", features = ["vendored"] } +openssl = { version = "0.10", features = ["vendored"] } warp = { version = "0.3", features = ["tls"] } -config = "0.11" +config = "0.13" toml = "0.5" log = { version = "0.4", features = ["release_max_level_info"] } fern = "0.6" chrono = "0.4" -r2d2 = "0.8.8" -r2d2_mysql = "21.0.0" -r2d2_sqlite = { version = "0.21.0", features = ["bundled"] } +r2d2 = "0.8" +r2d2_mysql = "21" +r2d2_sqlite = { version = "0.21", features = ["bundled"] } -rand = "0.8.4" +rand = "0.8" derive_more = "0.99" thiserror = "1.0" -futures = "0.3.21" -async-trait = "0.1.52" +futures = "0.3" +async-trait = "0.1" -aquatic_udp_protocol = "0.2.0" -uuid = { version = "1.1.2", features = ["v4"] } +aquatic_udp_protocol = "0.2" +uuid = { version = "1", features = ["v4"] } diff --git a/src/config.rs b/src/config.rs index c094eb2f9..b59d572ea 100644 --- a/src/config.rs +++ b/src/config.rs @@ -128,10 +128,13 @@ impl Configuration { } pub fn load_from_file(path: &str) -> Result { - let mut config = Config::new(); + let config_builder = Config::builder(); + + #[allow(unused_assignments)] + let mut config = Config::default(); if Path::new(path).exists() { - config.merge(File::with_name(path))?; + config = config_builder.add_source(File::with_name(path)).build()?; } else { eprintln!("No config file found."); eprintln!("Creating config file.."); @@ -143,7 +146,7 @@ impl Configuration { } let torrust_config: Configuration = config - .try_into() + .try_deserialize() .map_err(|e| ConfigError::Message(format!("Errors while processing config: {}.", e)))?; Ok(torrust_config) From a334f17e5f62bc9357cc520c7d4fd60a0a1b04b3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Oct 2022 14:33:56 +0100 Subject: [PATCH 100/435] refactor: extract struct StatsEventSender --- src/tracker/statistics.rs | 44 ++++++++++++++++++++++++++++----------- src/udp/handlers.rs | 15 ++++++++----- 2 files changed, 42 insertions(+), 17 deletions(-) diff --git 
a/src/tracker/statistics.rs b/src/tracker/statistics.rs index fb4e4c0fe..7d1d17c51 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -62,24 +62,31 @@ pub struct StatsTracker { } impl StatsTracker { - pub fn new_active_instance() -> Self { - Self::new_instance(true) - } + pub fn new_active_instance() -> (Self, StatsEventSender) { + let mut stats_tracker = Self { + channel_sender: None, + stats: Arc::new(RwLock::new(TrackerStatistics::new())), + }; - pub fn new_inactive_instance() -> Self { - Self::new_instance(false) + let stats_event_sender = stats_tracker.run_worker(); + + (stats_tracker, stats_event_sender) } - pub fn new_instance(active: bool) -> Self { - let mut stats_tracker = Self { + pub fn new_inactive_instance() -> Self { + Self { channel_sender: None, stats: Arc::new(RwLock::new(TrackerStatistics::new())), - }; + } + } - if active { - stats_tracker.run_worker(); + pub fn new_instance(active: bool) -> Self { + if !active { + return Self::new_inactive_instance(); } + let (stats_tracker, _stats_event_sender) = Self::new_active_instance(); + stats_tracker } @@ -90,11 +97,11 @@ impl StatsTracker { } } - pub fn run_worker(&mut self) { + pub fn run_worker(&mut self) -> StatsEventSender { let (tx, mut rx) = mpsc::channel::(CHANNEL_BUFFER_SIZE); // set send channel on stats_tracker - self.channel_sender = Some(tx); + self.channel_sender = Some(tx.clone()); let stats = self.stats.clone(); @@ -142,6 +149,8 @@ impl StatsTracker { drop(stats_lock); } }); + + StatsEventSender { sender: tx } } } @@ -161,6 +170,17 @@ impl TrackerStatisticsEventSender for StatsTracker { } } +pub struct StatsEventSender { + sender: Sender, +} + +#[async_trait] +impl TrackerStatisticsEventSender for StatsEventSender { + async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>> { + Some(self.sender.send(event).await) + } +} + #[async_trait] pub trait TrackerStatisticsRepository: Sync + Send { async fn get_stats(&self) -> RwLockReadGuard<'_, 
TrackerStatistics>; diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 845b860e9..7992bcaf0 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -271,19 +271,24 @@ mod tests { fn initialized_public_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Public).into()); - Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_active_instance())).unwrap()) + initialized_tracker(configuration) } fn initialized_private_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Private).into()); - Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_active_instance())).unwrap()) + initialized_tracker(configuration) } fn initialized_whitelisted_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Listed).into()); - Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_active_instance())).unwrap()) + initialized_tracker(configuration) } + fn initialized_tracker(configuration: Arc) -> Arc { + let (stats_tracker, _stats_event_sender) = StatsTracker::new_active_instance(); + Arc::new(TorrentTracker::new(configuration, Box::new(stats_tracker)).unwrap()) + } + fn sample_ipv4_remote_addr() -> SocketAddr { sample_ipv4_socket_address() } @@ -969,8 +974,8 @@ mod tests { #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let tracker = - Arc::new(TorrentTracker::new(configuration, Box::new(StatsTracker::new_active_instance())).unwrap()); + let (stats_tracker, _stats_event_sender) = StatsTracker::new_active_instance(); + let tracker = Arc::new(TorrentTracker::new(configuration, Box::new(stats_tracker)).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = 
Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); From b784442cb676edaeee6caa8941c2f050d0e9e897 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Oct 2022 16:25:17 +0100 Subject: [PATCH 101/435] refactor: inject new struct StatsEventSender into TorrentTracker Parallel change. We are still using the old TrackerStatsService to send events. --- src/main.rs | 10 ++++- src/tracker/statistics.rs | 6 +-- src/tracker/tracker.rs | 10 ++++- src/udp/handlers.rs | 84 ++++++++++++++++++++++++++++++--------- tests/udp.rs | 5 ++- 5 files changed, 88 insertions(+), 27 deletions(-) diff --git a/src/main.rs b/src/main.rs index dcb92acb8..f995ba377 100644 --- a/src/main.rs +++ b/src/main.rs @@ -24,10 +24,16 @@ async fn main() { }; // Initialize stats tracker - let stats_tracker = StatsTracker::new_instance(config.tracker_usage_statistics); + let mut stats_tracker = StatsTracker::new_inactive_instance(); + + let mut stats_event_sender = None; + + if config.tracker_usage_statistics { + stats_event_sender = Some(stats_tracker.run_worker()); + } // Initialize Torrust tracker - let tracker = match TorrentTracker::new(config.clone(), Box::new(stats_tracker)) { + let tracker = match TorrentTracker::new(config.clone(), Box::new(stats_tracker), stats_event_sender) { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 7d1d17c51..8b57d6bfe 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -62,7 +62,7 @@ pub struct StatsTracker { } impl StatsTracker { - pub fn new_active_instance() -> (Self, StatsEventSender) { + pub fn new_active_instance() -> (Self, Box) { let mut stats_tracker = Self { channel_sender: None, stats: Arc::new(RwLock::new(TrackerStatistics::new())), @@ -97,7 +97,7 @@ impl StatsTracker { } } - pub fn run_worker(&mut self) -> StatsEventSender { + pub fn run_worker(&mut self) -> Box { let (tx, mut rx) = mpsc::channel::(CHANNEL_BUFFER_SIZE); // set send channel on 
stats_tracker @@ -150,7 +150,7 @@ impl StatsTracker { } }); - StatsEventSender { sender: tx } + Box::new(StatsEventSender { sender: tx }) } } diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index 5499eebeb..b1d009077 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -12,7 +12,7 @@ use crate::databases::database::Database; use crate::mode::TrackerMode; use crate::peer::TorrentPeer; use crate::protocol::common::InfoHash; -use crate::statistics::{TrackerStatistics, TrackerStatisticsEvent, TrackerStatsService}; +use crate::statistics::{TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender, TrackerStatsService}; use crate::tracker::key; use crate::tracker::key::AuthKey; use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; @@ -25,11 +25,16 @@ pub struct TorrentTracker { whitelist: RwLock>, torrents: RwLock>, stats_tracker: Box, + _stats_event_sender: Option>, database: Box, } impl TorrentTracker { - pub fn new(config: Arc, stats_tracker: Box) -> Result { + pub fn new( + config: Arc, + stats_tracker: Box, + _stats_event_sender: Option>, + ) -> Result { let database = database::connect_database(&config.db_driver, &config.db_path)?; Ok(TorrentTracker { @@ -39,6 +44,7 @@ impl TorrentTracker { whitelist: RwLock::new(std::collections::HashSet::new()), torrents: RwLock::new(std::collections::BTreeMap::new()), stats_tracker, + _stats_event_sender, database, }) } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 7992bcaf0..fc3e0968f 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -285,9 +285,9 @@ mod tests { } fn initialized_tracker(configuration: Arc) -> Arc { - let (stats_tracker, _stats_event_sender) = StatsTracker::new_active_instance(); - Arc::new(TorrentTracker::new(configuration, Box::new(stats_tracker)).unwrap()) - } + let (stats_tracker, stats_event_sender) = StatsTracker::new_active_instance(); + Arc::new(TorrentTracker::new(configuration, Box::new(stats_tracker), 
Some(stats_event_sender)).unwrap()) + } fn sample_ipv4_remote_addr() -> SocketAddr { sample_ipv4_socket_address() @@ -371,6 +371,30 @@ mod tests { } } + struct StatsEventSenderMock { + expected_event: Option, + } + + impl StatsEventSenderMock { + fn new() -> Self { + Self { expected_event: None } + } + + fn should_throw_event(&mut self, expected_event: TrackerStatisticsEvent) { + self.expected_event = Some(expected_event); + } + } + + #[async_trait] + impl TrackerStatisticsEventSender for StatsEventSenderMock { + async fn send_event(&self, _event: TrackerStatisticsEvent) -> Option>> { + if self.expected_event.is_some() { + assert_eq!(_event, *self.expected_event.as_ref().unwrap()); + } + None + } + } + #[async_trait] impl TrackerStatisticsRepository for TrackerStatsServiceMock { async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { @@ -413,7 +437,10 @@ mod tests { use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; - use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr, TrackerStatsServiceMock}; + use super::{ + default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr, StatsEventSenderMock, + TrackerStatsServiceMock, + }; use crate::statistics::TrackerStatisticsEvent; use crate::tracker::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; @@ -467,11 +494,13 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let stats_event_sender = Box::new(StatsEventSenderMock::new()); let client_socket_address = sample_ipv4_socket_address(); tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp4Connect); - let torrent_tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + let torrent_tracker = + 
Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap()); handle_connect(client_socket_address, &sample_connect_request(), torrent_tracker) .await .unwrap(); @@ -480,10 +509,12 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let stats_event_sender = Box::new(StatsEventSenderMock::new()); tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp6Connect); - let torrent_tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + let torrent_tracker = + Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap()); handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), torrent_tracker) .await .unwrap(); @@ -577,8 +608,8 @@ mod tests { use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ - default_tracker_config, initialized_public_tracker, sample_ipv4_socket_address, TorrentPeerBuilder, - TrackerStatsServiceMock, + default_tracker_config, initialized_public_tracker, sample_ipv4_socket_address, StatsEventSenderMock, + TorrentPeerBuilder, TrackerStatsServiceMock, }; use crate::PeerId; @@ -718,10 +749,13 @@ mod tests { #[tokio::test] async fn should_send_the_upd4_announce_event() { let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let stats_event_sender = Box::new(StatsEventSenderMock::new()); tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp4Announce); - let tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + let tracker = Arc::new( + TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), + ); 
handle_announce( sample_ipv4_socket_address(), &AnnounceRequestBuilder::default().into(), @@ -794,8 +828,8 @@ mod tests { use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ - default_tracker_config, initialized_public_tracker, sample_ipv6_remote_addr, TorrentPeerBuilder, - TrackerStatsServiceMock, + default_tracker_config, initialized_public_tracker, sample_ipv6_remote_addr, StatsEventSenderMock, + TorrentPeerBuilder, TrackerStatsServiceMock, }; use crate::PeerId; @@ -942,10 +976,13 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_announce_event() { let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let stats_event_sender = Box::new(StatsEventSenderMock::new()); tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp6Announce); - let tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + let tracker = Arc::new( + TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), + ); let remote_addr = sample_ipv6_remote_addr(); @@ -974,8 +1011,9 @@ mod tests { #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let (stats_tracker, _stats_event_sender) = StatsTracker::new_active_instance(); - let tracker = Arc::new(TorrentTracker::new(configuration, Box::new(stats_tracker)).unwrap()); + let (stats_tracker, stats_event_sender) = StatsTracker::new_active_instance(); + let tracker = + Arc::new(TorrentTracker::new(configuration, Box::new(stats_tracker), Some(stats_event_sender)).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); @@ -1243,16 +1281,21 @@ mod tests { use crate::statistics::TrackerStatisticsEvent; use 
crate::tracker::tracker::TorrentTracker; use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr, TrackerStatsServiceMock}; + use crate::udp::handlers::tests::{ + default_tracker_config, sample_ipv4_remote_addr, StatsEventSenderMock, TrackerStatsServiceMock, + }; #[tokio::test] async fn should_send_the_upd4_scrape_event() { let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let stats_event_sender = Box::new(StatsEventSenderMock::new()); tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp4Scrape); let remote_addr = sample_ipv4_remote_addr(); - let tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + let tracker = Arc::new( + TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), + ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) .await @@ -1267,16 +1310,21 @@ mod tests { use crate::statistics::TrackerStatisticsEvent; use crate::tracker::tracker::TorrentTracker; use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr, TrackerStatsServiceMock}; + use crate::udp::handlers::tests::{ + default_tracker_config, sample_ipv6_remote_addr, StatsEventSenderMock, TrackerStatsServiceMock, + }; #[tokio::test] async fn should_send_the_upd6_scrape_event() { let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let stats_event_sender = Box::new(StatsEventSenderMock::new()); tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp6Scrape); let remote_addr = sample_ipv6_remote_addr(); - let tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service).unwrap()); + let tracker = Arc::new( + TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), + ); 
handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) .await diff --git a/tests/udp.rs b/tests/udp.rs index b391b922f..d2b500d5a 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -51,10 +51,11 @@ mod udp_tracker_server { lazy_static::initialize(&static_time::TIME_AT_APP_START); // Initialize stats tracker - let stats_tracker = StatsTracker::new_active_instance(); + let (stats_tracker, stats_event_sender) = StatsTracker::new_active_instance(); // Initialize Torrust tracker - let tracker = match TorrentTracker::new(configuration.clone(), Box::new(stats_tracker)) { + let tracker = match TorrentTracker::new(configuration.clone(), Box::new(stats_tracker), Some(stats_event_sender)) + { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) From 720a5841c943364bfd99fba2337ea024324b6293 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Oct 2022 16:40:05 +0100 Subject: [PATCH 102/435] refactor: use StatsEventSender to send events instead of StatsTracker. 
--- src/tracker/tracker.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index b1d009077..80f6e549d 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -25,7 +25,7 @@ pub struct TorrentTracker { whitelist: RwLock>, torrents: RwLock>, stats_tracker: Box, - _stats_event_sender: Option>, + stats_event_sender: Option>, database: Box, } @@ -33,7 +33,7 @@ impl TorrentTracker { pub fn new( config: Arc, stats_tracker: Box, - _stats_event_sender: Option>, + stats_event_sender: Option>, ) -> Result { let database = database::connect_database(&config.db_driver, &config.db_path)?; @@ -44,7 +44,7 @@ impl TorrentTracker { whitelist: RwLock::new(std::collections::HashSet::new()), torrents: RwLock::new(std::collections::BTreeMap::new()), stats_tracker, - _stats_event_sender, + stats_event_sender, database, }) } @@ -242,7 +242,10 @@ impl TorrentTracker { } pub async fn send_stats_event(&self, event: TrackerStatisticsEvent) -> Option>> { - self.stats_tracker.send_event(event).await + match &self.stats_event_sender { + None => None, + Some(stats_event_sender) => stats_event_sender.send_event(event).await, + } } // Remove inactive peers and (optionally) peerless torrents From 5b73d801c99f6f2d92125ad46c2cc8a39b4c68c1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Oct 2022 17:08:24 +0100 Subject: [PATCH 103/435] refactor: removed unused code and extract fn - The StatsTracker does not need anymore the channel sender. - A setup function for statistics was extracted. 
--- src/lib.rs | 1 + src/main.rs | 12 +++------- src/stats.rs | 36 ++++++++++++++++++++++++++++ src/tracker/statistics.rs | 50 +-------------------------------------- 4 files changed, 41 insertions(+), 58 deletions(-) create mode 100644 src/stats.rs diff --git a/src/lib.rs b/src/lib.rs index 5f003b5fd..cf830f108 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,6 +14,7 @@ pub mod jobs; pub mod logging; pub mod protocol; pub mod setup; +pub mod stats; pub mod tracker; pub mod udp; diff --git a/src/main.rs b/src/main.rs index f995ba377..c21aa1793 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use log::info; -use torrust_tracker::tracker::statistics::StatsTracker; +use torrust_tracker::stats::setup_statistics; use torrust_tracker::tracker::tracker::TorrentTracker; use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time, Configuration}; @@ -23,14 +23,8 @@ async fn main() { } }; - // Initialize stats tracker - let mut stats_tracker = StatsTracker::new_inactive_instance(); - - let mut stats_event_sender = None; - - if config.tracker_usage_statistics { - stats_event_sender = Some(stats_tracker.run_worker()); - } + // Initialize statistics:wq + let (stats_tracker, stats_event_sender) = setup_statistics(config.tracker_usage_statistics); // Initialize Torrust tracker let tracker = match TorrentTracker::new(config.clone(), Box::new(stats_tracker), stats_event_sender) { diff --git a/src/stats.rs b/src/stats.rs new file mode 100644 index 000000000..d459d8f5b --- /dev/null +++ b/src/stats.rs @@ -0,0 +1,36 @@ +use crate::statistics::{StatsTracker, TrackerStatisticsEventSender}; + +pub fn setup_statistics(tracker_usage_statistics: bool) -> (StatsTracker, Option>) { + let mut stats_tracker = StatsTracker::new_inactive_instance(); + + let mut stats_event_sender = None; + + if tracker_usage_statistics { + stats_event_sender = Some(stats_tracker.run_worker()); + } + + (stats_tracker, stats_event_sender) +} + +#[cfg(test)] +mod test { + 
use crate::stats::setup_statistics; + + #[tokio::test] + async fn should_not_send_any_event_when_statistics_are_disabled() { + let tracker_usage_statistics = false; + + let (_stats_tracker, stats_event_sender) = setup_statistics(tracker_usage_statistics); + + assert!(stats_event_sender.is_none()); + } + + #[tokio::test] + async fn should_send_events_when_statistics_are_enabled() { + let tracker_usage_statistics = true; + + let (_stats_tracker, stats_event_sender) = setup_statistics(tracker_usage_statistics); + + assert!(stats_event_sender.is_some()); + } +} diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 8b57d6bfe..a89b5d4cc 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -57,14 +57,12 @@ impl TrackerStatistics { } pub struct StatsTracker { - channel_sender: Option>, pub stats: Arc>, } impl StatsTracker { pub fn new_active_instance() -> (Self, Box) { let mut stats_tracker = Self { - channel_sender: None, stats: Arc::new(RwLock::new(TrackerStatistics::new())), }; @@ -75,7 +73,6 @@ impl StatsTracker { pub fn new_inactive_instance() -> Self { Self { - channel_sender: None, stats: Arc::new(RwLock::new(TrackerStatistics::new())), } } @@ -92,7 +89,6 @@ impl StatsTracker { pub fn new() -> Self { Self { - channel_sender: None, stats: Arc::new(RwLock::new(TrackerStatistics::new())), } } @@ -100,9 +96,6 @@ impl StatsTracker { pub fn run_worker(&mut self) -> Box { let (tx, mut rx) = mpsc::channel::(CHANNEL_BUFFER_SIZE); - // set send channel on stats_tracker - self.channel_sender = Some(tx.clone()); - let stats = self.stats.clone(); tokio::spawn(async move { @@ -159,17 +152,6 @@ pub trait TrackerStatisticsEventSender: Sync + Send { async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>>; } -#[async_trait] -impl TrackerStatisticsEventSender for StatsTracker { - async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>> { - if let Some(tx) = &self.channel_sender { - Some(tx.send(event).await) - } else { 
- None - } - } -} - pub struct StatsEventSender { sender: Sender, } @@ -193,36 +175,6 @@ impl TrackerStatisticsRepository for StatsTracker { } } -pub trait TrackerStatsService: TrackerStatisticsEventSender + TrackerStatisticsRepository {} +pub trait TrackerStatsService: TrackerStatisticsRepository {} impl TrackerStatsService for StatsTracker {} - -#[cfg(test)] -mod test { - - mod event_sender { - use crate::statistics::{StatsTracker, TrackerStatisticsEvent, TrackerStatisticsEventSender}; - - #[tokio::test] - async fn should_not_send_any_event_when_statistics_are_disabled() { - let tracker_usage_statistics = false; - - let inactive_stats_tracker = StatsTracker::new_instance(tracker_usage_statistics); - - let result = inactive_stats_tracker.send_event(TrackerStatisticsEvent::Tcp4Announce).await; - - assert!(result.is_none()); - } - - #[tokio::test] - async fn should_send_events_when_statistics_are_enabled() { - let tracker_usage_statistics = true; - - let active_stats_tracker = StatsTracker::new_instance(tracker_usage_statistics); - - let result = active_stats_tracker.send_event(TrackerStatisticsEvent::Tcp4Announce).await; - - assert!(result.is_some()); - } - } -} From daec1fed0553e397ce0aa9823f26f1b6ed42a249 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Oct 2022 17:23:46 +0100 Subject: [PATCH 104/435] refactor: extract stats event_listener --- src/tracker/statistics.rs | 97 ++++++++++++++++++++------------------- 1 file changed, 51 insertions(+), 46 deletions(-) diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index a89b5d4cc..66aea0169 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -1,8 +1,9 @@ use std::sync::Arc; use async_trait::async_trait; +use log::debug; use tokio::sync::mpsc::error::SendError; -use tokio::sync::mpsc::Sender; +use tokio::sync::mpsc::{Receiver, Sender}; use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; const CHANNEL_BUFFER_SIZE: usize = 65_535; @@ -94,59 +95,63 @@ impl StatsTracker { } 
pub fn run_worker(&mut self) -> Box { - let (tx, mut rx) = mpsc::channel::(CHANNEL_BUFFER_SIZE); + let (tx, rx) = mpsc::channel::(CHANNEL_BUFFER_SIZE); let stats = self.stats.clone(); - tokio::spawn(async move { - while let Some(event) = rx.recv().await { - let mut stats_lock = stats.write().await; - - match event { - TrackerStatisticsEvent::Tcp4Announce => { - stats_lock.tcp4_announces_handled += 1; - stats_lock.tcp4_connections_handled += 1; - } - TrackerStatisticsEvent::Tcp4Scrape => { - stats_lock.tcp4_scrapes_handled += 1; - stats_lock.tcp4_connections_handled += 1; - } - TrackerStatisticsEvent::Tcp6Announce => { - stats_lock.tcp6_announces_handled += 1; - stats_lock.tcp6_connections_handled += 1; - } - TrackerStatisticsEvent::Tcp6Scrape => { - stats_lock.tcp6_scrapes_handled += 1; - stats_lock.tcp6_connections_handled += 1; - } - TrackerStatisticsEvent::Udp4Connect => { - stats_lock.udp4_connections_handled += 1; - } - TrackerStatisticsEvent::Udp4Announce => { - stats_lock.udp4_announces_handled += 1; - } - TrackerStatisticsEvent::Udp4Scrape => { - stats_lock.udp4_scrapes_handled += 1; - } - TrackerStatisticsEvent::Udp6Connect => { - stats_lock.udp6_connections_handled += 1; - } - TrackerStatisticsEvent::Udp6Announce => { - stats_lock.udp6_announces_handled += 1; - } - TrackerStatisticsEvent::Udp6Scrape => { - stats_lock.udp6_scrapes_handled += 1; - } - } - - drop(stats_lock); - } - }); + tokio::spawn(async move { event_listener(rx, stats).await }); Box::new(StatsEventSender { sender: tx }) } } +async fn event_listener(mut rx: Receiver, stats: Arc>) { + while let Some(event) = rx.recv().await { + let mut stats_lock = stats.write().await; + + match event { + TrackerStatisticsEvent::Tcp4Announce => { + stats_lock.tcp4_announces_handled += 1; + stats_lock.tcp4_connections_handled += 1; + } + TrackerStatisticsEvent::Tcp4Scrape => { + stats_lock.tcp4_scrapes_handled += 1; + stats_lock.tcp4_connections_handled += 1; + } + TrackerStatisticsEvent::Tcp6Announce => { + 
stats_lock.tcp6_announces_handled += 1; + stats_lock.tcp6_connections_handled += 1; + } + TrackerStatisticsEvent::Tcp6Scrape => { + stats_lock.tcp6_scrapes_handled += 1; + stats_lock.tcp6_connections_handled += 1; + } + TrackerStatisticsEvent::Udp4Connect => { + stats_lock.udp4_connections_handled += 1; + } + TrackerStatisticsEvent::Udp4Announce => { + stats_lock.udp4_announces_handled += 1; + } + TrackerStatisticsEvent::Udp4Scrape => { + stats_lock.udp4_scrapes_handled += 1; + } + TrackerStatisticsEvent::Udp6Connect => { + stats_lock.udp6_connections_handled += 1; + } + TrackerStatisticsEvent::Udp6Announce => { + stats_lock.udp6_announces_handled += 1; + } + TrackerStatisticsEvent::Udp6Scrape => { + stats_lock.udp6_scrapes_handled += 1; + } + } + + debug!("stats: {:?}", stats_lock); + + drop(stats_lock); + } +} + #[async_trait] pub trait TrackerStatisticsEventSender: Sync + Send { async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>>; From a2b16ff7b2d3bdd97d994cf19cc33cb6b8b4be62 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Oct 2022 17:35:22 +0100 Subject: [PATCH 105/435] fix: tests using mock for old service I only change test to use the new mock. I realized test were wrong becuase they do not fail when no event is sent. THey only fail when the event sent is not the rigth type. 
--- src/udp/handlers.rs | 56 ++++++++++++++++----------------------------- 1 file changed, 20 insertions(+), 36 deletions(-) diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index fc3e0968f..ba545da1b 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -345,29 +345,13 @@ mod tests { struct TrackerStatsServiceMock { stats: Arc>, - expected_event: Option, } impl TrackerStatsServiceMock { fn new() -> Self { Self { stats: Arc::new(RwLock::new(TrackerStatistics::new())), - expected_event: None, - } - } - - fn should_throw_event(&mut self, expected_event: TrackerStatisticsEvent) { - self.expected_event = Some(expected_event); - } - } - - #[async_trait] - impl TrackerStatisticsEventSender for TrackerStatsServiceMock { - async fn send_event(&self, _event: TrackerStatisticsEvent) -> Option>> { - if self.expected_event.is_some() { - assert_eq!(_event, *self.expected_event.as_ref().unwrap()); } - None } } @@ -387,9 +371,9 @@ mod tests { #[async_trait] impl TrackerStatisticsEventSender for StatsEventSenderMock { - async fn send_event(&self, _event: TrackerStatisticsEvent) -> Option>> { + async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>> { if self.expected_event.is_some() { - assert_eq!(_event, *self.expected_event.as_ref().unwrap()); + assert_eq!(event, *self.expected_event.as_ref().unwrap()); } None } @@ -493,11 +477,11 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { - let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let stats_event_sender = Box::new(StatsEventSenderMock::new()); + let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let mut stats_event_sender = Box::new(StatsEventSenderMock::new()); let client_socket_address = sample_ipv4_socket_address(); - tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp4Connect); + 
stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp4Connect); let torrent_tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap()); @@ -508,10 +492,10 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { - let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let stats_event_sender = Box::new(StatsEventSenderMock::new()); + let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let mut stats_event_sender = Box::new(StatsEventSenderMock::new()); - tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp6Connect); + stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp6Connect); let torrent_tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap()); @@ -748,10 +732,10 @@ mod tests { #[tokio::test] async fn should_send_the_upd4_announce_event() { - let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let stats_event_sender = Box::new(StatsEventSenderMock::new()); + let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let mut stats_event_sender = Box::new(StatsEventSenderMock::new()); - tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp4Announce); + stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp4Announce); let tracker = Arc::new( TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), @@ -975,10 +959,10 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_announce_event() { - let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let stats_event_sender = Box::new(StatsEventSenderMock::new()); + let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let mut stats_event_sender = 
Box::new(StatsEventSenderMock::new()); - tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp6Announce); + stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp6Announce); let tracker = Arc::new( TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), @@ -1287,10 +1271,10 @@ mod tests { #[tokio::test] async fn should_send_the_upd4_scrape_event() { - let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let stats_event_sender = Box::new(StatsEventSenderMock::new()); + let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let mut stats_event_sender = Box::new(StatsEventSenderMock::new()); - tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp4Scrape); + stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp4Scrape); let remote_addr = sample_ipv4_remote_addr(); let tracker = Arc::new( @@ -1316,10 +1300,10 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_scrape_event() { - let mut tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let stats_event_sender = Box::new(StatsEventSenderMock::new()); + let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); + let mut stats_event_sender = Box::new(StatsEventSenderMock::new()); - tracker_stats_service.should_throw_event(TrackerStatisticsEvent::Udp6Scrape); + stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp6Scrape); let remote_addr = sample_ipv6_remote_addr(); let tracker = Arc::new( From bc3df5a74b2fc66155bd88aec1b4fad1942da379 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Oct 2022 18:29:18 +0100 Subject: [PATCH 106/435] fix typo --- src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index c21aa1793..bfcce014b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -23,7 +23,7 @@ async fn main() { } }; - // Initialize statistics:wq + // Initialize statistics let 
(stats_tracker, stats_event_sender) = setup_statistics(config.tracker_usage_statistics); // Initialize Torrust tracker From e5701103788f79c61bb175915a6467618be096a9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 24 Oct 2022 16:33:46 +0100 Subject: [PATCH 107/435] test: add new dev dependency mockall It will be used to mock a trait in tests. --- Cargo.lock | 106 +++++++++++++++++++++++++++++++++++++++++++++++++++++ Cargo.toml | 3 ++ 2 files changed, 109 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 0a60397f9..ce66efa09 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -479,6 +479,12 @@ dependencies = [ "syn", ] +[[package]] +name = "difflib" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" + [[package]] name = "digest" version = "0.9.0" @@ -510,6 +516,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" +[[package]] +name = "downcast" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" + [[package]] name = "either" version = "1.8.0" @@ -570,6 +582,15 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "float-cmp" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" +dependencies = [ + "num-traits", +] + [[package]] name = "fnv" version = "1.0.7" @@ -600,6 +621,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fragile" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" + [[package]] name = "frunk" version = "0.4.0" @@ -1008,6 +1035,15 @@ dependencies = [ "syn", ] +[[package]] 
+name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.4" @@ -1208,6 +1244,33 @@ dependencies = [ "windows-sys 0.36.1", ] +[[package]] +name = "mockall" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50e4a1c770583dac7ab5e2f6c139153b783a53a1bbee9729613f193e59828326" +dependencies = [ + "cfg-if", + "downcast", + "fragile", + "lazy_static", + "mockall_derive", + "predicates", + "predicates-tree", +] + +[[package]] +name = "mockall_derive" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "832663583d5fa284ca8810bf7015e46c9fff9622d3cf34bd1eea5003fec06dd0" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "multipart" version = "0.18.0" @@ -1349,6 +1412,12 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "normalize-line-endings" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" + [[package]] name = "num-bigint" version = "0.3.3" @@ -1626,6 +1695,36 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" +[[package]] +name = "predicates" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5aab5be6e4732b473071984b3164dbbfb7a3674d30ea5ff44410b6bcd960c3c" +dependencies = [ + "difflib", + "float-cmp", + "itertools", + "normalize-line-endings", + "predicates-core", + "regex", +] + +[[package]] +name = "predicates-core" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"da1c2388b1513e1b605fcec39a95e0a9e8ef088f71443ef37099fa9ae6673fcb" + +[[package]] +name = "predicates-tree" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d86de6de25020a36c6d3643a86d9a6a9f552107c0559c60ea03551b5e16c032" +dependencies = [ + "predicates-core", + "termtree", +] + [[package]] name = "proc-macro-hack" version = "0.5.19" @@ -2285,6 +2384,12 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "termtree" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507e9898683b6c43a9aa55b64259b721b52ba226e0f3779137e50ad114a4c90b" + [[package]] name = "textwrap" version = "0.11.0" @@ -2509,6 +2614,7 @@ dependencies = [ "hex", "lazy_static", "log", + "mockall", "openssl", "percent-encoding", "r2d2", diff --git a/Cargo.toml b/Cargo.toml index c7e3790bb..18188565c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -59,3 +59,6 @@ async-trait = "0.1" aquatic_udp_protocol = "0.2" uuid = { version = "1", features = ["v4"] } + +[dev-dependencies] +mockall = "0.11.3" From 8874032074c12aecb1f250e054caac4d8c9c63f4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 24 Oct 2022 16:34:55 +0100 Subject: [PATCH 108/435] fix: weak tests As I explained here: https://github.com/torrust/torrust-tracker/pull/103#issue-1418647900 The mock for the trait TrackerStatisticsEventSender did not work completely weel becuase it only checked for the rigth type of the triggered event but It did not check if the event was sent. I finally used a mockinf library becuase I do not know how to mock trait that uses a mutable reference to 'self'. I need to store wether the event was sent or not and I do not know how to do that without changing the function signature making it mutable. 
--- src/tracker/statistics.rs | 3 + src/udp/handlers.rs | 135 +++++++++++++++++++------------------- 2 files changed, 72 insertions(+), 66 deletions(-) diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 66aea0169..73042ff3e 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -2,6 +2,8 @@ use std::sync::Arc; use async_trait::async_trait; use log::debug; +#[cfg(test)] +use mockall::{automock, predicate::*}; use tokio::sync::mpsc::error::SendError; use tokio::sync::mpsc::{Receiver, Sender}; use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; @@ -153,6 +155,7 @@ async fn event_listener(mut rx: Receiver, stats: Arc Option>>; } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index ba545da1b..35d2e0247 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -252,16 +252,12 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use async_trait::async_trait; - use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; use crate::mode::TrackerMode; use crate::peer::TorrentPeer; use crate::protocol::clock::{DefaultClock, Time}; - use crate::statistics::{ - StatsTracker, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender, TrackerStatisticsRepository, - TrackerStatsService, - }; + use crate::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsRepository, TrackerStatsService}; use crate::tracker::tracker::TorrentTracker; use crate::{Configuration, PeerId}; @@ -355,30 +351,6 @@ mod tests { } } - struct StatsEventSenderMock { - expected_event: Option, - } - - impl StatsEventSenderMock { - fn new() -> Self { - Self { expected_event: None } - } - - fn should_throw_event(&mut self, expected_event: TrackerStatisticsEvent) { - self.expected_event = Some(expected_event); - } - } - - #[async_trait] - impl TrackerStatisticsEventSender for StatsEventSenderMock { - async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>> { - if 
self.expected_event.is_some() { - assert_eq!(event, *self.expected_event.as_ref().unwrap()); - } - None - } - } - #[async_trait] impl TrackerStatisticsRepository for TrackerStatsServiceMock { async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { @@ -417,15 +389,14 @@ mod tests { mod connect_request { + use std::future; use std::sync::Arc; use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; + use mockall::predicate::eq; - use super::{ - default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr, StatsEventSenderMock, - TrackerStatsServiceMock, - }; - use crate::statistics::TrackerStatisticsEvent; + use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr, TrackerStatsServiceMock}; + use crate::statistics::{MockTrackerStatisticsEventSender, TrackerStatisticsEvent}; use crate::tracker::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_connect; @@ -478,10 +449,15 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let mut stats_event_sender = Box::new(StatsEventSenderMock::new()); + let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(TrackerStatisticsEvent::Udp4Connect)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); let client_socket_address = sample_ipv4_socket_address(); - stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp4Connect); let torrent_tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap()); @@ -493,9 +469,13 @@ mod tests { #[tokio::test] async fn 
it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let mut stats_event_sender = Box::new(StatsEventSenderMock::new()); - - stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp6Connect); + let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(TrackerStatisticsEvent::Udp6Connect)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); let torrent_tracker = Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap()); @@ -578,6 +558,7 @@ mod tests { mod using_ipv4 { + use std::future; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; @@ -585,15 +566,16 @@ mod tests { AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, NumberOfPeers, PeerId as AquaticPeerId, Response, ResponsePeer, }; + use mockall::predicate::eq; - use crate::statistics::TrackerStatisticsEvent; + use crate::statistics::{MockTrackerStatisticsEventSender, TrackerStatisticsEvent}; use crate::tracker::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ - default_tracker_config, initialized_public_tracker, sample_ipv4_socket_address, StatsEventSenderMock, - TorrentPeerBuilder, TrackerStatsServiceMock, + default_tracker_config, initialized_public_tracker, sample_ipv4_socket_address, TorrentPeerBuilder, + TrackerStatsServiceMock, }; use crate::PeerId; @@ -733,13 +715,18 @@ mod tests { #[tokio::test] async fn should_send_the_upd4_announce_event() { let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let mut 
stats_event_sender = Box::new(StatsEventSenderMock::new()); - - stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp4Announce); + let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(TrackerStatisticsEvent::Udp4Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), ); + handle_announce( sample_ipv4_socket_address(), &AnnounceRequestBuilder::default().into(), @@ -798,6 +785,7 @@ mod tests { mod using_ipv6 { + use std::future; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; @@ -805,15 +793,16 @@ mod tests { AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, NumberOfPeers, PeerId as AquaticPeerId, Response, ResponsePeer, }; + use mockall::predicate::eq; - use crate::statistics::TrackerStatisticsEvent; + use crate::statistics::{MockTrackerStatisticsEventSender, TrackerStatisticsEvent}; use crate::tracker::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ - default_tracker_config, initialized_public_tracker, sample_ipv6_remote_addr, StatsEventSenderMock, - TorrentPeerBuilder, TrackerStatsServiceMock, + default_tracker_config, initialized_public_tracker, sample_ipv6_remote_addr, TorrentPeerBuilder, + TrackerStatsServiceMock, }; use crate::PeerId; @@ -960,9 +949,13 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_announce_event() { let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let mut stats_event_sender = Box::new(StatsEventSenderMock::new()); - - 
stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp6Announce); + let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(TrackerStatisticsEvent::Udp6Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), @@ -1252,29 +1245,34 @@ mod tests { let info_hashes = vec![info_hash]; ScrapeRequest { - connection_id: into_connection_id(&make_connection_cookie(&remote_addr)), + connection_id: into_connection_id(&make_connection_cookie(remote_addr)), transaction_id: TransactionId(0i32), info_hashes, } } mod using_ipv4 { + use std::future; use std::sync::Arc; + use mockall::predicate::eq; + use super::sample_scrape_request; - use crate::statistics::TrackerStatisticsEvent; + use crate::statistics::{MockTrackerStatisticsEventSender, TrackerStatisticsEvent}; use crate::tracker::tracker::TorrentTracker; use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{ - default_tracker_config, sample_ipv4_remote_addr, StatsEventSenderMock, TrackerStatsServiceMock, - }; + use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr, TrackerStatsServiceMock}; #[tokio::test] async fn should_send_the_upd4_scrape_event() { let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let mut stats_event_sender = Box::new(StatsEventSenderMock::new()); - - stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp4Scrape); + let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(TrackerStatisticsEvent::Udp4Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); let remote_addr 
= sample_ipv4_remote_addr(); let tracker = Arc::new( @@ -1288,22 +1286,27 @@ mod tests { } mod using_ipv6 { + use std::future; use std::sync::Arc; + use mockall::predicate::eq; + use super::sample_scrape_request; - use crate::statistics::TrackerStatisticsEvent; + use crate::statistics::{MockTrackerStatisticsEventSender, TrackerStatisticsEvent}; use crate::tracker::tracker::TorrentTracker; use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{ - default_tracker_config, sample_ipv6_remote_addr, StatsEventSenderMock, TrackerStatsServiceMock, - }; + use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr, TrackerStatsServiceMock}; #[tokio::test] async fn should_send_the_upd6_scrape_event() { let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); - let mut stats_event_sender = Box::new(StatsEventSenderMock::new()); - - stats_event_sender.should_throw_event(TrackerStatisticsEvent::Udp6Scrape); + let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(TrackerStatisticsEvent::Udp6Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); let remote_addr = sample_ipv6_remote_addr(); let tracker = Arc::new( From d3297cf0b9011933cd5be0018db5f6e4f763c8a4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Oct 2022 10:31:45 +0100 Subject: [PATCH 109/435] refactor: extract StatsRepository --- src/main.rs | 4 +- src/stats.rs | 16 ++++---- src/tracker/statistics.rs | 81 ++++++++++++++++++++------------------- src/tracker/tracker.rs | 10 ++--- src/udp/handlers.rs | 75 +++++++++++------------------------- tests/udp.rs | 5 +-- 6 files changed, 81 insertions(+), 110 deletions(-) diff --git a/src/main.rs b/src/main.rs index bfcce014b..08061cd7b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -24,10 +24,10 @@ async fn main() { }; // Initialize statistics - let 
(stats_tracker, stats_event_sender) = setup_statistics(config.tracker_usage_statistics); + let (stats_event_sender, stats_repository) = setup_statistics(config.tracker_usage_statistics); // Initialize Torrust tracker - let tracker = match TorrentTracker::new(config.clone(), Box::new(stats_tracker), stats_event_sender) { + let tracker = match TorrentTracker::new(config.clone(), stats_event_sender, stats_repository) { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) diff --git a/src/stats.rs b/src/stats.rs index d459d8f5b..1f387a084 100644 --- a/src/stats.rs +++ b/src/stats.rs @@ -1,15 +1,15 @@ -use crate::statistics::{StatsTracker, TrackerStatisticsEventSender}; - -pub fn setup_statistics(tracker_usage_statistics: bool) -> (StatsTracker, Option>) { - let mut stats_tracker = StatsTracker::new_inactive_instance(); +use crate::statistics::{StatsRepository, StatsTracker, TrackerStatisticsEventSender}; +pub fn setup_statistics(tracker_usage_statistics: bool) -> (Option>, StatsRepository) { let mut stats_event_sender = None; + let mut stats_tracker = StatsTracker::new(); + if tracker_usage_statistics { - stats_event_sender = Some(stats_tracker.run_worker()); + stats_event_sender = Some(stats_tracker.run_event_listener()); } - (stats_tracker, stats_event_sender) + (stats_event_sender, stats_tracker.stats_repository) } #[cfg(test)] @@ -20,7 +20,7 @@ mod test { async fn should_not_send_any_event_when_statistics_are_disabled() { let tracker_usage_statistics = false; - let (_stats_tracker, stats_event_sender) = setup_statistics(tracker_usage_statistics); + let (stats_event_sender, _stats_repository) = setup_statistics(tracker_usage_statistics); assert!(stats_event_sender.is_none()); } @@ -29,7 +29,7 @@ mod test { async fn should_send_events_when_statistics_are_enabled() { let tracker_usage_statistics = true; - let (_stats_tracker, stats_event_sender) = setup_statistics(tracker_usage_statistics); + let (stats_event_sender, _stats_repository) = 
setup_statistics(tracker_usage_statistics); assert!(stats_event_sender.is_some()); } diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 73042ff3e..8f203c36e 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -40,6 +40,12 @@ pub struct TrackerStatistics { pub udp6_scrapes_handled: u64, } +impl Default for TrackerStatistics { + fn default() -> Self { + Self::new() + } +} + impl TrackerStatistics { pub fn new() -> Self { Self { @@ -60,56 +66,44 @@ impl TrackerStatistics { } pub struct StatsTracker { - pub stats: Arc>, + pub stats_repository: StatsRepository, } -impl StatsTracker { - pub fn new_active_instance() -> (Self, Box) { - let mut stats_tracker = Self { - stats: Arc::new(RwLock::new(TrackerStatistics::new())), - }; - - let stats_event_sender = stats_tracker.run_worker(); - - (stats_tracker, stats_event_sender) - } - - pub fn new_inactive_instance() -> Self { - Self { - stats: Arc::new(RwLock::new(TrackerStatistics::new())), - } +impl Default for StatsTracker { + fn default() -> Self { + Self::new() } +} - pub fn new_instance(active: bool) -> Self { - if !active { - return Self::new_inactive_instance(); - } +impl StatsTracker { + pub fn new_active_instance() -> (Box, StatsRepository) { + let mut stats_tracker = Self::new(); - let (stats_tracker, _stats_event_sender) = Self::new_active_instance(); + let stats_event_sender = stats_tracker.run_event_listener(); - stats_tracker + (stats_event_sender, stats_tracker.stats_repository) } pub fn new() -> Self { Self { - stats: Arc::new(RwLock::new(TrackerStatistics::new())), + stats_repository: StatsRepository::new(), } } - pub fn run_worker(&mut self) -> Box { - let (tx, rx) = mpsc::channel::(CHANNEL_BUFFER_SIZE); + pub fn run_event_listener(&mut self) -> Box { + let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); - let stats = self.stats.clone(); + let stats_repository = self.stats_repository.clone(); - tokio::spawn(async move { event_listener(rx, stats).await }); 
+ tokio::spawn(async move { event_listener(receiver, stats_repository).await }); - Box::new(StatsEventSender { sender: tx }) + Box::new(StatsEventSender { sender }) } } -async fn event_listener(mut rx: Receiver, stats: Arc>) { - while let Some(event) = rx.recv().await { - let mut stats_lock = stats.write().await; +async fn event_listener(mut receiver: Receiver, stats_repository: StatsRepository) { + while let Some(event) = receiver.recv().await { + let mut stats_lock = stats_repository.stats.write().await; match event { TrackerStatisticsEvent::Tcp4Announce => { @@ -171,18 +165,25 @@ impl TrackerStatisticsEventSender for StatsEventSender { } } -#[async_trait] -pub trait TrackerStatisticsRepository: Sync + Send { - async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics>; +#[derive(Clone)] +pub struct StatsRepository { + pub stats: Arc>, } -#[async_trait] -impl TrackerStatisticsRepository for StatsTracker { - async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { - self.stats.read().await +impl Default for StatsRepository { + fn default() -> Self { + Self::new() } } -pub trait TrackerStatsService: TrackerStatisticsRepository {} +impl StatsRepository { + pub fn new() -> Self { + Self { + stats: Arc::new(RwLock::new(TrackerStatistics::new())), + } + } -impl TrackerStatsService for StatsTracker {} + pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { + self.stats.read().await + } +} diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index 80f6e549d..7e74a3554 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -12,7 +12,7 @@ use crate::databases::database::Database; use crate::mode::TrackerMode; use crate::peer::TorrentPeer; use crate::protocol::common::InfoHash; -use crate::statistics::{TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender, TrackerStatsService}; +use crate::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender}; 
use crate::tracker::key; use crate::tracker::key::AuthKey; use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; @@ -24,16 +24,16 @@ pub struct TorrentTracker { keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, - stats_tracker: Box, stats_event_sender: Option>, + stats_repository: StatsRepository, database: Box, } impl TorrentTracker { pub fn new( config: Arc, - stats_tracker: Box, stats_event_sender: Option>, + stats_repository: StatsRepository, ) -> Result { let database = database::connect_database(&config.db_driver, &config.db_path)?; @@ -43,8 +43,8 @@ impl TorrentTracker { keys: RwLock::new(std::collections::HashMap::new()), whitelist: RwLock::new(std::collections::HashSet::new()), torrents: RwLock::new(std::collections::BTreeMap::new()), - stats_tracker, stats_event_sender, + stats_repository, database, }) } @@ -238,7 +238,7 @@ impl TorrentTracker { } pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { - self.stats_tracker.get_stats().await + self.stats_repository.get_stats().await } pub async fn send_stats_event(&self, event: TrackerStatisticsEvent) -> Option>> { diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 35d2e0247..b962b1333 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -251,13 +251,11 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use async_trait::async_trait; - use tokio::sync::{RwLock, RwLockReadGuard}; use crate::mode::TrackerMode; use crate::peer::TorrentPeer; use crate::protocol::clock::{DefaultClock, Time}; - use crate::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsRepository, TrackerStatsService}; + use crate::statistics::StatsTracker; use crate::tracker::tracker::TorrentTracker; use crate::{Configuration, PeerId}; @@ -281,8 +279,8 @@ mod tests { } fn initialized_tracker(configuration: Arc) -> Arc { - let (stats_tracker, stats_event_sender) = StatsTracker::new_active_instance(); - 
Arc::new(TorrentTracker::new(configuration, Box::new(stats_tracker), Some(stats_event_sender)).unwrap()) + let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); + Arc::new(TorrentTracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()) } fn sample_ipv4_remote_addr() -> SocketAddr { @@ -339,27 +337,6 @@ mod tests { } } - struct TrackerStatsServiceMock { - stats: Arc>, - } - - impl TrackerStatsServiceMock { - fn new() -> Self { - Self { - stats: Arc::new(RwLock::new(TrackerStatistics::new())), - } - } - } - - #[async_trait] - impl TrackerStatisticsRepository for TrackerStatsServiceMock { - async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { - self.stats.read().await - } - } - - impl TrackerStatsService for TrackerStatsServiceMock {} - struct TrackerConfigurationBuilder { configuration: Configuration, } @@ -395,8 +372,8 @@ mod tests { use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; use mockall::predicate::eq; - use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr, TrackerStatsServiceMock}; - use crate::statistics::{MockTrackerStatisticsEventSender, TrackerStatisticsEvent}; + use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr}; + use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; use crate::tracker::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_connect; @@ -448,7 +425,6 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { - let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); stats_event_sender_mock .expect_send_event() @@ -459,8 +435,9 @@ mod tests { let client_socket_address = 
sample_ipv4_socket_address(); - let torrent_tracker = - Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap()); + let torrent_tracker = Arc::new( + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + ); handle_connect(client_socket_address, &sample_connect_request(), torrent_tracker) .await .unwrap(); @@ -468,7 +445,6 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { - let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); stats_event_sender_mock .expect_send_event() @@ -477,8 +453,9 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let torrent_tracker = - Arc::new(TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap()); + let torrent_tracker = Arc::new( + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + ); handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), torrent_tracker) .await .unwrap(); @@ -568,14 +545,13 @@ mod tests { }; use mockall::predicate::eq; - use crate::statistics::{MockTrackerStatisticsEventSender, TrackerStatisticsEvent}; + use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; use crate::tracker::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ default_tracker_config, initialized_public_tracker, sample_ipv4_socket_address, TorrentPeerBuilder, - TrackerStatsServiceMock, }; use crate::PeerId; @@ -714,7 +690,6 @@ mod 
tests { #[tokio::test] async fn should_send_the_upd4_announce_event() { - let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); stats_event_sender_mock .expect_send_event() @@ -724,7 +699,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), ); handle_announce( @@ -795,14 +770,13 @@ mod tests { }; use mockall::predicate::eq; - use crate::statistics::{MockTrackerStatisticsEventSender, TrackerStatisticsEvent}; + use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; use crate::tracker::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ default_tracker_config, initialized_public_tracker, sample_ipv6_remote_addr, TorrentPeerBuilder, - TrackerStatsServiceMock, }; use crate::PeerId; @@ -948,7 +922,6 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_announce_event() { - let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); stats_event_sender_mock .expect_send_event() @@ -958,7 +931,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), ); let remote_addr = sample_ipv6_remote_addr(); @@ -988,9 +961,9 @@ mod tests { #[tokio::test] 
async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let (stats_tracker, stats_event_sender) = StatsTracker::new_active_instance(); + let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); let tracker = - Arc::new(TorrentTracker::new(configuration, Box::new(stats_tracker), Some(stats_event_sender)).unwrap()); + Arc::new(TorrentTracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); @@ -1258,14 +1231,13 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; - use crate::statistics::{MockTrackerStatisticsEventSender, TrackerStatisticsEvent}; + use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; use crate::tracker::tracker::TorrentTracker; use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr, TrackerStatsServiceMock}; + use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr}; #[tokio::test] async fn should_send_the_upd4_scrape_event() { - let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); stats_event_sender_mock .expect_send_event() @@ -1276,7 +1248,7 @@ mod tests { let remote_addr = sample_ipv4_remote_addr(); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) @@ -1292,14 +1264,13 @@ mod tests { use mockall::predicate::eq; use 
super::sample_scrape_request; - use crate::statistics::{MockTrackerStatisticsEventSender, TrackerStatisticsEvent}; + use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; use crate::tracker::tracker::TorrentTracker; use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr, TrackerStatsServiceMock}; + use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr}; #[tokio::test] async fn should_send_the_upd6_scrape_event() { - let tracker_stats_service = Box::new(TrackerStatsServiceMock::new()); let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); stats_event_sender_mock .expect_send_event() @@ -1310,7 +1281,7 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), tracker_stats_service, Some(stats_event_sender)).unwrap(), + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) diff --git a/tests/udp.rs b/tests/udp.rs index d2b500d5a..abd16427b 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -51,11 +51,10 @@ mod udp_tracker_server { lazy_static::initialize(&static_time::TIME_AT_APP_START); // Initialize stats tracker - let (stats_tracker, stats_event_sender) = StatsTracker::new_active_instance(); + let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); // Initialize Torrust tracker - let tracker = match TorrentTracker::new(configuration.clone(), Box::new(stats_tracker), Some(stats_event_sender)) - { + let tracker = match TorrentTracker::new(configuration.clone(), Some(stats_event_sender), stats_repository) { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) From 0dd95e7961dee201a7fd4517230bf645c0bb3839 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 
Oct 2022 16:21:59 +0100 Subject: [PATCH 110/435] refactor: extract statistics event_handler --- src/tracker/statistics.rs | 335 ++++++++++++++++++++++++++++++++------ 1 file changed, 288 insertions(+), 47 deletions(-) diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 8f203c36e..1a681a7a2 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -76,6 +76,12 @@ impl Default for StatsTracker { } impl StatsTracker { + pub fn new() -> Self { + Self { + stats_repository: StatsRepository::new(), + } + } + pub fn new_active_instance() -> (Box, StatsRepository) { let mut stats_tracker = Self::new(); @@ -84,12 +90,6 @@ impl StatsTracker { (stats_event_sender, stats_tracker.stats_repository) } - pub fn new() -> Self { - Self { - stats_repository: StatsRepository::new(), - } - } - pub fn run_event_listener(&mut self) -> Box { let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); @@ -103,49 +103,56 @@ impl StatsTracker { async fn event_listener(mut receiver: Receiver, stats_repository: StatsRepository) { while let Some(event) = receiver.recv().await { - let mut stats_lock = stats_repository.stats.write().await; - - match event { - TrackerStatisticsEvent::Tcp4Announce => { - stats_lock.tcp4_announces_handled += 1; - stats_lock.tcp4_connections_handled += 1; - } - TrackerStatisticsEvent::Tcp4Scrape => { - stats_lock.tcp4_scrapes_handled += 1; - stats_lock.tcp4_connections_handled += 1; - } - TrackerStatisticsEvent::Tcp6Announce => { - stats_lock.tcp6_announces_handled += 1; - stats_lock.tcp6_connections_handled += 1; - } - TrackerStatisticsEvent::Tcp6Scrape => { - stats_lock.tcp6_scrapes_handled += 1; - stats_lock.tcp6_connections_handled += 1; - } - TrackerStatisticsEvent::Udp4Connect => { - stats_lock.udp4_connections_handled += 1; - } - TrackerStatisticsEvent::Udp4Announce => { - stats_lock.udp4_announces_handled += 1; - } - TrackerStatisticsEvent::Udp4Scrape => { - stats_lock.udp4_scrapes_handled += 1; - } - 
TrackerStatisticsEvent::Udp6Connect => { - stats_lock.udp6_connections_handled += 1; - } - TrackerStatisticsEvent::Udp6Announce => { - stats_lock.udp6_announces_handled += 1; - } - TrackerStatisticsEvent::Udp6Scrape => { - stats_lock.udp6_scrapes_handled += 1; - } - } - - debug!("stats: {:?}", stats_lock); + event_handler(event, &stats_repository).await; + } +} - drop(stats_lock); +async fn event_handler(event: TrackerStatisticsEvent, stats_repository: &StatsRepository) { + match event { + // TCP4 + TrackerStatisticsEvent::Tcp4Announce => { + stats_repository.increase_tcp4_announces().await; + stats_repository.increase_tcp4_connections().await; + } + TrackerStatisticsEvent::Tcp4Scrape => { + stats_repository.increase_tcp4_scrapes().await; + stats_repository.increase_tcp4_connections().await; + } + + // TCP6 + TrackerStatisticsEvent::Tcp6Announce => { + stats_repository.increase_tcp6_announces().await; + stats_repository.increase_tcp6_connections().await; + } + TrackerStatisticsEvent::Tcp6Scrape => { + stats_repository.increase_tcp6_scrapes().await; + stats_repository.increase_tcp6_connections().await; + } + + // UDP4 + TrackerStatisticsEvent::Udp4Connect => { + stats_repository.increase_udp4_connections().await; + } + TrackerStatisticsEvent::Udp4Announce => { + stats_repository.increase_udp4_announces().await; + } + TrackerStatisticsEvent::Udp4Scrape => { + stats_repository.increase_udp4_scrapes().await; + } + + // UDP6 + TrackerStatisticsEvent::Udp6Connect => { + stats_repository.increase_udp6_connections().await; + } + TrackerStatisticsEvent::Udp6Announce => { + stats_repository.increase_udp6_announces().await; + } + TrackerStatisticsEvent::Udp6Scrape => { + stats_repository.increase_udp6_scrapes().await; + } } + + debug!("stats: {:?}", stats_repository.get_stats().await); } #[async_trait] @@ -186,4 +193,238 @@ impl StatsRepository { pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { self.stats.read().await } + + pub async fn 
increase_tcp4_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp4_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp4_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp4_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp4_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp4_scrapes_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp6_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp6_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp6_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp6_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp6_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp6_scrapes_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_scrapes_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_scrapes_handled += 1; + drop(stats_lock); + } +} + +#[cfg(test)] +mod tests { + + 
mod event_handler { + use crate::statistics::{event_handler, StatsRepository, TrackerStatisticsEvent}; + + #[tokio::test] + async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Tcp4Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_announce_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Tcp4Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp4_scrapes_counter_when_it_receives_a_tcp4_scrape_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Tcp4Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_scrapes_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_scrape_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Tcp4Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_announces_counter_when_it_receives_a_tcp6_announce_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Tcp6Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_announce_event() { + let stats_repository 
= StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Tcp6Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_scrapes_counter_when_it_receives_a_tcp6_scrape_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Tcp6Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_scrapes_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_scrape_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Tcp6Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_connections_counter_when_it_receives_a_udp4_connect_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Udp4Connect, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_announces_counter_when_it_receives_a_udp4_announce_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Udp4Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_scrapes_counter_when_it_receives_a_udp4_scrape_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Udp4Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_scrapes_handled, 1); + } + + #[tokio::test] + async fn 
should_increase_the_udp6_connections_counter_when_it_receives_a_udp6_connect_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Udp6Connect, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_announces_counter_when_it_receives_a_udp6_announce_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Udp6Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_scrapes_counter_when_it_receives_a_udp6_scrape_event() { + let stats_repository = StatsRepository::new(); + + event_handler(TrackerStatisticsEvent::Udp6Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_scrapes_handled, 1); + } + } } From 9e493055f8c2bb59b923cce1ca2306c681e09c59 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Oct 2022 16:36:28 +0100 Subject: [PATCH 111/435] test: add tests for StatsTracker --- src/tracker/statistics.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 1a681a7a2..c4d4971af 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -270,6 +270,30 @@ impl StatsRepository { #[cfg(test)] mod tests { + mod stats_tracker { + use crate::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsEvent}; + + #[tokio::test] + async fn should_contain_the_tracker_statistics() { + let stats_tracker = StatsTracker::new(); + + let stats = stats_tracker.stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, TrackerStatistics::new().tcp4_announces_handled); + } + + #[tokio::test] + async fn 
should_create_an_event_sender_to_send_statistical_events() { + let mut stats_tracker = StatsTracker::new(); + + let event_sender = stats_tracker.run_event_listener(); + + let result = event_sender.send_event(TrackerStatisticsEvent::Udp4Connect).await; + + assert!(result.is_some()); + } + } + mod event_handler { use crate::statistics::{event_handler, StatsRepository, TrackerStatisticsEvent}; From 6f77dfeee64037cd9011617057d9f1e5c3e2fd63 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 26 Oct 2022 15:31:37 +0100 Subject: [PATCH 112/435] fix: use only minor version for dependencies --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 18188565c..b2b256a2c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,4 +61,4 @@ aquatic_udp_protocol = "0.2" uuid = { version = "1", features = ["v4"] } [dev-dependencies] -mockall = "0.11.3" +mockall = "0.11" From ce8672931a4694b0104bdf17eecc8b90ca0ac2b3 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:15:29 +0100 Subject: [PATCH 113/435] vscode: move and add cSpell words, and fix settings --- .vscode/extensions.json | 2 +- .vscode/settings.json | 13 +--------- cSpell.json | 53 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 55 insertions(+), 13 deletions(-) create mode 100644 cSpell.json diff --git a/.vscode/extensions.json b/.vscode/extensions.json index b55ef8bf6..11d11a5c5 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -1,6 +1,6 @@ { "recommendations": [ "streetsidesoftware.code-spell-checker", - "matklad.rust-analyzer" + "rust-lang.rust-analyzer" ] } \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index 72e8db7e0..f1027e9bd 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,17 +1,6 @@ { - "cSpell.words": [ - "byteorder", - "hasher", - "leechers", - "nanos", - "rngs", - "Seedable", - "thiserror", - "torrust", - "typenum" - ], "[rust]": { 
"editor.formatOnSave": true }, "rust-analyzer.checkOnSave.command": "clippy", -} +} \ No newline at end of file diff --git a/cSpell.json b/cSpell.json new file mode 100644 index 000000000..2cc3e1179 --- /dev/null +++ b/cSpell.json @@ -0,0 +1,53 @@ +{ + "words": [ + "AUTOINCREMENT", + "automock", + "Avicora", + "Azureus", + "bencode", + "binascii", + "Bitflu", + "bufs", + "byteorder", + "canonicalize", + "canonicalized", + "chrono", + "completei", + "downloadedi", + "filesd", + "Freebox", + "hasher", + "hexlify", + "Hydranode", + "incompletei", + "intervali", + "leecher", + "leechers", + "libtorrent", + "Lphant", + "mockall", + "nanos", + "nocapture", + "ostr", + "Pando", + "Rasterbar", + "repr", + "rngs", + "rusqlite", + "Seedable", + "Shareaza", + "sharktorrent", + "socketaddr", + "sqllite", + "Swiftbit", + "thiserror", + "Torrentstorm", + "torrust", + "typenum", + "Unamed", + "untuple", + "Vagaa", + "Xtorrent", + "Xunlei" + ] +} From 68a88e81c62c37b2064b2eac758e137615d75fc3 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:27:10 +0100 Subject: [PATCH 114/435] clippy: fix src/http/response.rs --- src/http/response.rs | 64 ++++++++++++++++++++++---------------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/src/http/response.rs b/src/http/response.rs index 4db12f995..c87b5e0e8 100644 --- a/src/http/response.rs +++ b/src/http/response.rs @@ -38,34 +38,34 @@ impl AnnounceResponse { for peer in &self.peers { match peer.ip { IpAddr::V4(ip) => { - peers_v4.write(&u32::from(ip).to_be_bytes())?; - peers_v4.write(&peer.port.to_be_bytes())?; + peers_v4.write_all(&u32::from(ip).to_be_bytes())?; + peers_v4.write_all(&peer.port.to_be_bytes())?; } IpAddr::V6(ip) => { - peers_v6.write(&u128::from(ip).to_be_bytes())?; - peers_v6.write(&peer.port.to_be_bytes())?; + peers_v6.write_all(&u128::from(ip).to_be_bytes())?; + peers_v6.write_all(&peer.port.to_be_bytes())?; } } } let mut bytes: Vec = Vec::new(); - bytes.write(b"d8:intervali")?; - 
bytes.write(&self.interval.to_string().as_bytes())?; - bytes.write(b"e12:min intervali")?; - bytes.write(&self.interval_min.to_string().as_bytes())?; - bytes.write(b"e8:completei")?; - bytes.write(&self.complete.to_string().as_bytes())?; - bytes.write(b"e10:incompletei")?; - bytes.write(&self.incomplete.to_string().as_bytes())?; - bytes.write(b"e5:peers")?; - bytes.write(&peers_v4.len().to_string().as_bytes())?; - bytes.write(b":")?; - bytes.write(peers_v4.as_slice())?; - bytes.write(b"e6:peers6")?; - bytes.write(&peers_v6.len().to_string().as_bytes())?; - bytes.write(b":")?; - bytes.write(peers_v6.as_slice())?; - bytes.write(b"e")?; + bytes.write_all(b"d8:intervali")?; + bytes.write_all(self.interval.to_string().as_bytes())?; + bytes.write_all(b"e12:min intervali")?; + bytes.write_all(self.interval_min.to_string().as_bytes())?; + bytes.write_all(b"e8:completei")?; + bytes.write_all(self.complete.to_string().as_bytes())?; + bytes.write_all(b"e10:incompletei")?; + bytes.write_all(self.incomplete.to_string().as_bytes())?; + bytes.write_all(b"e5:peers")?; + bytes.write_all(peers_v4.len().to_string().as_bytes())?; + bytes.write_all(b":")?; + bytes.write_all(peers_v4.as_slice())?; + bytes.write_all(b"e6:peers6")?; + bytes.write_all(peers_v6.len().to_string().as_bytes())?; + bytes.write_all(b":")?; + bytes.write_all(peers_v6.as_slice())?; + bytes.write_all(b"e")?; Ok(bytes) } @@ -87,21 +87,21 @@ impl ScrapeResponse { pub fn write(&self) -> Result, Box> { let mut bytes: Vec = Vec::new(); - bytes.write(b"d5:filesd")?; + bytes.write_all(b"d5:filesd")?; for (info_hash, scrape_response_entry) in self.files.iter() { - bytes.write(b"20:")?; - bytes.write(&info_hash.0)?; - bytes.write(b"d8:completei")?; - bytes.write(scrape_response_entry.complete.to_string().as_bytes())?; - bytes.write(b"e10:downloadedi")?; - bytes.write(scrape_response_entry.downloaded.to_string().as_bytes())?; - bytes.write(b"e10:incompletei")?; - 
bytes.write(scrape_response_entry.incomplete.to_string().as_bytes())?; - bytes.write(b"ee")?; + bytes.write_all(b"20:")?; + bytes.write_all(&info_hash.0)?; + bytes.write_all(b"d8:completei")?; + bytes.write_all(scrape_response_entry.complete.to_string().as_bytes())?; + bytes.write_all(b"e10:downloadedi")?; + bytes.write_all(scrape_response_entry.downloaded.to_string().as_bytes())?; + bytes.write_all(b"e10:incompletei")?; + bytes.write_all(scrape_response_entry.incomplete.to_string().as_bytes())?; + bytes.write_all(b"ee")?; } - bytes.write(b"ee")?; + bytes.write_all(b"ee")?; Ok(bytes) } From 7f3066a5064e5b08e1c1b23427f9427b473fc31d Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:28:16 +0100 Subject: [PATCH 115/435] clippy: fix src/http/handlers.rs --- src/http/handlers.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 5214bbe6e..73f7c866c 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -93,7 +93,7 @@ pub async fn handle_scrape( let db = tracker.get_torrents().await; for info_hash in scrape_request.info_hashes.iter() { - let scrape_entry = match db.get(&info_hash) { + let scrape_entry = match db.get(info_hash) { Some(torrent_info) => { if authenticate(info_hash, &auth_key, tracker.clone()).await.is_ok() { let (seeders, completed, leechers) = torrent_info.get_stats(); @@ -117,7 +117,7 @@ pub async fn handle_scrape( }, }; - files.insert(info_hash.clone(), scrape_entry); + files.insert(*info_hash, scrape_entry); } // send stats event From 9a2422e680bfb576fa12b4ff2732d8b04c9ea538 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:30:04 +0100 Subject: [PATCH 116/435] clippy: fix src/http/filters.rs --- src/http/filters.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/http/filters.rs b/src/http/filters.rs index 514cb804c..bee89661b 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -61,12 
+61,12 @@ pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter WebResult> { - let split_raw_query: Vec<&str> = raw_query.split("&").collect(); + let split_raw_query: Vec<&str> = raw_query.split('&').collect(); let mut info_hashes: Vec = Vec::new(); for v in split_raw_query { if v.contains("info_hash") { - let raw_info_hash = v.split("=").collect::>()[1]; + let raw_info_hash = v.split('=').collect::>()[1]; let info_hash_bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes)); if let Ok(ih) = info_hash { @@ -77,7 +77,7 @@ async fn info_hashes(raw_query: String) -> WebResult> { if info_hashes.len() > MAX_SCRAPE_TORRENTS as usize { Err(reject::custom(ServerError::ExceededInfoHashLimit)) - } else if info_hashes.len() < 1 { + } else if info_hashes.is_empty() { Err(reject::custom(ServerError::InvalidInfoHash)) } else { Ok(info_hashes) @@ -87,7 +87,7 @@ async fn info_hashes(raw_query: String) -> WebResult> { /// Parse PeerId from raw query string async fn peer_id(raw_query: String) -> WebResult { // put all query params in a vec - let split_raw_query: Vec<&str> = raw_query.split("&").collect(); + let split_raw_query: Vec<&str> = raw_query.split('&').collect(); let mut peer_id: Option = None; @@ -95,7 +95,7 @@ async fn peer_id(raw_query: String) -> WebResult { // look for the peer_id param if v.contains("peer_id") { // get raw percent_encoded peer_id - let raw_peer_id = v.split("=").collect::>()[1]; + let raw_peer_id = v.split('=').collect::>()[1]; // decode peer_id let peer_id_bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); @@ -143,7 +143,7 @@ async fn peer_addr( // set client ip to last forwarded ip let x_forwarded_ip = *x_forwarded_ips.last().unwrap(); - IpAddr::from_str(x_forwarded_ip).or_else(|_| Err(reject::custom(ServerError::AddressNotFound))) + IpAddr::from_str(x_forwarded_ip).map_err(|_| reject::custom(ServerError::AddressNotFound)) } false => 
Ok(remote_addr.unwrap().ip()), } From 4d93dbc0fbfdc171d581a1b71930d4bc011a9915 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:31:16 +0100 Subject: [PATCH 117/435] clippy: fix src/api/server.rs --- src/api/server.rs | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/src/api/server.rs b/src/api/server.rs index 5285c9b2b..a8b235a66 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -129,10 +129,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let view_stats_list = filters::method::get() .and(filters::path::path("stats")) .and(filters::path::end()) - .map(move || { - let tracker = api_stats.clone(); - tracker - }) + .map(move || api_stats.clone()) .and_then(|tracker: Arc| async move { let mut results = Stats { torrents: 0, @@ -304,10 +301,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp .and(filters::path::path("whitelist")) .and(filters::path::path("reload")) .and(filters::path::end()) - .map(move || { - let tracker = t7.clone(); - tracker - }) + .map(move || t7.clone()) .and_then(|tracker: Arc| async move { match tracker.load_whitelist().await { Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), @@ -324,10 +318,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp .and(filters::path::path("keys")) .and(filters::path::path("reload")) .and(filters::path::end()) - .map(move || { - let tracker = t8.clone(); - tracker - }) + .map(move || t8.clone()) .and_then(|tracker: Arc| async move { match tracker.load_keys().await { Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), From eccf1b095afa9c0dbacdc81ff94fbfc78f6e18ad Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:32:14 +0100 Subject: [PATCH 118/435] clippy: fix src/databases/database.rs --- src/databases/database.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/databases/database.rs b/src/databases/database.rs index adc735fd2..c67f39a54 100644 --- 
a/src/databases/database.rs +++ b/src/databases/database.rs @@ -7,7 +7,7 @@ use crate::databases::sqlite::SqliteDatabase; use crate::tracker::key::AuthKey; use crate::InfoHash; -#[derive(Serialize, Deserialize, PartialEq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub enum DatabaseDrivers { Sqlite3, MySQL, @@ -55,7 +55,7 @@ pub trait Database: Sync + Send { async fn remove_key_from_keys(&self, key: &str) -> Result; } -#[derive(Debug, Display, PartialEq, Error)] +#[derive(Debug, Display, PartialEq, Eq, Error)] #[allow(dead_code)] pub enum Error { #[display(fmt = "Query returned no rows.")] From 5d586aa17af476e76a4d455c1358ed0131a3e0ef Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:32:52 +0100 Subject: [PATCH 119/435] clippy: fix src/databases/mysql.rs --- src/databases/mysql.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 33287df6d..a4d870101 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -19,7 +19,7 @@ pub struct MysqlDatabase { impl MysqlDatabase { pub fn new(db_path: &str) -> Result { - let opts = Opts::from_url(&db_path).expect("Failed to connect to MySQL database."); + let opts = Opts::from_url(db_path).expect("Failed to connect to MySQL database."); let builder = OptsBuilder::from_opts(opts); let manager = MysqlConnectionManager::new(builder); let pool = r2d2::Pool::builder() From 5b30adf305615f20e4cba31037a883d5b63d7b79 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:35:11 +0100 Subject: [PATCH 120/435] clippy: fix src/databases/sqlite.rs --- src/databases/sqlite.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index fb66c0b94..ef9f12d9c 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -135,7 +135,7 @@ impl Database for SqliteDatabase { let conn = self.pool.get().map_err(|_| 
database::Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; - let mut rows = stmt.query(&[info_hash])?; + let mut rows = stmt.query([info_hash])?; if let Some(row) = rows.next()? { let info_hash: String = row.get(0).unwrap(); @@ -223,7 +223,7 @@ impl Database for SqliteDatabase { async fn remove_key_from_keys(&self, key: &str) -> Result { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - match conn.execute("DELETE FROM keys WHERE key = ?", &[key]) { + match conn.execute("DELETE FROM keys WHERE key = ?", [key]) { Ok(updated) => { if updated > 0 { return Ok(updated); From 706830dd29f936e23b9f912a14efca09df690789 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:44:53 +0100 Subject: [PATCH 121/435] clippy: fix src/protocol/common.rs --- src/protocol/common.rs | 47 ++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 25 deletions(-) diff --git a/src/protocol/common.rs b/src/protocol/common.rs index f1bd6a99c..431521764 100644 --- a/src/protocol/common.rs +++ b/src/protocol/common.rs @@ -26,17 +26,9 @@ pub enum AnnounceEventDef { #[serde(remote = "NumberOfBytes")] pub struct NumberOfBytesDef(pub i64); -#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, Ord)] +#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] pub struct InfoHash(pub [u8; 20]); -impl InfoHash { - pub fn to_string(&self) -> String { - let mut buffer = [0u8; 40]; - let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); - String::from(std::str::from_utf8(bytes_out).unwrap()) - } -} - impl std::fmt::Display for InfoHash { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let mut chars = [0u8; 40]; @@ -49,7 +41,7 @@ impl std::str::FromStr for InfoHash { type Err = binascii::ConvertError; fn from_str(s: &str) -> Result { - let mut i = Self { 0: [0u8; 20] }; + let mut i = Self([0u8; 20]); if s.len() != 40 { return 
Err(binascii::ConvertError::InvalidInputLength); } @@ -58,6 +50,12 @@ impl std::str::FromStr for InfoHash { } } +impl Ord for InfoHash { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.cmp(&other.0) + } +} + impl std::cmp::PartialOrd for InfoHash { fn partial_cmp(&self, other: &InfoHash) -> Option { self.0.partial_cmp(&other.0) @@ -67,15 +65,15 @@ impl std::cmp::PartialOrd for InfoHash { impl std::convert::From<&[u8]> for InfoHash { fn from(data: &[u8]) -> InfoHash { assert_eq!(data.len(), 20); - let mut ret = InfoHash { 0: [0u8; 20] }; + let mut ret = InfoHash([0u8; 20]); ret.0.clone_from_slice(data); - return ret; + ret } } -impl std::convert::Into for [u8; 20] { - fn into(self) -> InfoHash { - InfoHash { 0: self } +impl std::convert::From<[u8; 20]> for InfoHash { + fn from(val: [u8; 20]) -> Self { + InfoHash(val) } } @@ -206,15 +204,15 @@ impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { )); } - let mut res = InfoHash { 0: [0u8; 20] }; + let mut res = InfoHash([0u8; 20]); - if let Err(_) = binascii::hex2bin(v.as_bytes(), &mut res.0) { + if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { return Err(serde::de::Error::invalid_value( serde::de::Unexpected::Str(v), &"expected a hexadecimal string", )); } else { - return Ok(res); + Ok(res) } } } @@ -222,15 +220,14 @@ impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { #[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord)] pub struct PeerId(pub [u8; 20]); -impl PeerId { - pub fn to_string(&self) -> String { +impl std::fmt::Display for PeerId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut buffer = [0u8; 20]; let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok(); - return if let Some(bytes_out) = bytes_out { - String::from(std::str::from_utf8(bytes_out).unwrap()) - } else { - "".to_string() - }; + match bytes_out { + Some(bytes) => write!(f, "{}", std::str::from_utf8(bytes).unwrap()), + None => write!(f, ""), + } } } From 
a8cfeb120c39da96fff3d1c7cc3c4cd472eeb417 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:45:20 +0100 Subject: [PATCH 122/435] clippy: fix src/tracker/key.rs --- src/tracker/key.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tracker/key.rs b/src/tracker/key.rs index c513b48da..1bf0557a1 100644 --- a/src/tracker/key.rs +++ b/src/tracker/key.rs @@ -63,7 +63,7 @@ impl AuthKey { } } -#[derive(Debug, Display, PartialEq, Error)] +#[derive(Debug, Display, PartialEq, Eq, Error)] #[allow(dead_code)] pub enum Error { #[display(fmt = "Key could not be verified.")] From d20bc691189044fe695601d5afc87221b2f9eee0 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:57:53 +0100 Subject: [PATCH 123/435] clippy: fix src/tracker/tracker.rs --- src/tracker/tracker.rs | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs index 7e74a3554..15000c827 100644 --- a/src/tracker/tracker.rs +++ b/src/tracker/tracker.rs @@ -13,10 +13,9 @@ use crate::mode::TrackerMode; use crate::peer::TorrentPeer; use crate::protocol::common::InfoHash; use crate::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender}; -use crate::tracker::key; use crate::tracker::key::AuthKey; use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; -use crate::Configuration; +use crate::{key, Configuration}; pub struct TorrentTracker { pub config: Arc, @@ -69,7 +68,7 @@ impl TorrentTracker { } pub async fn remove_auth_key(&self, key: &str) -> Result<(), database::Error> { - self.database.remove_key_from_keys(&key).await?; + self.database.remove_key_from_keys(key).await?; self.keys.write().await.remove(key); Ok(()) } @@ -112,7 +111,7 @@ impl TorrentTracker { // Removing torrents is not relevant to public trackers. 
pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { - self.database.remove_info_hash_from_whitelist(info_hash.clone()).await?; + self.database.remove_info_hash_from_whitelist(*info_hash).await?; self.whitelist.write().await.remove(info_hash); Ok(()) } @@ -155,10 +154,8 @@ impl TorrentTracker { } // check if info_hash is whitelisted - if self.is_whitelisted() { - if !self.is_info_hash_whitelisted(info_hash).await { - return Err(TorrentError::TorrentNotWhitelisted); - } + if self.is_whitelisted() && !self.is_info_hash_whitelisted(info_hash).await { + return Err(TorrentError::TorrentNotWhitelisted); } Ok(()) @@ -180,7 +177,7 @@ impl TorrentTracker { completed, }; - torrents.insert(info_hash.clone(), torrent_entry); + torrents.insert(info_hash, torrent_entry); } Ok(()) @@ -209,7 +206,7 @@ impl TorrentTracker { pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &TorrentPeer) -> TorrentStats { let mut torrents = self.torrents.write().await; - let torrent_entry = match torrents.entry(info_hash.clone()) { + let torrent_entry = match torrents.entry(*info_hash) { Entry::Vacant(vacant) => vacant.insert(TorrentEntry::new()), Entry::Occupied(entry) => entry.into_mut(), }; @@ -220,7 +217,7 @@ impl TorrentTracker { if self.config.persistent_torrent_completed_stat && stats_updated { let _ = self .database - .save_persistent_torrent(&info_hash, torrent_entry.completed) + .save_persistent_torrent(info_hash, torrent_entry.completed) .await; } @@ -258,8 +255,8 @@ impl TorrentTracker { torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); match self.config.persistent_torrent_completed_stat { - true => torrent_entry.completed > 0 || torrent_entry.peers.len() > 0, - false => torrent_entry.peers.len() > 0, + true => torrent_entry.completed > 0 || !torrent_entry.peers.is_empty(), + false => !torrent_entry.peers.is_empty(), } }); } else { From 1dc43f5645d0500432f814320daf8050c6b94853 Mon 
Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:58:19 +0100 Subject: [PATCH 124/435] clippy: fix src/tracker/mode.rs --- src/tracker/mode.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tracker/mode.rs b/src/tracker/mode.rs index 9110b7f4f..f444b4523 100644 --- a/src/tracker/mode.rs +++ b/src/tracker/mode.rs @@ -1,7 +1,7 @@ use serde; use serde::{Deserialize, Serialize}; -#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Debug)] +#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] pub enum TrackerMode { // Will track every new info hash and serve every peer. #[serde(rename = "public")] From ce0ea09d8e6504bc8bf2e0eaa5f7a2415cfdb293 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 10:59:40 +0100 Subject: [PATCH 125/435] clippy: fix src/tracker/peer.rs --- src/tracker/peer.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 7ac35179a..7a2599f82 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -75,8 +75,8 @@ impl TorrentPeer { // potentially substitute localhost ip with external ip pub fn peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip: IpAddr, host_opt_ip: Option, port: u16) -> SocketAddr { - if remote_ip.is_loopback() && host_opt_ip.is_some() { - SocketAddr::new(host_opt_ip.unwrap(), port) + if let Some(host_ip) = host_opt_ip.filter(|_| remote_ip.is_loopback()) { + SocketAddr::new(host_ip, port) } else { SocketAddr::new(remote_ip, port) } From f48072e5f5d336c7c5038a8f4165ffdaf75c6326 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 11:00:10 +0100 Subject: [PATCH 126/435] clippy: fix src/tracker/statistics.rs --- src/tracker/statistics.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index c4d4971af..ac3889270 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -10,7 +10,7 @@ 
use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; const CHANNEL_BUFFER_SIZE: usize = 65_535; -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Eq)] pub enum TrackerStatisticsEvent { Tcp4Announce, Tcp4Scrape, From 2084c4a8375ccc14b3aac0c858865dbb3b807431 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 11:01:23 +0100 Subject: [PATCH 127/435] clippy: fix src/tracker/torrent.rs --- src/tracker/torrent.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 65eaa0a40..4e602d359 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -82,6 +82,12 @@ impl TorrentEntry { } } +impl Default for TorrentEntry { + fn default() -> Self { + Self::new() + } +} + #[derive(Debug)] pub struct TorrentStats { pub completed: u32, From 8f80060c52ada04b8ac6d1d86890d7c939a39d8b Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 11:04:46 +0100 Subject: [PATCH 128/435] clippy: fix src/config.rs --- src/config.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/config.rs b/src/config.rs index b59d572ea..8c17070d2 100644 --- a/src/config.rs +++ b/src/config.rs @@ -12,14 +12,14 @@ use {std, toml}; use crate::databases::database::DatabaseDrivers; use crate::mode::TrackerMode; -#[derive(Serialize, Deserialize, PartialEq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct UdpTrackerConfig { pub enabled: bool, pub bind_address: String, } #[serde_as] -#[derive(Serialize, Deserialize, PartialEq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct HttpTrackerConfig { pub enabled: bool, pub bind_address: String, @@ -30,14 +30,14 @@ pub struct HttpTrackerConfig { pub ssl_key_path: Option, } -#[derive(Serialize, Deserialize, PartialEq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct HttpApiConfig { pub enabled: bool, pub bind_address: String, pub access_tokens: HashMap, } 
-#[derive(Serialize, Deserialize, PartialEq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct Configuration { pub log_level: Option, pub mode: TrackerMode, @@ -140,9 +140,9 @@ impl Configuration { eprintln!("Creating config file.."); let config = Configuration::default(); let _ = config.save_to_file(path); - return Err(ConfigError::Message(format!( - "Please edit the config.TOML in the root folder and restart the tracker." - ))); + return Err(ConfigError::Message( + "Please edit the config.TOML in the root folder and restart the tracker.".to_string(), + )); } let torrust_config: Configuration = config @@ -152,7 +152,7 @@ impl Configuration { Ok(torrust_config) } - pub fn save_to_file(&self, path: &str) -> Result<(), ()> { + pub fn save_to_file(&self, path: &str) -> Result<(), ConfigurationError> { let toml_string = toml::to_string(self).expect("Could not encode TOML value"); fs::write(path, toml_string).expect("Could not write to file!"); Ok(()) @@ -236,7 +236,7 @@ mod tests { let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); // Convert to argument type for Configuration::save_to_file - let config_file_path = temp_file.clone(); + let config_file_path = temp_file; let path = config_file_path.to_string_lossy().to_string(); let default_configuration = Configuration::default(); From 96c14324b9a8d9a2587cdeeeb95e5a71171d8fb9 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 11:05:22 +0100 Subject: [PATCH 129/435] clippy: fix src/setup.rs --- src/setup.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/setup.rs b/src/setup.rs index 0c5ed9004..387b6c26e 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -35,7 +35,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Vec< udp_tracker_config.bind_address, config.mode ); } else { - jobs.push(udp_tracker::start_job(&udp_tracker_config, tracker.clone())) + jobs.push(udp_tracker::start_job(udp_tracker_config, 
tracker.clone())) } } @@ -44,17 +44,17 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Vec< if !http_tracker_config.enabled { continue; } - jobs.push(http_tracker::start_job(&http_tracker_config, tracker.clone())); + jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone())); } // Start HTTP API server if config.http_api.enabled { - jobs.push(tracker_api::start_job(&config, tracker.clone())); + jobs.push(tracker_api::start_job(config, tracker.clone())); } // Remove torrents without peers, every interval if config.inactive_peer_cleanup_interval > 0 { - jobs.push(torrent_cleanup::start_job(&config, tracker.clone())); + jobs.push(torrent_cleanup::start_job(config, tracker.clone())); } jobs From acf9ee26ec7ae1f2f53a84a8eb6f292222e7ea96 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 11:07:10 +0100 Subject: [PATCH 130/435] clippy: fix tests/udp.rs --- tests/udp.rs | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/tests/udp.rs b/tests/udp.rs index abd16427b..0bc2a6506 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -67,7 +67,7 @@ mod udp_tracker_server { let udp_tracker_config = &configuration.udp_trackers[0]; // Start the UDP tracker job - self.job = Some(udp_tracker::start_job(&udp_tracker_config, tracker.clone())); + self.job = Some(udp_tracker::start_job(udp_tracker_config, tracker)); self.bind_address = Some(udp_tracker_config.bind_address.clone()); @@ -136,7 +136,7 @@ mod udp_tracker_server { Err(_) => panic!("could not write request to bytes."), }; - self.udp_client.send(&request_data).await + self.udp_client.send(request_data).await } async fn receive(&self) -> Response { @@ -178,30 +178,24 @@ mod udp_tracker_server { fn is_error_response(response: &Response, error_message: &str) -> bool { match response { - Response::Error(error_response) => return error_response.message.starts_with(error_message), - _ => return false, - }; + Response::Error(error_response) => 
error_response.message.starts_with(error_message), + _ => false, + } } fn is_connect_response(response: &Response, transaction_id: TransactionId) -> bool { match response { - Response::Connect(connect_response) => return connect_response.transaction_id == transaction_id, - _ => return false, - }; + Response::Connect(connect_response) => connect_response.transaction_id == transaction_id, + _ => false, + } } fn is_ipv4_announce_response(response: &Response) -> bool { - match response { - Response::AnnounceIpv4(_) => return true, - _ => return false, - }; + matches!(response, Response::AnnounceIpv4(_)) } fn is_scrape_response(response: &Response) -> bool { - match response { - Response::Scrape(_) => return true, - _ => return false, - }; + matches!(response, Response::Scrape(_)) } #[tokio::test] From 8dfffe4db683df124e2b7e3c3ad967a36df1928f Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 11:13:28 +0100 Subject: [PATCH 131/435] clippy: fix src/udp/connection_cookie.rs --- src/udp/connection_cookie.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs index a17431b9c..c40a56959 100644 --- a/src/udp/connection_cookie.rs +++ b/src/udp/connection_cookie.rs @@ -22,9 +22,8 @@ pub fn into_connection_id(connection_cookie: &Cookie) -> ConnectionId { pub fn make_connection_cookie(remote_address: &SocketAddr) -> Cookie { let time_extent = cookie_builder::get_last_time_extent(); - let cookie = cookie_builder::build(remote_address, &time_extent); //println!("remote_address: {remote_address:?}, time_extent: {time_extent:?}, cookie: {cookie:?}"); - cookie + cookie_builder::build(remote_address, &time_extent) } pub fn check_connection_cookie( From 7336d5e810d45453d96a770b05f5e3a8038ee729 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 11:14:57 +0100 Subject: [PATCH 132/435] clippy: fix src/udp/handlers.rs --- src/udp/handlers.rs | 10 +++++----- 1 file changed, 5 
insertions(+), 5 deletions(-) diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index b962b1333..cc4229b66 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -667,8 +667,8 @@ mod tests { let request = AnnounceRequestBuilder::default() .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .into(); - let response = handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); - response + + handle_announce(remote_addr, &request, tracker.clone()).await.unwrap() } #[tokio::test] @@ -899,8 +899,8 @@ mod tests { let request = AnnounceRequestBuilder::default() .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) .into(); - let response = handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); - response + + handle_announce(remote_addr, &request, tracker.clone()).await.unwrap() } #[tokio::test] @@ -1073,7 +1073,7 @@ mod tests { let info_hashes = vec![*info_hash]; ScrapeRequest { - connection_id: into_connection_id(&make_connection_cookie(&remote_addr)), + connection_id: into_connection_id(&make_connection_cookie(remote_addr)), transaction_id: TransactionId(0i32), info_hashes, } From 78633c4662a743984e1c13115078593e8a07e38f Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 11:18:29 +0100 Subject: [PATCH 133/435] clippy: merge src/tracker/tracker.rs into src/tracker/mod.rs --- src/api/server.rs | 2 +- src/http/filters.rs | 2 +- src/http/handlers.rs | 2 +- src/http/routes.rs | 2 +- src/http/server.rs | 2 +- src/jobs/http_tracker.rs | 2 +- src/jobs/torrent_cleanup.rs | 2 +- src/jobs/tracker_api.rs | 2 +- src/jobs/udp_tracker.rs | 2 +- src/main.rs | 2 +- src/setup.rs | 2 +- src/tracker/mod.rs | 270 +++++++++++++++++++++++++++++++++++- src/tracker/tracker.rs | 268 ----------------------------------- src/udp/handlers.rs | 18 +-- src/udp/server.rs | 2 +- tests/udp.rs | 2 +- 16 files changed, 291 insertions(+), 291 deletions(-) delete mode 100644 src/tracker/tracker.rs 
diff --git a/src/api/server.rs b/src/api/server.rs index a8b235a66..5a604aa0c 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -9,7 +9,7 @@ use warp::{filters, reply, serve, Filter}; use crate::peer::TorrentPeer; use crate::protocol::common::*; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; #[derive(Deserialize, Debug)] struct TorrentInfoQuery { diff --git a/src/http/filters.rs b/src/http/filters.rs index bee89661b..42d1592ff 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -7,7 +7,7 @@ use warp::{reject, Filter, Rejection}; use crate::http::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError, WebResult}; use crate::tracker::key::AuthKey; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; use crate::{InfoHash, PeerId, MAX_SCRAPE_TORRENTS}; /// Pass Arc along diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 73f7c866c..87d2d51f6 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -15,7 +15,7 @@ use crate::peer::TorrentPeer; use crate::tracker::key::AuthKey; use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::{TorrentError, TorrentStats}; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; use crate::InfoHash; /// Authenticate InfoHash using optional AuthKey diff --git a/src/http/routes.rs b/src/http/routes.rs index a9ca3027f..8bfaf5ed9 100644 --- a/src/http/routes.rs +++ b/src/http/routes.rs @@ -6,7 +6,7 @@ use warp::{Filter, Rejection}; use crate::http::{ handle_announce, handle_scrape, send_error, with_announce_request, with_auth_key, with_scrape_request, with_tracker, }; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; /// All routes pub fn routes(tracker: Arc) -> impl Filter + Clone { diff --git a/src/http/server.rs b/src/http/server.rs index 8b92d8792..4e48f97e3 100644 --- a/src/http/server.rs +++ b/src/http/server.rs @@ -2,7 +2,7 @@ use 
std::net::SocketAddr; use std::sync::Arc; use crate::http::routes; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; /// Server that listens on HTTP, needs a TorrentTracker #[derive(Clone)] diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index ef67f0a7e..2d8f307b4 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use log::{info, warn}; use tokio::task::JoinHandle; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; use crate::{HttpServer, HttpTrackerConfig}; pub fn start_job(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { diff --git a/src/jobs/torrent_cleanup.rs b/src/jobs/torrent_cleanup.rs index 6e4b0c77e..04b064043 100644 --- a/src/jobs/torrent_cleanup.rs +++ b/src/jobs/torrent_cleanup.rs @@ -4,7 +4,7 @@ use chrono::Utc; use log::info; use tokio::task::JoinHandle; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; use crate::Configuration; pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index f3c9ae788..97b1fa3b0 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -4,7 +4,7 @@ use log::info; use tokio::task::JoinHandle; use crate::api::server; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; use crate::Configuration; pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index f93979c9f..00fdaddbe 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use log::{error, info, warn}; use tokio::task::JoinHandle; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; use crate::{UdpServer, UdpTrackerConfig}; pub fn start_job(config: &UdpTrackerConfig, tracker: Arc) -> JoinHandle<()> { diff 
--git a/src/main.rs b/src/main.rs index 08061cd7b..bf832dbf4 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use log::info; use torrust_tracker::stats::setup_statistics; -use torrust_tracker::tracker::tracker::TorrentTracker; +use torrust_tracker::tracker::TorrentTracker; use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time, Configuration}; #[tokio::main] diff --git a/src/setup.rs b/src/setup.rs index 387b6c26e..2ecc1c143 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -4,7 +4,7 @@ use log::warn; use tokio::task::JoinHandle; use crate::jobs::{http_tracker, torrent_cleanup, tracker_api, udp_tracker}; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; use crate::Configuration; pub async fn setup(config: &Configuration, tracker: Arc) -> Vec> { diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index bbb027a35..77f51098a 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -3,4 +3,272 @@ pub mod mode; pub mod peer; pub mod statistics; pub mod torrent; -pub mod tracker; + +use std::collections::btree_map::Entry; +use std::collections::BTreeMap; +use std::net::SocketAddr; +use std::sync::Arc; +use std::time::Duration; + +use tokio::sync::mpsc::error::SendError; +use tokio::sync::{RwLock, RwLockReadGuard}; + +use crate::databases::database; +use crate::databases::database::Database; +use crate::mode::TrackerMode; +use crate::peer::TorrentPeer; +use crate::protocol::common::InfoHash; +use crate::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender}; +use crate::tracker::key::AuthKey; +use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; +use crate::Configuration; + +pub struct TorrentTracker { + pub config: Arc, + mode: TrackerMode, + keys: RwLock>, + whitelist: RwLock>, + torrents: RwLock>, + stats_event_sender: Option>, + stats_repository: StatsRepository, + database: Box, +} + +impl TorrentTracker { + pub fn 
new( + config: Arc, + stats_event_sender: Option>, + stats_repository: StatsRepository, + ) -> Result { + let database = database::connect_database(&config.db_driver, &config.db_path)?; + + Ok(TorrentTracker { + config: config.clone(), + mode: config.mode, + keys: RwLock::new(std::collections::HashMap::new()), + whitelist: RwLock::new(std::collections::HashSet::new()), + torrents: RwLock::new(std::collections::BTreeMap::new()), + stats_event_sender, + stats_repository, + database, + }) + } + + pub fn is_public(&self) -> bool { + self.mode == TrackerMode::Public + } + + pub fn is_private(&self) -> bool { + self.mode == TrackerMode::Private || self.mode == TrackerMode::PrivateListed + } + + pub fn is_whitelisted(&self) -> bool { + self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed + } + + pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { + let auth_key = key::generate_auth_key(lifetime); + self.database.add_key_to_keys(&auth_key).await?; + self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); + Ok(auth_key) + } + + pub async fn remove_auth_key(&self, key: &str) -> Result<(), database::Error> { + self.database.remove_key_from_keys(key).await?; + self.keys.write().await.remove(key); + Ok(()) + } + + pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key::Error> { + match self.keys.read().await.get(&auth_key.key) { + None => Err(key::Error::KeyInvalid), + Some(key) => key::verify_auth_key(key), + } + } + + pub async fn load_keys(&self) -> Result<(), database::Error> { + let keys_from_database = self.database.load_keys().await?; + let mut keys = self.keys.write().await; + + keys.clear(); + + for key in keys_from_database { + let _ = keys.insert(key.key.clone(), key); + } + + Ok(()) + } + + // Adding torrents is not relevant to public trackers. 
+ pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + self.add_torrent_to_database_whitelist(info_hash).await?; + self.add_torrent_to_memory_whitelist(info_hash).await; + Ok(()) + } + + async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + self.database.add_info_hash_to_whitelist(*info_hash).await?; + Ok(()) + } + + pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { + self.whitelist.write().await.insert(*info_hash) + } + + // Removing torrents is not relevant to public trackers. + pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + self.database.remove_info_hash_from_whitelist(*info_hash).await?; + self.whitelist.write().await.remove(info_hash); + Ok(()) + } + + pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { + self.whitelist.read().await.contains(info_hash) + } + + pub async fn load_whitelist(&self) -> Result<(), database::Error> { + let whitelisted_torrents_from_database = self.database.load_whitelist().await?; + let mut whitelist = self.whitelist.write().await; + + whitelist.clear(); + + for info_hash in whitelisted_torrents_from_database { + let _ = whitelist.insert(info_hash); + } + + Ok(()) + } + + pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { + // no authentication needed in public mode + if self.is_public() { + return Ok(()); + } + + // check if auth_key is set and valid + if self.is_private() { + match key { + Some(key) => { + if self.verify_auth_key(key).await.is_err() { + return Err(TorrentError::PeerKeyNotValid); + } + } + None => { + return Err(TorrentError::PeerNotAuthenticated); + } + } + } + + // check if info_hash is whitelisted + if self.is_whitelisted() && !self.is_info_hash_whitelisted(info_hash).await { + return Err(TorrentError::TorrentNotWhitelisted); + } + + Ok(()) 
+ } + + // Loading the torrents from database into memory + pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> { + let persistent_torrents = self.database.load_persistent_torrents().await?; + let mut torrents = self.torrents.write().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(&info_hash) { + continue; + } + + let torrent_entry = TorrentEntry { + peers: Default::default(), + completed, + }; + + torrents.insert(info_hash, torrent_entry); + } + + Ok(()) + } + + /// Get all torrent peers for a given torrent filtering out the peer with the client address + pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { + let read_lock = self.torrents.read().await; + + match read_lock.get(info_hash) { + None => vec![], + Some(entry) => entry.get_peers(Some(client_addr)).into_iter().cloned().collect(), + } + } + + /// Get all torrent peers for a given torrent + pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { + let read_lock = self.torrents.read().await; + + match read_lock.get(info_hash) { + None => vec![], + Some(entry) => entry.get_peers(None).into_iter().cloned().collect(), + } + } + + pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &TorrentPeer) -> TorrentStats { + let mut torrents = self.torrents.write().await; + + let torrent_entry = match torrents.entry(*info_hash) { + Entry::Vacant(vacant) => vacant.insert(TorrentEntry::new()), + Entry::Occupied(entry) => entry.into_mut(), + }; + + let stats_updated = torrent_entry.update_peer(peer); + + // todo: move this action to a separate worker + if self.config.persistent_torrent_completed_stat && stats_updated { + let _ = self + .database + .save_persistent_torrent(info_hash, torrent_entry.completed) + .await; + } + + let (seeders, completed, leechers) = torrent_entry.get_stats(); + + TorrentStats { + seeders, + 
leechers, + completed, + } + } + + pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap> { + self.torrents.read().await + } + + pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { + self.stats_repository.get_stats().await + } + + pub async fn send_stats_event(&self, event: TrackerStatisticsEvent) -> Option>> { + match &self.stats_event_sender { + None => None, + Some(stats_event_sender) => stats_event_sender.send_event(event).await, + } + } + + // Remove inactive peers and (optionally) peerless torrents + pub async fn cleanup_torrents(&self) { + let mut torrents_lock = self.torrents.write().await; + + // If we don't need to remove torrents we will use the faster iter + if self.config.remove_peerless_torrents { + torrents_lock.retain(|_, torrent_entry| { + torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); + + match self.config.persistent_torrent_completed_stat { + true => torrent_entry.completed > 0 || !torrent_entry.peers.is_empty(), + false => !torrent_entry.peers.is_empty(), + } + }); + } else { + for (_, torrent_entry) in torrents_lock.iter_mut() { + torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); + } + } + } +} diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs deleted file mode 100644 index 15000c827..000000000 --- a/src/tracker/tracker.rs +++ /dev/null @@ -1,268 +0,0 @@ -use std::collections::btree_map::Entry; -use std::collections::BTreeMap; -use std::net::SocketAddr; -use std::sync::Arc; -use std::time::Duration; - -use tokio::sync::mpsc::error::SendError; -use tokio::sync::{RwLock, RwLockReadGuard}; - -use crate::databases::database; -use crate::databases::database::Database; -use crate::mode::TrackerMode; -use crate::peer::TorrentPeer; -use crate::protocol::common::InfoHash; -use crate::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender}; -use crate::tracker::key::AuthKey; -use crate::tracker::torrent::{TorrentEntry, 
TorrentError, TorrentStats}; -use crate::{key, Configuration}; - -pub struct TorrentTracker { - pub config: Arc, - mode: TrackerMode, - keys: RwLock>, - whitelist: RwLock>, - torrents: RwLock>, - stats_event_sender: Option>, - stats_repository: StatsRepository, - database: Box, -} - -impl TorrentTracker { - pub fn new( - config: Arc, - stats_event_sender: Option>, - stats_repository: StatsRepository, - ) -> Result { - let database = database::connect_database(&config.db_driver, &config.db_path)?; - - Ok(TorrentTracker { - config: config.clone(), - mode: config.mode, - keys: RwLock::new(std::collections::HashMap::new()), - whitelist: RwLock::new(std::collections::HashSet::new()), - torrents: RwLock::new(std::collections::BTreeMap::new()), - stats_event_sender, - stats_repository, - database, - }) - } - - pub fn is_public(&self) -> bool { - self.mode == TrackerMode::Public - } - - pub fn is_private(&self) -> bool { - self.mode == TrackerMode::Private || self.mode == TrackerMode::PrivateListed - } - - pub fn is_whitelisted(&self) -> bool { - self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed - } - - pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { - let auth_key = key::generate_auth_key(lifetime); - self.database.add_key_to_keys(&auth_key).await?; - self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); - Ok(auth_key) - } - - pub async fn remove_auth_key(&self, key: &str) -> Result<(), database::Error> { - self.database.remove_key_from_keys(key).await?; - self.keys.write().await.remove(key); - Ok(()) - } - - pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key::Error> { - match self.keys.read().await.get(&auth_key.key) { - None => Err(key::Error::KeyInvalid), - Some(key) => key::verify_auth_key(key), - } - } - - pub async fn load_keys(&self) -> Result<(), database::Error> { - let keys_from_database = self.database.load_keys().await?; - let mut keys = self.keys.write().await; - - 
keys.clear(); - - for key in keys_from_database { - let _ = keys.insert(key.key.clone(), key); - } - - Ok(()) - } - - // Adding torrents is not relevant to public trackers. - pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { - self.add_torrent_to_database_whitelist(info_hash).await?; - self.add_torrent_to_memory_whitelist(info_hash).await; - Ok(()) - } - - async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { - self.database.add_info_hash_to_whitelist(*info_hash).await?; - Ok(()) - } - - pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { - self.whitelist.write().await.insert(*info_hash) - } - - // Removing torrents is not relevant to public trackers. - pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { - self.database.remove_info_hash_from_whitelist(*info_hash).await?; - self.whitelist.write().await.remove(info_hash); - Ok(()) - } - - pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { - self.whitelist.read().await.contains(info_hash) - } - - pub async fn load_whitelist(&self) -> Result<(), database::Error> { - let whitelisted_torrents_from_database = self.database.load_whitelist().await?; - let mut whitelist = self.whitelist.write().await; - - whitelist.clear(); - - for info_hash in whitelisted_torrents_from_database { - let _ = whitelist.insert(info_hash); - } - - Ok(()) - } - - pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { - // no authentication needed in public mode - if self.is_public() { - return Ok(()); - } - - // check if auth_key is set and valid - if self.is_private() { - match key { - Some(key) => { - if self.verify_auth_key(key).await.is_err() { - return Err(TorrentError::PeerKeyNotValid); - } - } - None => { - return Err(TorrentError::PeerNotAuthenticated); - } - } - } - - // check 
if info_hash is whitelisted - if self.is_whitelisted() && !self.is_info_hash_whitelisted(info_hash).await { - return Err(TorrentError::TorrentNotWhitelisted); - } - - Ok(()) - } - - // Loading the torrents from database into memory - pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> { - let persistent_torrents = self.database.load_persistent_torrents().await?; - let mut torrents = self.torrents.write().await; - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(&info_hash) { - continue; - } - - let torrent_entry = TorrentEntry { - peers: Default::default(), - completed, - }; - - torrents.insert(info_hash, torrent_entry); - } - - Ok(()) - } - - /// Get all torrent peers for a given torrent filtering out the peer with the client address - pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { - let read_lock = self.torrents.read().await; - - match read_lock.get(info_hash) { - None => vec![], - Some(entry) => entry.get_peers(Some(client_addr)).into_iter().cloned().collect(), - } - } - - /// Get all torrent peers for a given torrent - pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { - let read_lock = self.torrents.read().await; - - match read_lock.get(info_hash) { - None => vec![], - Some(entry) => entry.get_peers(None).into_iter().cloned().collect(), - } - } - - pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &TorrentPeer) -> TorrentStats { - let mut torrents = self.torrents.write().await; - - let torrent_entry = match torrents.entry(*info_hash) { - Entry::Vacant(vacant) => vacant.insert(TorrentEntry::new()), - Entry::Occupied(entry) => entry.into_mut(), - }; - - let stats_updated = torrent_entry.update_peer(peer); - - // todo: move this action to a separate worker - if self.config.persistent_torrent_completed_stat && stats_updated { - let _ = self - .database - 
.save_persistent_torrent(info_hash, torrent_entry.completed) - .await; - } - - let (seeders, completed, leechers) = torrent_entry.get_stats(); - - TorrentStats { - seeders, - leechers, - completed, - } - } - - pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap> { - self.torrents.read().await - } - - pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { - self.stats_repository.get_stats().await - } - - pub async fn send_stats_event(&self, event: TrackerStatisticsEvent) -> Option>> { - match &self.stats_event_sender { - None => None, - Some(stats_event_sender) => stats_event_sender.send_event(event).await, - } - } - - // Remove inactive peers and (optionally) peerless torrents - pub async fn cleanup_torrents(&self) { - let mut torrents_lock = self.torrents.write().await; - - // If we don't need to remove torrents we will use the faster iter - if self.config.remove_peerless_torrents { - torrents_lock.retain(|_, torrent_entry| { - torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); - - match self.config.persistent_torrent_completed_stat { - true => torrent_entry.completed > 0 || !torrent_entry.peers.is_empty(), - false => !torrent_entry.peers.is_empty(), - } - }); - } else { - for (_, torrent_entry) in torrents_lock.iter_mut() { - torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); - } - } - } -} diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index cc4229b66..5514bc1eb 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -10,7 +10,7 @@ use super::connection_cookie::{check_connection_cookie, from_connection_id, into use crate::peer::TorrentPeer; use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::TorrentError; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; use crate::udp::errors::ServerError; use crate::udp::request::AnnounceRequestWrapper; use crate::{InfoHash, MAX_SCRAPE_TORRENTS}; @@ -256,7 +256,7 @@ mod tests { use 
crate::peer::TorrentPeer; use crate::protocol::clock::{DefaultClock, Time}; use crate::statistics::StatsTracker; - use crate::tracker::tracker::TorrentTracker; + use crate::tracker::TorrentTracker; use crate::{Configuration, PeerId}; fn default_tracker_config() -> Arc { @@ -374,7 +374,7 @@ mod tests { use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr}; use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::tracker::TorrentTracker; + use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_connect; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; @@ -546,7 +546,7 @@ mod tests { use mockall::predicate::eq; use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::tracker::TorrentTracker; + use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -771,7 +771,7 @@ mod tests { use mockall::predicate::eq; use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::tracker::TorrentTracker; + use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -952,7 +952,7 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; use crate::statistics::StatsTracker; - use crate::tracker::tracker::TorrentTracker; + use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_announce; 
use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -1013,7 +1013,7 @@ mod tests { }; use super::TorrentPeerBuilder; - use crate::tracker::tracker::TorrentTracker; + use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handle_scrape; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; @@ -1232,7 +1232,7 @@ mod tests { use super::sample_scrape_request; use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::tracker::TorrentTracker; + use crate::tracker::TorrentTracker; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr}; @@ -1265,7 +1265,7 @@ mod tests { use super::sample_scrape_request; use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::tracker::TorrentTracker; + use crate::tracker::TorrentTracker; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr}; diff --git a/src/udp/server.rs b/src/udp/server.rs index 11cb61d99..2f41c3c4d 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -6,7 +6,7 @@ use aquatic_udp_protocol::Response; use log::{debug, info}; use tokio::net::UdpSocket; -use crate::tracker::tracker::TorrentTracker; +use crate::tracker::TorrentTracker; use crate::udp::{handle_packet, MAX_PACKET_SIZE}; pub struct UdpServer { diff --git a/tests/udp.rs b/tests/udp.rs index 0bc2a6506..c88dc9885 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -19,7 +19,7 @@ mod udp_tracker_server { use tokio::task::JoinHandle; use torrust_tracker::jobs::udp_tracker; use torrust_tracker::tracker::statistics::StatsTracker; - use torrust_tracker::tracker::tracker::TorrentTracker; + use torrust_tracker::tracker::TorrentTracker; use 
torrust_tracker::udp::MAX_PACKET_SIZE; use torrust_tracker::{logging, static_time, Configuration}; From 4dd7326748b78a5ead86e98c71d1226a8fec8e91 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 11:52:53 +0100 Subject: [PATCH 134/435] ci: check code and clippy in test workflow --- .github/workflows/test_build_release.yml | 10 ++++++++++ cSpell.json | 4 ++++ 2 files changed, 14 insertions(+) diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index 1266ae51f..4acf14277 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -36,6 +36,16 @@ jobs: toolchain: stable components: llvm-tools-preview - uses: Swatinem/rust-cache@v1 + - name: Check Rust Code + uses: actions-rs/cargo@v1 + with: + command: check + args: --all-targets + - name: Clippy Rust Code + uses: actions-rs/cargo@v1 + with: + command: clippy + args: --all-targets - uses: taiki-e/install-action@cargo-llvm-cov - uses: taiki-e/install-action@nextest - name: Run Tests diff --git a/cSpell.json b/cSpell.json index 2cc3e1179..1df69e4e7 100644 --- a/cSpell.json +++ b/cSpell.json @@ -12,6 +12,7 @@ "canonicalize", "canonicalized", "chrono", + "clippy", "completei", "downloadedi", "filesd", @@ -27,6 +28,7 @@ "Lphant", "mockall", "nanos", + "nextest", "nocapture", "ostr", "Pando", @@ -34,11 +36,13 @@ "repr", "rngs", "rusqlite", + "rustfmt", "Seedable", "Shareaza", "sharktorrent", "socketaddr", "sqllite", + "Swatinem", "Swiftbit", "thiserror", "Torrentstorm", From 23916a60a2e6881ec4336f1f995e57e2fea8c54d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 22 Nov 2022 19:13:43 +0000 Subject: [PATCH 135/435] fix: [#108] revert change in auth key generation endpoint The response for the enpoint POST /api/key/:seconds_valid should be: ```json { "key": "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM", "valid_until": 1674804892 } ``` instead of: ```json { "key": "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM", "valid_until": { "secs": 
1674804892, "nanos": 423855037 } } ``` It was propagated to the API after changing the internal struct `AuthKey` from: ```rust pub struct AuthKey { pub key: String, pub valid_until: Option, } ``` to: ```rust pub struct AuthKey { pub key: String, pub valid_until: Option, } ``` --- src/api/mod.rs | 1 + src/api/resources/auth_key_resource.rs | 57 ++++++++++++++++++++++++++ src/api/resources/mod.rs | 9 ++++ src/api/server.rs | 4 +- 4 files changed, 70 insertions(+), 1 deletion(-) create mode 100644 src/api/resources/auth_key_resource.rs create mode 100644 src/api/resources/mod.rs diff --git a/src/api/mod.rs b/src/api/mod.rs index 74f47ad34..e08417133 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1 +1,2 @@ pub mod server; +pub mod resources; diff --git a/src/api/resources/auth_key_resource.rs b/src/api/resources/auth_key_resource.rs new file mode 100644 index 000000000..4f74266f6 --- /dev/null +++ b/src/api/resources/auth_key_resource.rs @@ -0,0 +1,57 @@ +use serde::{Deserialize, Serialize}; + +use crate::key::AuthKey; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct AuthKeyResource { + pub key: String, + pub valid_until: Option, +} + +impl AuthKeyResource { + pub fn from_auth_key(auth_key: &AuthKey) -> Self { + Self { + key: auth_key.key.clone(), + valid_until: auth_key.valid_until.map(|duration| duration.as_secs()), + } + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use super::AuthKeyResource; + use crate::key::AuthKey; + use crate::protocol::clock::{DefaultClock, TimeNow}; + + #[test] + fn it_should_be_instantiated_from_an_auth_key() { + let expire_time = DefaultClock::add(&Duration::new(60, 0)).unwrap(); + + let auth_key_resource = AuthKey { + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line + valid_until: Some(expire_time), + }; + + assert_eq!( + AuthKeyResource::from_auth_key(&auth_key_resource), + AuthKeyResource { + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line 
+ valid_until: Some(expire_time.as_secs()) + } + ) + } + + #[test] + fn it_should_be_converted_to_json() { + assert_eq!( + serde_json::to_string(&AuthKeyResource { + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line + valid_until: Some(60) + }) + .unwrap(), + "{\"key\":\"IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM\",\"valid_until\":60}" // cspell:disable-line + ); + } +} diff --git a/src/api/resources/mod.rs b/src/api/resources/mod.rs new file mode 100644 index 000000000..f7d24ee86 --- /dev/null +++ b/src/api/resources/mod.rs @@ -0,0 +1,9 @@ +//! These are the Rest API resources. +//! +//! WIP. Not all endpoints have their resource structs. +//! +//! - [x] AuthKeys +//! - [ ] ... +//! - [ ] ... +//! - [ ] ... +pub mod auth_key_resource; diff --git a/src/api/server.rs b/src/api/server.rs index 5a604aa0c..89d3bb38d 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -11,6 +11,8 @@ use crate::peer::TorrentPeer; use crate::protocol::common::*; use crate::tracker::TorrentTracker; +use super::resources::auth_key_resource::AuthKeyResource; + #[derive(Deserialize, Debug)] struct TorrentInfoQuery { offset: Option, @@ -267,7 +269,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp }) .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { - Ok(auth_key) => Ok(warp::reply::json(&auth_key)), + Ok(auth_key) => Ok(warp::reply::json(&AuthKeyResource::from_auth_key(&auth_key))), Err(..) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to generate key".into(), })), From ede046082660a0bf69c1518e329413aac1959634 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 22 Nov 2022 19:17:09 +0000 Subject: [PATCH 136/435] feat: [#108] add dev dependency reqwest Added for API end to end tests. 
--- Cargo.lock | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++-- Cargo.toml | 1 + 2 files changed, 100 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ce66efa09..e3a6d9c09 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -528,6 +528,15 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" +[[package]] +name = "encoding_rs" +version = "0.8.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" +dependencies = [ + "cfg-if", +] + [[package]] name = "env_logger" version = "0.8.4" @@ -941,9 +950,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.20" +version = "0.14.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" +checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" dependencies = [ "bytes", "futures-channel", @@ -963,6 +972,19 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper", + "native-tls", + "tokio", + "tokio-native-tls", +] + [[package]] name = "iana-time-zone" version = "0.1.51" @@ -1035,6 +1057,12 @@ dependencies = [ "syn", ] +[[package]] +name = "ipnet" +version = "2.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f88c5561171189e69df9d98bcf18fd5f9558300f7ea7b801eb8a0fd748bd8745" + [[package]] name = "itertools" version = "0.10.5" @@ -1857,6 +1885,43 @@ dependencies = [ "winapi", ] +[[package]] +name = "reqwest" +version = "0.11.13" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "68cc60575865c7831548863cc02356512e3f1dc2f3f82cb837d7fc4cc8f3c97c" +dependencies = [ + "base64", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-tls", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "serde", + "serde_json", + "serde_urlencoded", + "tokio", + "tokio-native-tls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg", +] + [[package]] name = "ring" version = "0.16.20" @@ -2542,6 +2607,16 @@ dependencies = [ "syn", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.23.4" @@ -2621,6 +2696,7 @@ dependencies = [ "r2d2_mysql", "r2d2_sqlite", "rand", + "reqwest", "serde", "serde_bencode", "serde_json", @@ -2887,6 +2963,18 @@ dependencies = [ "wasm-bindgen-shared", ] +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "wasm-bindgen-macro" version = "0.2.83" @@ -3076,6 +3164,15 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" +[[package]] +name = "winreg" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" +dependencies = [ + "winapi", +] + [[package]] name = "wyz" version = "0.4.0" diff --git 
a/Cargo.toml b/Cargo.toml index b2b256a2c..80e9009f1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,3 +62,4 @@ uuid = { version = "1", features = ["v4"] } [dev-dependencies] mockall = "0.11" +reqwest = { version = "0.11.13", features = ["json"] } From 409f82af4cb76936da389b37deca05d8321710fa Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 22 Nov 2022 19:18:55 +0000 Subject: [PATCH 137/435] test: [#108] add e2e test for auth key generation API endpoint --- .gitignore | 1 + src/api/mod.rs | 2 +- src/api/resources/auth_key_resource.rs | 56 +++++++++--- src/api/resources/mod.rs | 4 +- src/api/server.rs | 5 +- tests/api.rs | 119 +++++++++++++++++++++++++ tests/common/mod.rs | 8 ++ tests/udp.rs | 19 ++-- 8 files changed, 185 insertions(+), 29 deletions(-) create mode 100644 tests/api.rs create mode 100644 tests/common/mod.rs diff --git a/.gitignore b/.gitignore index e2956b2d6..ba9ceeb53 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,4 @@ /config.toml /data.db /.vscode/launch.json + diff --git a/src/api/mod.rs b/src/api/mod.rs index e08417133..46ad24218 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1,2 +1,2 @@ -pub mod server; pub mod resources; +pub mod server; diff --git a/src/api/resources/auth_key_resource.rs b/src/api/resources/auth_key_resource.rs index 4f74266f6..c38b7cc18 100644 --- a/src/api/resources/auth_key_resource.rs +++ b/src/api/resources/auth_key_resource.rs @@ -1,6 +1,9 @@ +use std::convert::From; + use serde::{Deserialize, Serialize}; use crate::key::AuthKey; +use crate::protocol::clock::DurationSinceUnixEpoch; #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct AuthKeyResource { @@ -8,11 +11,22 @@ pub struct AuthKeyResource { pub valid_until: Option, } -impl AuthKeyResource { - pub fn from_auth_key(auth_key: &AuthKey) -> Self { - Self { - key: auth_key.key.clone(), - valid_until: auth_key.valid_until.map(|duration| duration.as_secs()), +impl From for AuthKey { + fn from(auth_key_resource: AuthKeyResource) -> Self { + 
AuthKey { + key: auth_key_resource.key, + valid_until: auth_key_resource + .valid_until + .map(|valid_until| DurationSinceUnixEpoch::new(valid_until, 0)), + } + } +} + +impl From for AuthKeyResource { + fn from(auth_key: AuthKey) -> Self { + AuthKeyResource { + key: auth_key.key, + valid_until: auth_key.valid_until.map(|valid_until| valid_until.as_secs()), } } } @@ -26,25 +40,43 @@ mod tests { use crate::protocol::clock::{DefaultClock, TimeNow}; #[test] - fn it_should_be_instantiated_from_an_auth_key() { - let expire_time = DefaultClock::add(&Duration::new(60, 0)).unwrap(); + fn it_should_be_convertible_into_an_auth_key() { + let duration_in_secs = 60; + + let auth_key_resource = AuthKeyResource { + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line + valid_until: Some(duration_in_secs), + }; + + assert_eq!( + AuthKey::from(auth_key_resource), + AuthKey { + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line + valid_until: Some(DefaultClock::add(&Duration::new(duration_in_secs, 0)).unwrap()) + } + ) + } + + #[test] + fn it_should_be_convertible_from_an_auth_key() { + let duration_in_secs = 60; - let auth_key_resource = AuthKey { + let auth_key = AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: Some(expire_time), + valid_until: Some(DefaultClock::add(&Duration::new(duration_in_secs, 0)).unwrap()), }; assert_eq!( - AuthKeyResource::from_auth_key(&auth_key_resource), + AuthKeyResource::from(auth_key), AuthKeyResource { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: Some(expire_time.as_secs()) + valid_until: Some(duration_in_secs) } ) } #[test] - fn it_should_be_converted_to_json() { + fn it_should_be_convertible_into_json() { assert_eq!( serde_json::to_string(&AuthKeyResource { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line diff --git a/src/api/resources/mod.rs b/src/api/resources/mod.rs index 
f7d24ee86..4b4f2214c 100644 --- a/src/api/resources/mod.rs +++ b/src/api/resources/mod.rs @@ -1,7 +1,7 @@ //! These are the Rest API resources. -//! +//! //! WIP. Not all endpoints have their resource structs. -//! +//! //! - [x] AuthKeys //! - [ ] ... //! - [ ] ... diff --git a/src/api/server.rs b/src/api/server.rs index 89d3bb38d..9f215710e 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -7,12 +7,11 @@ use std::time::Duration; use serde::{Deserialize, Serialize}; use warp::{filters, reply, serve, Filter}; +use super::resources::auth_key_resource::AuthKeyResource; use crate::peer::TorrentPeer; use crate::protocol::common::*; use crate::tracker::TorrentTracker; -use super::resources::auth_key_resource::AuthKeyResource; - #[derive(Deserialize, Debug)] struct TorrentInfoQuery { offset: Option, @@ -269,7 +268,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp }) .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { - Ok(auth_key) => Ok(warp::reply::json(&AuthKeyResource::from_auth_key(&auth_key))), + Ok(auth_key) => Ok(warp::reply::json(&AuthKeyResource::from(auth_key))), Err(..) 
=> Err(warp::reject::custom(ActionStatus::Err { reason: "failed to generate key".into(), })), diff --git a/tests/api.rs b/tests/api.rs new file mode 100644 index 000000000..38966a81b --- /dev/null +++ b/tests/api.rs @@ -0,0 +1,119 @@ +/// Integration tests for the tracker API +/// +/// cargo test tracker_api -- --nocapture +extern crate rand; + +mod common; + +mod tracker_api { + use core::panic; + use std::env; + use std::sync::atomic::{AtomicBool, Ordering}; + use std::sync::Arc; + + use tokio::task::JoinHandle; + use tokio::time::{sleep, Duration}; + use torrust_tracker::api::resources::auth_key_resource::AuthKeyResource; + use torrust_tracker::jobs::tracker_api; + use torrust_tracker::tracker::key::AuthKey; + use torrust_tracker::tracker::statistics::StatsTracker; + use torrust_tracker::tracker::TorrentTracker; + use torrust_tracker::{ephemeral_instance_keys, logging, static_time, Configuration}; + + use crate::common::ephemeral_random_port; + + #[tokio::test] + async fn should_generate_a_new_auth_key() { + let configuration = tracker_configuration(); + let api_server = new_running_api_server(configuration.clone()).await; + + let bind_address = api_server.bind_address.unwrap().clone(); + let seconds_valid = 60; + let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); + + let url = format!("http://{}/api/key/{}?token={}", &bind_address, &seconds_valid, &api_token); + + let auth_key: AuthKeyResource = reqwest::Client::new().post(url).send().await.unwrap().json().await.unwrap(); + + // Verify the key with the tracker + assert!(api_server + .tracker + .unwrap() + .verify_auth_key(&AuthKey::from(auth_key)) + .await + .is_ok()); + } + + fn tracker_configuration() -> Arc { + let mut config = Configuration::default(); + config.log_level = Some("off".to_owned()); + + config.http_api.bind_address = format!("127.0.0.1:{}", ephemeral_random_port()); + + // Temp database + let temp_directory = env::temp_dir(); + let temp_file = 
temp_directory.join("data.db"); + config.db_path = temp_file.to_str().unwrap().to_owned(); + + Arc::new(config) + } + + async fn new_running_api_server(configuration: Arc) -> ApiServer { + let mut api_server = ApiServer::new(); + api_server.start(configuration).await; + api_server + } + + pub struct ApiServer { + pub started: AtomicBool, + pub job: Option>, + pub bind_address: Option, + pub tracker: Option>, + } + + impl ApiServer { + pub fn new() -> Self { + Self { + started: AtomicBool::new(false), + job: None, + bind_address: None, + tracker: None, + } + } + + pub async fn start(&mut self, configuration: Arc) { + if !self.started.load(Ordering::Relaxed) { + self.bind_address = Some(configuration.http_api.bind_address.clone()); + + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); + + // Initialize Torrust tracker + let tracker = match TorrentTracker::new(configuration.clone(), Some(stats_event_sender), stats_repository) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; + self.tracker = Some(tracker.clone()); + + // Initialize logging + logging::setup_logging(&configuration); + + // Start the HTTP API job + self.job = Some(tracker_api::start_job(&configuration, tracker.clone())); + + self.started.store(true, Ordering::Relaxed); + + // Wait to give time to the API server to be ready to accept requests + sleep(Duration::from_millis(100)).await; + } + } + } +} diff --git a/tests/common/mod.rs b/tests/common/mod.rs new file mode 100644 index 000000000..5fd484cf5 --- /dev/null +++ b/tests/common/mod.rs @@ -0,0 +1,8 @@ +use rand::{thread_rng, Rng}; + +pub fn ephemeral_random_port() -> u16 { + // todo: this may produce random test failures because two 
tests can try to bind the same port. + // We could create a pool of available ports (with read/write lock) + let mut rng = thread_rng(); + rng.gen_range(49152..65535) +} diff --git a/tests/udp.rs b/tests/udp.rs index c88dc9885..ab96259c5 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -3,6 +3,8 @@ /// cargo test udp_tracker_server -- --nocapture extern crate rand; +mod common; + mod udp_tracker_server { use core::panic; use std::io::Cursor; @@ -14,14 +16,15 @@ mod udp_tracker_server { AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, Request, Response, ScrapeRequest, TransactionId, }; - use rand::{thread_rng, Rng}; use tokio::net::UdpSocket; use tokio::task::JoinHandle; use torrust_tracker::jobs::udp_tracker; use torrust_tracker::tracker::statistics::StatsTracker; use torrust_tracker::tracker::TorrentTracker; use torrust_tracker::udp::MAX_PACKET_SIZE; - use torrust_tracker::{logging, static_time, Configuration}; + use torrust_tracker::{ephemeral_instance_keys, logging, static_time, Configuration}; + + use crate::common::ephemeral_random_port; fn tracker_configuration() -> Arc { let mut config = Configuration::default(); @@ -50,6 +53,9 @@ mod udp_tracker_server { // Set the time of Torrust app starting lazy_static::initialize(&static_time::TIME_AT_APP_START); + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + // Initialize stats tracker let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); @@ -162,15 +168,6 @@ mod udp_tracker_server { [0; MAX_PACKET_SIZE] } - /// Generates a random ephemeral port for a client source address - fn ephemeral_random_port() -> u16 { - // todo: this may produce random test failures because two tests can try to bind the same port. 
- // We could either use the same client for all tests (slower) or - // create a pool of available ports (with read/write lock) - let mut rng = thread_rng(); - rng.gen_range(49152..65535) - } - /// Generates the source address for the UDP client fn source_address(port: u16) -> String { format!("127.0.0.1:{}", port) From 9cfab4d009789994db6614f16d2f70b4ed5f4e4f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 24 Nov 2022 17:07:26 +0000 Subject: [PATCH 138/435] test: [#74] reproduce API bug with an e2e test The API endpoint to whitelist torrents returns an error if you try to whitelist the same torrent twice. This test reproduces that wrong behavior before fixing it. --- tests/api.rs | 44 +++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 3 deletions(-) diff --git a/tests/api.rs b/tests/api.rs index 38966a81b..8de220093 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -44,15 +44,53 @@ mod tracker_api { .is_ok()); } + #[tokio::test] + async fn should_whitelist_a_torrent() { + let configuration = tracker_configuration(); + let api_server = new_running_api_server(configuration.clone()).await; + + let bind_address = api_server.bind_address.unwrap().clone(); + let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let url = format!("http://{}/api/whitelist/{}?token={}", &bind_address, &info_hash, &api_token); + + let res = reqwest::Client::new().post(url.clone()).send().await.unwrap(); + + assert_eq!(res.status(), 200); + } + + #[tokio::test] + async fn should_whitelist_a_torrent_that_has_been_already_whitelisted() { + let configuration = tracker_configuration(); + let api_server = new_running_api_server(configuration.clone()).await; + + let bind_address = api_server.bind_address.unwrap().clone(); + let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); + let info_hash = 
"9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let url = format!("http://{}/api/whitelist/{}?token={}", &bind_address, &info_hash, &api_token); + + // First whitelist request + let res = reqwest::Client::new().post(url.clone()).send().await.unwrap(); + assert_eq!(res.status(), 200); + + // Second whitelist request + let res = reqwest::Client::new().post(url.clone()).send().await.unwrap(); + assert_eq!(res.status(), 200); + } + fn tracker_configuration() -> Arc { let mut config = Configuration::default(); config.log_level = Some("off".to_owned()); - config.http_api.bind_address = format!("127.0.0.1:{}", ephemeral_random_port()); + // Ephemeral socket address + let port = ephemeral_random_port(); + config.http_api.bind_address = format!("127.0.0.1:{}", &port); - // Temp database + // Ephemeral database let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join("data.db"); + let temp_file = temp_directory.join(format!("data_{}.db", &port)); config.db_path = temp_file.to_str().unwrap().to_owned(); Arc::new(config) From 2d621c5e64f657e9be1f99af65ba8b160b9b68d0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 24 Nov 2022 18:42:54 +0000 Subject: [PATCH 139/435] fix: [#74] bug calling the whitelist API endpoint twice --- cSpell.json | 1 + src/tracker/mod.rs | 14 ++++++++++++-- tests/api.rs | 10 +++++++++- 3 files changed, 22 insertions(+), 3 deletions(-) diff --git a/cSpell.json b/cSpell.json index 1df69e4e7..c880bf3ae 100644 --- a/cSpell.json +++ b/cSpell.json @@ -34,6 +34,7 @@ "Pando", "Rasterbar", "repr", + "reqwest", "rngs", "rusqlite", "rustfmt", diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 77f51098a..a02e1123b 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -106,9 +106,19 @@ impl TorrentTracker { Ok(()) } + /// It adds a torrent to the whitelist if it has not been whitelisted previously async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { - 
self.database.add_info_hash_to_whitelist(*info_hash).await?; - Ok(()) + match self + .database + .get_info_hash_from_whitelist(&info_hash.to_owned().to_string()) + .await + { + Ok(_preexisting_info_hash) => Ok(()), + _ => { + self.database.add_info_hash_to_whitelist(*info_hash).await?; + Ok(()) + } + } } pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { diff --git a/tests/api.rs b/tests/api.rs index 8de220093..96af71d54 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -8,6 +8,7 @@ mod common; mod tracker_api { use core::panic; use std::env; + use std::str::FromStr; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; @@ -18,7 +19,7 @@ mod tracker_api { use torrust_tracker::tracker::key::AuthKey; use torrust_tracker::tracker::statistics::StatsTracker; use torrust_tracker::tracker::TorrentTracker; - use torrust_tracker::{ephemeral_instance_keys, logging, static_time, Configuration}; + use torrust_tracker::{ephemeral_instance_keys, logging, static_time, Configuration, InfoHash}; use crate::common::ephemeral_random_port; @@ -58,6 +59,13 @@ mod tracker_api { let res = reqwest::Client::new().post(url.clone()).send().await.unwrap(); assert_eq!(res.status(), 200); + assert!( + api_server + .tracker + .unwrap() + .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + .await + ); } #[tokio::test] From 167b749e3dabfa4928fa621a0c438863a3ddb127 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 24 Nov 2022 21:31:47 +0100 Subject: [PATCH 140/435] db: check info_hash record not found instead dropping all errors --- src/databases/mysql.rs | 4 ++-- src/databases/sqlite.rs | 16 +++++++++------- src/tracker/mod.rs | 10 ++++++---- 3 files changed, 17 insertions(+), 13 deletions(-) diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index a4d870101..fc6ff5098 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -141,10 +141,10 @@ impl Database for MysqlDatabase { "SELECT info_hash FROM 
whitelist WHERE info_hash = :info_hash", params! { info_hash }, ) - .map_err(|_| database::Error::QueryReturnedNoRows)? + .map_err(|_| database::Error::DatabaseError)? { Some(info_hash) => Ok(InfoHash::from_str(&info_hash).unwrap()), - None => Err(database::Error::InvalidQuery), + None => Err(database::Error::QueryReturnedNoRows), } } diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index ef9f12d9c..7a567b07e 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -137,13 +137,15 @@ impl Database for SqliteDatabase { let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; let mut rows = stmt.query([info_hash])?; - if let Some(row) = rows.next()? { - let info_hash: String = row.get(0).unwrap(); - - // should never be able to fail - Ok(InfoHash::from_str(&info_hash).unwrap()) - } else { - Err(database::Error::InvalidQuery) + match rows.next() { + Ok(row) => match row { + Some(row) => Ok(InfoHash::from_str(&row.get_unwrap::<_, String>(0)).unwrap()), + None => Err(database::Error::QueryReturnedNoRows), + }, + Err(e) => { + debug!("{:?}", e); + Err(database::Error::InvalidQuery) + } } } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index a02e1123b..8987f49f5 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -108,17 +108,19 @@ impl TorrentTracker { /// It adds a torrent to the whitelist if it has not been whitelisted previously async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { - match self + if let Err(e) = self .database .get_info_hash_from_whitelist(&info_hash.to_owned().to_string()) .await { - Ok(_preexisting_info_hash) => Ok(()), - _ => { + if let database::Error::QueryReturnedNoRows = e { self.database.add_info_hash_to_whitelist(*info_hash).await?; - Ok(()) + } else { + eprintln!("{e}"); + return Err(e); } } + Ok(()) } pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { From 
8af9834e57373a97673c804ec3a50014e0d171c6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Nov 2022 08:51:31 +0000 Subject: [PATCH 141/435] refactor: rename tests --- tests/api.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/api.rs b/tests/api.rs index 96af71d54..e4c23716d 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -24,7 +24,7 @@ mod tracker_api { use crate::common::ephemeral_random_port; #[tokio::test] - async fn should_generate_a_new_auth_key() { + async fn should_allow_generating_a_new_auth_key() { let configuration = tracker_configuration(); let api_server = new_running_api_server(configuration.clone()).await; @@ -46,7 +46,7 @@ mod tracker_api { } #[tokio::test] - async fn should_whitelist_a_torrent() { + async fn should_allow_whitelisting_a_torrent() { let configuration = tracker_configuration(); let api_server = new_running_api_server(configuration.clone()).await; @@ -69,7 +69,7 @@ mod tracker_api { } #[tokio::test] - async fn should_whitelist_a_torrent_that_has_been_already_whitelisted() { + async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { let configuration = tracker_configuration(); let api_server = new_running_api_server(configuration.clone()).await; From ed5c1edaaaed067337154cfd0bfaa943a28bba8f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Nov 2022 09:15:22 +0000 Subject: [PATCH 142/435] refactor: extract fn is_info_hash_whitelisted --- src/databases/database.rs | 11 +++++++++++ src/tracker/mod.rs | 16 +++++----------- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/src/databases/database.rs b/src/databases/database.rs index c67f39a54..795be0d45 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -53,6 +53,17 @@ pub trait Database: Sync + Send { async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result; async fn remove_key_from_keys(&self, key: &str) -> Result; + + async fn is_info_hash_whitelisted(&self, info_hash: 
&InfoHash) -> Result { if let Err(e) = self.get_info_hash_from_whitelist(&info_hash.to_owned().to_string()).await { if let Error::QueryReturnedNoRows = e { return Ok(false); } else { return Err(e); } } Ok(true) } } #[derive(Debug, Display, PartialEq, Eq, Error)] diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 8987f49f5..a3eecd427 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -108,18 +108,12 @@ impl TorrentTracker { /// It adds a torrent to the whitelist if it has not been whitelisted previously async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { - if let Err(e) = self - .database - .get_info_hash_from_whitelist(&info_hash.to_owned().to_string()) - .await - { - if let database::Error::QueryReturnedNoRows = e { - self.database.add_info_hash_to_whitelist(*info_hash).await?; - } else { - eprintln!("{e}"); - return Err(e); - } + if self.database.is_info_hash_whitelisted(info_hash).await.unwrap() { + return Ok(()); } + + self.database.add_info_hash_to_whitelist(*info_hash).await?; + Ok(()) } From 32a6d79ea3c53401a14a44dede63e039330675e1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Nov 2022 10:43:36 +0000 Subject: [PATCH 143/435] fix: [#74] send message from API when it is ready In the e2e tests we needed to wait until the API server is ready to accept requests. We were waiting a random duration (100 milliseconds). Now we send a message from the API to the initiator when it is ready. In production code this message is not used. 
--- cSpell.json | 1 + src/api/server.rs | 13 ++++++++++++- src/jobs/tracker_api.rs | 17 +++++++++++++---- src/setup.rs | 3 ++- tests/api.rs | 11 +++++++---- 5 files changed, 35 insertions(+), 10 deletions(-) diff --git a/cSpell.json b/cSpell.json index c880bf3ae..a2c4235c4 100644 --- a/cSpell.json +++ b/cSpell.json @@ -30,6 +30,7 @@ "nanos", "nextest", "nocapture", + "oneshot", "ostr", "Pando", "Rasterbar", diff --git a/src/api/server.rs b/src/api/server.rs index 9f215710e..984aeb2e6 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -5,9 +5,11 @@ use std::sync::Arc; use std::time::Duration; use serde::{Deserialize, Serialize}; +use tokio::sync::oneshot::Sender; use warp::{filters, reply, serve, Filter}; use super::resources::auth_key_resource::AuthKeyResource; +use crate::jobs::tracker_api::ApiReady; use crate::peer::TorrentPeer; use crate::protocol::common::*; use crate::tracker::TorrentTracker; @@ -88,7 +90,11 @@ fn authenticate(tokens: HashMap) -> impl Filter) -> impl warp::Future { +pub fn start( + socket_addr: SocketAddr, + tracker: Arc, + messenger_to_initiator: Sender, +) -> impl warp::Future { // GET /api/torrents?offset=:u32&limit=:u32 // View torrent list let api_torrents = tracker.clone(); @@ -343,6 +349,11 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let server = api_routes.and(authenticate(tracker.config.http_api.access_tokens.clone())); + // Send a message to the initiator to notify the API is ready to accept requests + if messenger_to_initiator.send(ApiReady()).is_err() { + panic!("the receiver dropped"); + } + let (_addr, api_server) = serve(server).bind_with_graceful_shutdown(socket_addr, async move { tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); }); diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index 97b1fa3b0..169ba2edb 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -1,21 +1,30 @@ use std::sync::Arc; use log::info; +use 
tokio::sync::oneshot::{self, Receiver}; use tokio::task::JoinHandle; use crate::api::server; use crate::tracker::TorrentTracker; use crate::Configuration; -pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { +#[derive(Debug)] +pub struct ApiReady(); + +pub fn start_job(config: &Configuration, tracker: Arc) -> (JoinHandle<()>, Receiver) { let bind_addr = config .http_api .bind_address .parse::() .expect("Tracker API bind_address invalid."); + + let (tx, rx) = oneshot::channel::(); + info!("Starting Torrust API server on: {}", bind_addr); - tokio::spawn(async move { - server::start(bind_addr, tracker).await; - }) + let join_handle = tokio::spawn(async move { + server::start(bind_addr, tracker, tx).await; + }); + + (join_handle, rx) } diff --git a/src/setup.rs b/src/setup.rs index 2ecc1c143..52bb64f01 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -49,7 +49,8 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Vec< // Start HTTP API server if config.http_api.enabled { - jobs.push(tracker_api::start_job(config, tracker.clone())); + let (join_handle, _receiver) = tracker_api::start_job(config, tracker.clone()); + jobs.push(join_handle); } // Remove torrents without peers, every interval diff --git a/tests/api.rs b/tests/api.rs index e4c23716d..251fca2b1 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -13,7 +13,6 @@ mod tracker_api { use std::sync::Arc; use tokio::task::JoinHandle; - use tokio::time::{sleep, Duration}; use torrust_tracker::api::resources::auth_key_resource::AuthKeyResource; use torrust_tracker::jobs::tracker_api; use torrust_tracker::tracker::key::AuthKey; @@ -153,12 +152,16 @@ mod tracker_api { logging::setup_logging(&configuration); // Start the HTTP API job - self.job = Some(tracker_api::start_job(&configuration, tracker.clone())); + let (join_handle, api_receiver) = tracker_api::start_job(&configuration, tracker.clone()); + self.job = Some(join_handle); self.started.store(true, Ordering::Relaxed); - // Wait to give 
time to the API server to be ready to accept requests - sleep(Duration::from_millis(100)).await; + // Wait until the API is ready + match api_receiver.await { + Ok(msg) => println!("Message received from API server: {:?}", msg), + Err(_) => panic!("the api server dropped"), + } } } } From 15aa8313fde6aeb82daa563bc9bc4ca902402808 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Nov 2022 14:04:23 +0000 Subject: [PATCH 144/435] refactor: api job starter waits until api is ready There are two main changes: - The API server does not send the message when it is ready. The job starter waits until the API server is running. This change is less radical because we keep the `start_job` return type the same as the other job starters. We did not want to send a real message from the API. We only wanted to know that the API thread is up and running. - The job starter waits until the API job is running even in production code. In the previous version we did that only for the e2e tests. --- src/api/server.rs | 13 +------------ src/jobs/tracker_api.rs | 24 +++++++++++++++++------- src/setup.rs | 3 +-- tests/api.rs | 9 +-------- 4 files changed, 20 insertions(+), 29 deletions(-) diff --git a/src/api/server.rs b/src/api/server.rs index 984aeb2e6..9f215710e 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -5,11 +5,9 @@ use std::sync::Arc; use std::time::Duration; use serde::{Deserialize, Serialize}; -use tokio::sync::oneshot::Sender; use warp::{filters, reply, serve, Filter}; use super::resources::auth_key_resource::AuthKeyResource; -use crate::jobs::tracker_api::ApiReady; use crate::peer::TorrentPeer; use crate::protocol::common::*; use crate::tracker::TorrentTracker; @@ -90,11 +88,7 @@ fn authenticate(tokens: HashMap) -> impl Filter, - messenger_to_initiator: Sender, -) -> impl warp::Future { +pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp::Future { // GET /api/torrents?offset=:u32&limit=:u32 // View torrent list let api_torrents = tracker.clone(); @@ -349,11 
+343,6 @@ pub fn start( let server = api_routes.and(authenticate(tracker.config.http_api.access_tokens.clone())); - // Send a message to the initiator to notify the API is ready to accept requests - if messenger_to_initiator.send(ApiReady()).is_err() { - panic!("the receiver dropped"); - } - let (_addr, api_server) = serve(server).bind_with_graceful_shutdown(socket_addr, async move { tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); }); diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index 169ba2edb..7b5797391 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use log::info; -use tokio::sync::oneshot::{self, Receiver}; +use tokio::sync::oneshot; use tokio::task::JoinHandle; use crate::api::server; @@ -9,22 +9,32 @@ use crate::tracker::TorrentTracker; use crate::Configuration; #[derive(Debug)] -pub struct ApiReady(); +pub struct ApiServerJobStarted(); -pub fn start_job(config: &Configuration, tracker: Arc) -> (JoinHandle<()>, Receiver) { +pub async fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { let bind_addr = config .http_api .bind_address .parse::() .expect("Tracker API bind_address invalid."); - let (tx, rx) = oneshot::channel::(); - info!("Starting Torrust API server on: {}", bind_addr); + let (tx, rx) = oneshot::channel::(); + + // Run the API server let join_handle = tokio::spawn(async move { - server::start(bind_addr, tracker, tx).await; + if tx.send(ApiServerJobStarted()).is_err() { + panic!("the start job dropped"); + } + server::start(bind_addr, tracker).await; }); - (join_handle, rx) + // Wait until the API server job is running + match rx.await { + Ok(_msg) => info!("Torrust API server started"), + Err(_) => panic!("the api server dropped"), + } + + join_handle } diff --git a/src/setup.rs b/src/setup.rs index 52bb64f01..9906a2d03 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -49,8 +49,7 @@ pub async fn setup(config: &Configuration, 
tracker: Arc) -> Vec< // Start HTTP API server if config.http_api.enabled { - let (join_handle, _receiver) = tracker_api::start_job(config, tracker.clone()); - jobs.push(join_handle); + jobs.push(tracker_api::start_job(config, tracker.clone()).await); } // Remove torrents without peers, every interval diff --git a/tests/api.rs b/tests/api.rs index 251fca2b1..278f9d4fb 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -152,16 +152,9 @@ mod tracker_api { logging::setup_logging(&configuration); // Start the HTTP API job - let (join_handle, api_receiver) = tracker_api::start_job(&configuration, tracker.clone()); - self.job = Some(join_handle); + self.job = Some(tracker_api::start_job(&configuration, tracker).await); self.started.store(true, Ordering::Relaxed); - - // Wait until the API is ready - match api_receiver.await { - Ok(msg) => println!("Message received from API server: {:?}", msg), - Err(_) => panic!("the api server dropped"), - } } } } From 5274b2c067aeea9d1bf344930fe7f1bdea794627 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 25 Nov 2022 16:09:57 +0000 Subject: [PATCH 145/435] fix: [#74] send api ready event after starting the api server Cameron Garnham (@da2ce7) suggested this change. It's better to send the event after spawning the API server task. 
--- src/jobs/tracker_api.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index 7b5797391..ba5b8a1fb 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -24,10 +24,13 @@ pub async fn start_job(config: &Configuration, tracker: Arc) -> // Run the API server let join_handle = tokio::spawn(async move { + let handel = server::start(bind_addr, tracker); + if tx.send(ApiServerJobStarted()).is_err() { panic!("the start job dropped"); } - server::start(bind_addr, tracker).await; + + handel.await; }); // Wait until the API server job is running From ea92ceb61c1d765e2ba882186da97b433996b971 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Nov 2022 17:58:50 +0000 Subject: [PATCH 146/435] test: [#61] add e2e test to API torrent info endpoint before refactoring --- src/api/resources/mod.rs | 1 + src/api/resources/torrent_resource.rs | 26 ++++++++++ tests/api.rs | 75 ++++++++++++++++++++++++++- 3 files changed, 101 insertions(+), 1 deletion(-) create mode 100644 src/api/resources/torrent_resource.rs diff --git a/src/api/resources/mod.rs b/src/api/resources/mod.rs index 4b4f2214c..a229539dd 100644 --- a/src/api/resources/mod.rs +++ b/src/api/resources/mod.rs @@ -7,3 +7,4 @@ //! - [ ] ... //! - [ ] ... 
pub mod auth_key_resource; +pub mod torrent_resource; diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs new file mode 100644 index 000000000..c9f6a1451 --- /dev/null +++ b/src/api/resources/torrent_resource.rs @@ -0,0 +1,26 @@ +use serde::Deserialize; + +#[derive(Deserialize, Debug, PartialEq)] +pub struct TorrentResource { + pub info_hash: String, + pub completed: u32, + pub leechers: u32, + pub peers: Vec, +} + +#[derive(Deserialize, Debug, PartialEq)] +pub struct TorrentPeerResource { + pub peer_id: PeerIdResource, + pub peer_addr: String, + pub updated: i64, + pub uploaded: i64, + pub downloaded: i64, + pub left: i64, + pub event: String, +} + +#[derive(Deserialize, Debug, PartialEq)] +pub struct PeerIdResource { + pub id: String, + pub client: String, +} diff --git a/tests/api.rs b/tests/api.rs index 278f9d4fb..2a0ded24a 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -8,17 +8,22 @@ mod common; mod tracker_api { use core::panic; use std::env; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use tokio::task::JoinHandle; use torrust_tracker::api::resources::auth_key_resource::AuthKeyResource; + use torrust_tracker::api::resources::torrent_resource::{PeerIdResource, TorrentPeerResource, TorrentResource}; use torrust_tracker::jobs::tracker_api; + use torrust_tracker::peer::TorrentPeer; + use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; use torrust_tracker::tracker::key::AuthKey; use torrust_tracker::tracker::statistics::StatsTracker; use torrust_tracker::tracker::TorrentTracker; - use torrust_tracker::{ephemeral_instance_keys, logging, static_time, Configuration, InfoHash}; + use torrust_tracker::{ephemeral_instance_keys, logging, static_time, Configuration, InfoHash, PeerId}; use crate::common::ephemeral_random_port; @@ -87,6 +92,74 @@ mod tracker_api { 
assert_eq!(res.status(), 200); } + fn sample_torrent_peer() -> (TorrentPeer, TorrentPeerResource) { + ( + TorrentPeer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1669397478934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }, + TorrentPeerResource { + peer_id: PeerIdResource { + id: "2d71423030303030303030303030303030303030".to_string(), + client: "qBittorrent".to_string(), + }, + peer_addr: "126.0.0.1:8080".to_string(), + updated: 1669397478934000i64, + uploaded: 0i64, + downloaded: 0i64, + left: 0i64, + event: "Started".to_string(), + }, + ) + } + + #[tokio::test] + async fn should_allow_getting_a_torrent_info() { + let configuration = tracker_configuration(); + let api_server = new_running_api_server(configuration.clone()).await; + + let bind_address = api_server.bind_address.unwrap().clone(); + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); + + let (peer, peer_resource) = sample_torrent_peer(); + + // Add the torrent to the tracker + api_server + .tracker + .unwrap() + .update_torrent_with_peer_and_get_stats(&info_hash, &peer) + .await; + + let url = format!("http://{}/api/torrent/{}?token={}", &bind_address, &info_hash, &api_token); + + let torrent_resource = reqwest::Client::builder() + .build() + .unwrap() + .get(url) + .send() + .await + .unwrap() + .json::() + .await + .unwrap(); + + assert_eq!( + torrent_resource, + TorrentResource { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + completed: 0, + leechers: 0, + peers: vec![peer_resource] + } + ); + } + fn tracker_configuration() -> Arc { let mut config = Configuration::default(); config.log_level = Some("off".to_owned()); From 
801dfe6d8df0292f5e9afe25ebcf265d53c0834d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Nov 2022 19:27:13 +0000 Subject: [PATCH 147/435] refactor: [#61] use TorrentResource in torrent info API endpoint --- src/api/resources/torrent_resource.rs | 29 +++++++++++++++++++-------- src/api/server.rs | 21 ++++++++++++++++--- src/protocol/common.rs | 17 +++++++++------- tests/api.rs | 15 +++++++------- 4 files changed, 57 insertions(+), 25 deletions(-) diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs index c9f6a1451..3c59852e1 100644 --- a/src/api/resources/torrent_resource.rs +++ b/src/api/resources/torrent_resource.rs @@ -1,26 +1,39 @@ -use serde::Deserialize; +use serde::{Deserialize, Serialize}; -#[derive(Deserialize, Debug, PartialEq)] +use crate::PeerId; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct TorrentResource { pub info_hash: String, + pub seeders: u32, pub completed: u32, pub leechers: u32, - pub peers: Vec, + #[serde(skip_serializing_if = "Option::is_none")] + pub peers: Option>, } -#[derive(Deserialize, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct TorrentPeerResource { pub peer_id: PeerIdResource, pub peer_addr: String, - pub updated: i64, + pub updated: u128, pub uploaded: i64, pub downloaded: i64, pub left: i64, pub event: String, } -#[derive(Deserialize, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct PeerIdResource { - pub id: String, - pub client: String, + pub id: Option, + pub client: Option, +} + +impl From for PeerIdResource { + fn from(peer_id: PeerId) -> Self { + PeerIdResource { + id: peer_id.get_id(), + client: peer_id.get_client_name().map(|client_name| client_name.to_string()), + } + } } diff --git a/src/api/server.rs b/src/api/server.rs index 9f215710e..06e2af251 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -8,6 +8,7 @@ use serde::{Deserialize, Serialize}; use warp::{filters, reply, 
serve, Filter}; use super::resources::auth_key_resource::AuthKeyResource; +use super::resources::torrent_resource::{PeerIdResource, TorrentPeerResource, TorrentResource}; use crate::peer::TorrentPeer; use crate::protocol::common::*; use crate::tracker::TorrentTracker; @@ -109,6 +110,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp .iter() .map(|(info_hash, torrent_entry)| { let (seeders, completed, leechers) = torrent_entry.get_stats(); + // todo: use TorrentResource Torrent { info_hash, seeders, @@ -206,12 +208,25 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let peers = torrent_entry.get_peers(None); - Ok(reply::json(&Torrent { - info_hash: &info_hash, + let peer_resources = peers + .iter() + .map(|peer| TorrentPeerResource { + peer_id: PeerIdResource::from(peer.peer_id.clone()), + peer_addr: peer.peer_addr.to_string(), + updated: peer.updated.as_millis(), + uploaded: peer.uploaded.0, + downloaded: peer.downloaded.0, + left: peer.left.0, + event: format!("{:?}", peer.event), + }) + .collect(); + + Ok(reply::json(&TorrentResource { + info_hash: info_hash.to_string(), seeders, completed, leechers, - peers: Some(peers), + peers: Some(peer_resources), })) }); diff --git a/src/protocol/common.rs b/src/protocol/common.rs index 431521764..da6d95e40 100644 --- a/src/protocol/common.rs +++ b/src/protocol/common.rs @@ -232,6 +232,14 @@ impl std::fmt::Display for PeerId { } impl PeerId { + pub fn get_id(&self) -> Option { + let buff_size = self.0.len() * 2; + let mut tmp: Vec = vec![0; buff_size]; + binascii::bin2hex(&self.0, &mut tmp).unwrap(); + + std::str::from_utf8(&tmp).ok().map(|id| id.to_string()) + } + pub fn get_client_name(&self) -> Option<&'static str> { if self.0[0] == b'M' { return Some("BitTorrent"); @@ -316,19 +324,14 @@ impl Serialize for PeerId { where S: serde::Serializer, { - let buff_size = self.0.len() * 2; - let mut tmp: Vec = vec![0; buff_size]; - binascii::bin2hex(&self.0, &mut tmp).unwrap(); - let id = 
std::str::from_utf8(&tmp).ok(); - #[derive(Serialize)] struct PeerIdInfo<'a> { - id: Option<&'a str>, + id: Option, client: Option<&'a str>, } let obj = PeerIdInfo { - id, + id: self.get_id(), client: self.get_client_name(), }; obj.serialize(serializer) diff --git a/tests/api.rs b/tests/api.rs index 2a0ded24a..a5606b0a9 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -105,14 +105,14 @@ mod tracker_api { }, TorrentPeerResource { peer_id: PeerIdResource { - id: "2d71423030303030303030303030303030303030".to_string(), - client: "qBittorrent".to_string(), + id: Some("2d71423030303030303030303030303030303030".to_string()), + client: Some("qBittorrent".to_string()), }, peer_addr: "126.0.0.1:8080".to_string(), - updated: 1669397478934000i64, - uploaded: 0i64, - downloaded: 0i64, - left: 0i64, + updated: 1669397478934000, + uploaded: 0, + downloaded: 0, + left: 0, event: "Started".to_string(), }, ) @@ -153,9 +153,10 @@ mod tracker_api { torrent_resource, TorrentResource { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, completed: 0, leechers: 0, - peers: vec![peer_resource] + peers: Some(vec![peer_resource]) } ); } From 7298701f5d92d854139eedd296606dbc78f5e080 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 28 Nov 2022 13:41:27 +0000 Subject: [PATCH 148/435] refactor: [#61] extract converter from TorrentPeer to TorrentPeerResource --- src/api/resources/torrent_resource.rs | 15 ++++++++++++ src/api/server.rs | 15 ++---------- src/protocol/common.rs | 2 +- src/tracker/peer.rs | 2 +- tests/api.rs | 35 +++++++++------------------ 5 files changed, 31 insertions(+), 38 deletions(-) diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs index 3c59852e1..ecf2a3fda 100644 --- a/src/api/resources/torrent_resource.rs +++ b/src/api/resources/torrent_resource.rs @@ -1,5 +1,6 @@ use serde::{Deserialize, Serialize}; +use crate::peer::TorrentPeer; use crate::PeerId; #[derive(Serialize, Deserialize, Debug, 
PartialEq)] @@ -37,3 +38,17 @@ impl From for PeerIdResource { } } } + +impl From for TorrentPeerResource { + fn from(peer: TorrentPeer) -> Self { + TorrentPeerResource { + peer_id: PeerIdResource::from(peer.peer_id), + peer_addr: peer.peer_addr.to_string(), + updated: peer.updated.as_millis(), + uploaded: peer.uploaded.0, + downloaded: peer.downloaded.0, + left: peer.left.0, + event: format!("{:?}", peer.event), + } + } +} diff --git a/src/api/server.rs b/src/api/server.rs index 06e2af251..85c177b8b 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -8,7 +8,7 @@ use serde::{Deserialize, Serialize}; use warp::{filters, reply, serve, Filter}; use super::resources::auth_key_resource::AuthKeyResource; -use super::resources::torrent_resource::{PeerIdResource, TorrentPeerResource, TorrentResource}; +use super::resources::torrent_resource::{TorrentPeerResource, TorrentResource}; use crate::peer::TorrentPeer; use crate::protocol::common::*; use crate::tracker::TorrentTracker; @@ -208,18 +208,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let peers = torrent_entry.get_peers(None); - let peer_resources = peers - .iter() - .map(|peer| TorrentPeerResource { - peer_id: PeerIdResource::from(peer.peer_id.clone()), - peer_addr: peer.peer_addr.to_string(), - updated: peer.updated.as_millis(), - uploaded: peer.uploaded.0, - downloaded: peer.downloaded.0, - left: peer.left.0, - event: format!("{:?}", peer.event), - }) - .collect(); + let peer_resources = peers.iter().map(|peer| TorrentPeerResource::from(**peer)).collect(); Ok(reply::json(&TorrentResource { info_hash: info_hash.to_string(), diff --git a/src/protocol/common.rs b/src/protocol/common.rs index da6d95e40..ce1cbf253 100644 --- a/src/protocol/common.rs +++ b/src/protocol/common.rs @@ -217,7 +217,7 @@ impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { } } -#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord)] +#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] pub 
struct PeerId(pub [u8; 20]); impl std::fmt::Display for PeerId { diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 7a2599f82..42ef6a60b 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -9,7 +9,7 @@ use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef, PeerId}; use crate::protocol::utils::ser_unix_time_value; -#[derive(PartialEq, Eq, Debug, Clone, Serialize)] +#[derive(PartialEq, Eq, Debug, Clone, Serialize, Copy)] pub struct TorrentPeer { pub peer_id: PeerId, pub peer_addr: SocketAddr, diff --git a/tests/api.rs b/tests/api.rs index a5606b0a9..0f6214ffb 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -93,29 +93,18 @@ mod tracker_api { } fn sample_torrent_peer() -> (TorrentPeer, TorrentPeerResource) { - ( - TorrentPeer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1669397478934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }, - TorrentPeerResource { - peer_id: PeerIdResource { - id: Some("2d71423030303030303030303030303030303030".to_string()), - client: Some("qBittorrent".to_string()), - }, - peer_addr: "126.0.0.1:8080".to_string(), - updated: 1669397478934000, - uploaded: 0, - downloaded: 0, - left: 0, - event: "Started".to_string(), - }, - ) + let torrent_peer = TorrentPeer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1669397478934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + let torrent_peer_resource = TorrentPeerResource::from(torrent_peer); + + (torrent_peer, torrent_peer_resource) } #[tokio::test] From 284c91be299e814c7df6ac33ec050e69817085e8 Mon Sep 17 
00:00:00 2001 From: Jose Celano Date: Mon, 28 Nov 2022 15:45:10 +0000 Subject: [PATCH 149/435] test: [#61] add e2e test for torrent list API endpoint --- src/tracker/torrent.rs | 4 ++-- tests/api.rs | 45 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 46 insertions(+), 3 deletions(-) diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 4e602d359..335554006 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -32,7 +32,7 @@ impl TorrentEntry { let _ = self.peers.remove(&peer.peer_id); } AnnounceEvent::Completed => { - let peer_old = self.peers.insert(peer.peer_id.clone(), peer.clone()); + let peer_old = self.peers.insert(peer.peer_id, *peer); // Don't count if peer was not previously known if peer_old.is_some() { self.completed += 1; @@ -40,7 +40,7 @@ impl TorrentEntry { } } _ => { - let _ = self.peers.insert(peer.peer_id.clone(), peer.clone()); + let _ = self.peers.insert(peer.peer_id, *peer); } } diff --git a/tests/api.rs b/tests/api.rs index 0f6214ffb..ce419724a 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -16,7 +16,7 @@ mod tracker_api { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use tokio::task::JoinHandle; use torrust_tracker::api::resources::auth_key_resource::AuthKeyResource; - use torrust_tracker::api::resources::torrent_resource::{PeerIdResource, TorrentPeerResource, TorrentResource}; + use torrust_tracker::api::resources::torrent_resource::{TorrentPeerResource, TorrentResource}; use torrust_tracker::jobs::tracker_api; use torrust_tracker::peer::TorrentPeer; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; @@ -150,6 +150,49 @@ mod tracker_api { ); } + #[tokio::test] + async fn should_allow_getting_torrents() { + let configuration = tracker_configuration(); + let api_server = new_running_api_server(configuration.clone()).await; + + let bind_address = api_server.bind_address.unwrap().clone(); + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let 
api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); + + let (peer, _peer_resource) = sample_torrent_peer(); + + // Add the torrent to the tracker + api_server + .tracker + .unwrap() + .update_torrent_with_peer_and_get_stats(&info_hash, &peer) + .await; + + let url = format!("http://{}/api/torrents?token={}", &bind_address, &api_token); + + let torrent_resources = reqwest::Client::builder() + .build() + .unwrap() + .get(url) + .send() + .await + .unwrap() + .json::>() + .await + .unwrap(); + + assert_eq!( + torrent_resources, + vec![TorrentResource { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None // Torrent list does not include peer list + }] + ); + } + fn tracker_configuration() -> Arc { let mut config = Configuration::default(); config.log_level = Some("off".to_owned()); From b974ce0eba7614bbe1ce79b03a73ab20143b75f6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 28 Nov 2022 16:05:41 +0000 Subject: [PATCH 150/435] refactor: [#61] use TorrentListItemResource in torrent list API endpoint --- src/api/resources/mod.rs | 4 ++-- src/api/resources/torrent_resource.rs | 10 ++++++++++ src/api/server.rs | 18 +++--------------- 3 files changed, 15 insertions(+), 17 deletions(-) diff --git a/src/api/resources/mod.rs b/src/api/resources/mod.rs index a229539dd..e139207b5 100644 --- a/src/api/resources/mod.rs +++ b/src/api/resources/mod.rs @@ -3,8 +3,8 @@ //! WIP. Not all endpoints have their resource structs. //! //! - [x] AuthKeys -//! - [ ] ... -//! - [ ] ... +//! - [ ] TorrentResource, TorrentListItemResource, TorrentPeerResource, PeerIdResource +//! - [ ] StatsResource //! - [ ] ... 
pub mod auth_key_resource; pub mod torrent_resource; diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs index ecf2a3fda..88d0463cb 100644 --- a/src/api/resources/torrent_resource.rs +++ b/src/api/resources/torrent_resource.rs @@ -13,6 +13,16 @@ pub struct TorrentResource { pub peers: Option>, } +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct TorrentListItemResource { + pub info_hash: String, + pub seeders: u32, + pub completed: u32, + pub leechers: u32, + // todo: this is always None. Remove field from endpoint? + pub peers: Option>, +} + #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct TorrentPeerResource { pub peer_id: PeerIdResource, diff --git a/src/api/server.rs b/src/api/server.rs index 85c177b8b..ef514749b 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -8,8 +8,7 @@ use serde::{Deserialize, Serialize}; use warp::{filters, reply, serve, Filter}; use super::resources::auth_key_resource::AuthKeyResource; -use super::resources::torrent_resource::{TorrentPeerResource, TorrentResource}; -use crate::peer::TorrentPeer; +use super::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; use crate::protocol::common::*; use crate::tracker::TorrentTracker; @@ -19,16 +18,6 @@ struct TorrentInfoQuery { limit: Option, } -#[derive(Serialize)] -struct Torrent<'a> { - info_hash: &'a InfoHash, - seeders: u32, - completed: u32, - leechers: u32, - #[serde(skip_serializing_if = "Option::is_none")] - peers: Option>, -} - #[derive(Serialize)] struct Stats { torrents: u32, @@ -110,9 +99,8 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp .iter() .map(|(info_hash, torrent_entry)| { let (seeders, completed, leechers) = torrent_entry.get_stats(); - // todo: use TorrentResource - Torrent { - info_hash, + TorrentListItemResource { + info_hash: info_hash.to_string(), seeders, completed, leechers, From 7e03714ef49076ac562ca9fc8179dd7495534e82 Mon Sep 17 
00:00:00 2001 From: Jose Celano Date: Mon, 28 Nov 2022 16:29:40 +0000 Subject: [PATCH 151/435] refactor: [#61] use StatsResource in API stats endpoint --- src/api/resources/mod.rs | 1 + src/api/resources/stats_resource.rs | 21 ++++++++++ src/api/server.rs | 23 +---------- tests/api.rs | 59 ++++++++++++++++++++++++++++- 4 files changed, 81 insertions(+), 23 deletions(-) create mode 100644 src/api/resources/stats_resource.rs diff --git a/src/api/resources/mod.rs b/src/api/resources/mod.rs index e139207b5..d214d8a59 100644 --- a/src/api/resources/mod.rs +++ b/src/api/resources/mod.rs @@ -7,4 +7,5 @@ //! - [ ] StatsResource //! - [ ] ... pub mod auth_key_resource; +pub mod stats_resource; pub mod torrent_resource; diff --git a/src/api/resources/stats_resource.rs b/src/api/resources/stats_resource.rs new file mode 100644 index 000000000..7fc9f1376 --- /dev/null +++ b/src/api/resources/stats_resource.rs @@ -0,0 +1,21 @@ +use serde::{Serialize, Deserialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct StatsResource { + pub torrents: u32, + pub seeders: u32, + pub completed: u32, + pub leechers: u32, + pub tcp4_connections_handled: u32, + pub tcp4_announces_handled: u32, + pub tcp4_scrapes_handled: u32, + pub tcp6_connections_handled: u32, + pub tcp6_announces_handled: u32, + pub tcp6_scrapes_handled: u32, + pub udp4_connections_handled: u32, + pub udp4_announces_handled: u32, + pub udp4_scrapes_handled: u32, + pub udp6_connections_handled: u32, + pub udp6_announces_handled: u32, + pub udp6_scrapes_handled: u32, +} diff --git a/src/api/server.rs b/src/api/server.rs index ef514749b..41e6f7074 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -8,6 +8,7 @@ use serde::{Deserialize, Serialize}; use warp::{filters, reply, serve, Filter}; use super::resources::auth_key_resource::AuthKeyResource; +use super::resources::stats_resource::StatsResource; use super::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, 
TorrentResource}; use crate::protocol::common::*; use crate::tracker::TorrentTracker; @@ -18,26 +19,6 @@ struct TorrentInfoQuery { limit: Option, } -#[derive(Serialize)] -struct Stats { - torrents: u32, - seeders: u32, - completed: u32, - leechers: u32, - tcp4_connections_handled: u32, - tcp4_announces_handled: u32, - tcp4_scrapes_handled: u32, - tcp6_connections_handled: u32, - tcp6_announces_handled: u32, - tcp6_scrapes_handled: u32, - udp4_connections_handled: u32, - udp4_announces_handled: u32, - udp4_scrapes_handled: u32, - udp6_connections_handled: u32, - udp6_announces_handled: u32, - udp6_scrapes_handled: u32, -} - #[derive(Serialize, Debug)] #[serde(tag = "status", rename_all = "snake_case")] enum ActionStatus<'a> { @@ -122,7 +103,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp .and(filters::path::end()) .map(move || api_stats.clone()) .and_then(|tracker: Arc| async move { - let mut results = Stats { + let mut results = StatsResource { torrents: 0, seeders: 0, completed: 0, diff --git a/tests/api.rs b/tests/api.rs index ce419724a..37cdd5415 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -16,6 +16,7 @@ mod tracker_api { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use tokio::task::JoinHandle; use torrust_tracker::api::resources::auth_key_resource::AuthKeyResource; + use torrust_tracker::api::resources::stats_resource::StatsResource; use torrust_tracker::api::resources::torrent_resource::{TorrentPeerResource, TorrentResource}; use torrust_tracker::jobs::tracker_api; use torrust_tracker::peer::TorrentPeer; @@ -118,7 +119,7 @@ mod tracker_api { let (peer, peer_resource) = sample_torrent_peer(); - // Add the torrent to the tracker + // Add a torrent to the tracker api_server .tracker .unwrap() @@ -161,7 +162,7 @@ mod tracker_api { let (peer, _peer_resource) = sample_torrent_peer(); - // Add the torrent to the tracker + // Add a torrent to the tracker api_server .tracker .unwrap() @@ -193,6 +194,60 @@ mod tracker_api { ); } + 
#[tokio::test] + async fn should_allow_getting_tracker_statistics() { + let configuration = tracker_configuration(); + let api_server = new_running_api_server(configuration.clone()).await; + + let bind_address = api_server.bind_address.unwrap().clone(); + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); + + let (peer, _peer_resource) = sample_torrent_peer(); + + // Add a torrent to the tracker + api_server + .tracker + .unwrap() + .update_torrent_with_peer_and_get_stats(&info_hash, &peer) + .await; + + let url = format!("http://{}/api/stats?token={}", &bind_address, &api_token); + + let stats = reqwest::Client::builder() + .build() + .unwrap() + .get(url) + .send() + .await + .unwrap() + .json::() + .await + .unwrap(); + + assert_eq!( + stats, + StatsResource { + torrents: 1, + seeders: 1, + completed: 0, + leechers: 0, + tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + udp6_announces_handled: 0, + udp6_scrapes_handled: 0, + } + ); + } + fn tracker_configuration() -> Arc { let mut config = Configuration::default(); config.log_level = Some("off".to_owned()); From bc3d246fc4b5d3ed11b0831abd3dffe722a8dad0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 28 Nov 2022 16:46:16 +0000 Subject: [PATCH 152/435] feat(api): in torrent endpoint rename field to Marked as deprecated. It will be a breaking change in version v3.0.0. 
--- src/api/resources/stats_resource.rs | 2 +- src/api/resources/torrent_resource.rs | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/api/resources/stats_resource.rs b/src/api/resources/stats_resource.rs index 7fc9f1376..2fbaf42c1 100644 --- a/src/api/resources/stats_resource.rs +++ b/src/api/resources/stats_resource.rs @@ -1,4 +1,4 @@ -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct StatsResource { diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs index 88d0463cb..11e9d7196 100644 --- a/src/api/resources/torrent_resource.rs +++ b/src/api/resources/torrent_resource.rs @@ -27,7 +27,9 @@ pub struct TorrentListItemResource { pub struct TorrentPeerResource { pub peer_id: PeerIdResource, pub peer_addr: String, + #[deprecated(since = "2.0.0", note = "please use `updated_milliseconds_ago` instead")] pub updated: u128, + pub updated_milliseconds_ago: u128, pub uploaded: i64, pub downloaded: i64, pub left: i64, @@ -55,6 +57,7 @@ impl From for TorrentPeerResource { peer_id: PeerIdResource::from(peer.peer_id), peer_addr: peer.peer_addr.to_string(), updated: peer.updated.as_millis(), + updated_milliseconds_ago: peer.updated.as_millis(), uploaded: peer.uploaded.0, downloaded: peer.downloaded.0, left: peer.left.0, From e1b84f6eb75dcd9ba0cc6803a0f7221d9b761ef8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 28 Nov 2022 19:37:09 +0000 Subject: [PATCH 153/435] refactor: [#61] extract struct ApiClient for API testing --- tests/api.rs | 251 +++++++++++++++++++++++++++++++-------------------- 1 file changed, 154 insertions(+), 97 deletions(-) diff --git a/tests/api.rs b/tests/api.rs index 37cdd5415..475da9a24 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -14,10 +14,11 @@ mod tracker_api { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use reqwest::Response; use tokio::task::JoinHandle; use 
torrust_tracker::api::resources::auth_key_resource::AuthKeyResource; use torrust_tracker::api::resources::stats_resource::StatsResource; - use torrust_tracker::api::resources::torrent_resource::{TorrentPeerResource, TorrentResource}; + use torrust_tracker::api::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; use torrust_tracker::jobs::tracker_api; use torrust_tracker::peer::TorrentPeer; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; @@ -30,16 +31,13 @@ mod tracker_api { #[tokio::test] async fn should_allow_generating_a_new_auth_key() { - let configuration = tracker_configuration(); - let api_server = new_running_api_server(configuration.clone()).await; + let api_server = ApiServer::new_running_instance().await; - let bind_address = api_server.bind_address.unwrap().clone(); let seconds_valid = 60; - let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); - let url = format!("http://{}/api/key/{}?token={}", &bind_address, &seconds_valid, &api_token); - - let auth_key: AuthKeyResource = reqwest::Client::new().post(url).send().await.unwrap().json().await.unwrap(); + let auth_key = ApiClient::new(api_server.get_connection_info().unwrap()) + .generate_auth_key(seconds_valid) + .await; // Verify the key with the tracker assert!(api_server @@ -52,16 +50,13 @@ mod tracker_api { #[tokio::test] async fn should_allow_whitelisting_a_torrent() { - let configuration = tracker_configuration(); - let api_server = new_running_api_server(configuration.clone()).await; + let api_server = ApiServer::new_running_instance().await; - let bind_address = api_server.bind_address.unwrap().clone(); - let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let url = format!("http://{}/api/whitelist/{}?token={}", &bind_address, &info_hash, &api_token); - - let res = 
reqwest::Client::new().post(url.clone()).send().await.unwrap(); + let res = ApiClient::new(api_server.get_connection_info().unwrap()) + .whitelist_a_torrent(&info_hash) + .await; assert_eq!(res.status(), 200); assert!( @@ -75,47 +70,25 @@ mod tracker_api { #[tokio::test] async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let configuration = tracker_configuration(); - let api_server = new_running_api_server(configuration.clone()).await; + let api_server = ApiServer::new_running_instance().await; - let bind_address = api_server.bind_address.unwrap().clone(); - let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let url = format!("http://{}/api/whitelist/{}?token={}", &bind_address, &info_hash, &api_token); + let api_client = ApiClient::new(api_server.get_connection_info().unwrap()); - // First whitelist request - let res = reqwest::Client::new().post(url.clone()).send().await.unwrap(); + let res = api_client.whitelist_a_torrent(&info_hash).await; assert_eq!(res.status(), 200); - // Second whitelist request - let res = reqwest::Client::new().post(url.clone()).send().await.unwrap(); + let res = api_client.whitelist_a_torrent(&info_hash).await; assert_eq!(res.status(), 200); } - fn sample_torrent_peer() -> (TorrentPeer, TorrentPeerResource) { - let torrent_peer = TorrentPeer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1669397478934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }; - let torrent_peer_resource = TorrentPeerResource::from(torrent_peer); - - (torrent_peer, torrent_peer_resource) - } - #[tokio::test] async fn should_allow_getting_a_torrent_info() { - let configuration = tracker_configuration(); - let api_server = 
new_running_api_server(configuration.clone()).await; + let api_server = ApiServer::new_running_instance().await; + let api_connection_info = api_server.get_connection_info().unwrap(); - let bind_address = api_server.bind_address.unwrap().clone(); let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); let (peer, peer_resource) = sample_torrent_peer(); @@ -126,18 +99,7 @@ mod tracker_api { .update_torrent_with_peer_and_get_stats(&info_hash, &peer) .await; - let url = format!("http://{}/api/torrent/{}?token={}", &bind_address, &info_hash, &api_token); - - let torrent_resource = reqwest::Client::builder() - .build() - .unwrap() - .get(url) - .send() - .await - .unwrap() - .json::() - .await - .unwrap(); + let torrent_resource = ApiClient::new(api_connection_info).get_torrent(&info_hash.to_string()).await; assert_eq!( torrent_resource, @@ -153,15 +115,14 @@ mod tracker_api { #[tokio::test] async fn should_allow_getting_torrents() { - let configuration = tracker_configuration(); - let api_server = new_running_api_server(configuration.clone()).await; + let api_server = ApiServer::new_running_instance().await; - let bind_address = api_server.bind_address.unwrap().clone(); let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); let (peer, _peer_resource) = sample_torrent_peer(); + let api_connection_info = api_server.get_connection_info().unwrap(); + // Add a torrent to the tracker api_server .tracker @@ -169,22 +130,11 @@ mod tracker_api { .update_torrent_with_peer_and_get_stats(&info_hash, &peer) .await; - let url = format!("http://{}/api/torrents?token={}", &bind_address, &api_token); - - let torrent_resources = reqwest::Client::builder() - .build() - .unwrap() - .get(url) - .send() - .await - .unwrap() - 
.json::>() - .await - .unwrap(); + let torrent_resources = ApiClient::new(api_connection_info).get_torrents().await; assert_eq!( torrent_resources, - vec![TorrentResource { + vec![TorrentListItemResource { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, completed: 0, @@ -196,15 +146,14 @@ mod tracker_api { #[tokio::test] async fn should_allow_getting_tracker_statistics() { - let configuration = tracker_configuration(); - let api_server = new_running_api_server(configuration.clone()).await; + let api_server = ApiServer::new_running_instance().await; - let bind_address = api_server.bind_address.unwrap().clone(); let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let api_token = configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(); let (peer, _peer_resource) = sample_torrent_peer(); + let api_connection_info = api_server.get_connection_info().unwrap(); + // Add a torrent to the tracker api_server .tracker @@ -212,21 +161,10 @@ mod tracker_api { .update_torrent_with_peer_and_get_stats(&info_hash, &peer) .await; - let url = format!("http://{}/api/stats?token={}", &bind_address, &api_token); - - let stats = reqwest::Client::builder() - .build() - .unwrap() - .get(url) - .send() - .await - .unwrap() - .json::() - .await - .unwrap(); + let stats_resource = ApiClient::new(api_connection_info).get_tracker_statistics().await; assert_eq!( - stats, + stats_resource, StatsResource { torrents: 1, seeders: 1, @@ -248,6 +186,21 @@ mod tracker_api { ); } + fn sample_torrent_peer() -> (TorrentPeer, TorrentPeerResource) { + let torrent_peer = TorrentPeer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1669397478934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + let torrent_peer_resource = 
TorrentPeerResource::from(torrent_peer); + + (torrent_peer, torrent_peer_resource) + } + fn tracker_configuration() -> Arc { let mut config = Configuration::default(); config.log_level = Some("off".to_owned()); @@ -264,17 +217,26 @@ mod tracker_api { Arc::new(config) } - async fn new_running_api_server(configuration: Arc) -> ApiServer { - let mut api_server = ApiServer::new(); - api_server.start(configuration).await; - api_server + #[derive(Clone)] + struct ApiConnectionInfo { + pub bind_address: String, + pub api_token: String, + } + + impl ApiConnectionInfo { + pub fn new(bind_address: &str, api_token: &str) -> Self { + Self { + bind_address: bind_address.to_string(), + api_token: api_token.to_string(), + } + } } - pub struct ApiServer { + struct ApiServer { pub started: AtomicBool, pub job: Option>, - pub bind_address: Option, pub tracker: Option>, + pub connection_info: Option, } impl ApiServer { @@ -282,14 +244,28 @@ mod tracker_api { Self { started: AtomicBool::new(false), job: None, - bind_address: None, tracker: None, + connection_info: None, } } + pub async fn new_running_instance() -> ApiServer { + let configuration = tracker_configuration(); + ApiServer::new_running_custom_instance(configuration.clone()).await + } + + async fn new_running_custom_instance(configuration: Arc) -> ApiServer { + let mut api_server = ApiServer::new(); + api_server.start(configuration).await; + api_server + } + pub async fn start(&mut self, configuration: Arc) { if !self.started.load(Ordering::Relaxed) { - self.bind_address = Some(configuration.http_api.bind_address.clone()); + self.connection_info = Some(ApiConnectionInfo::new( + &configuration.http_api.bind_address.clone(), + &configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(), + )); // Set the time of Torrust app starting lazy_static::initialize(&static_time::TIME_AT_APP_START); @@ -318,5 +294,86 @@ mod tracker_api { self.started.store(true, Ordering::Relaxed); } } + + pub fn 
get_connection_info(&self) -> Option { + self.connection_info.clone() + } + } + + struct ApiClient { + connection_info: ApiConnectionInfo, + } + + impl ApiClient { + pub fn new(connection_info: ApiConnectionInfo) -> Self { + Self { connection_info } + } + + pub async fn generate_auth_key(&self, seconds_valid: i32) -> AuthKeyResource { + let url = format!( + "http://{}/api/key/{}?token={}", + &self.connection_info.bind_address, &seconds_valid, &self.connection_info.api_token + ); + reqwest::Client::new().post(url).send().await.unwrap().json().await.unwrap() + } + + pub async fn whitelist_a_torrent(&self, info_hash: &str) -> Response { + let url = format!( + "http://{}/api/whitelist/{}?token={}", + &self.connection_info.bind_address, &info_hash, &self.connection_info.api_token + ); + reqwest::Client::new().post(url.clone()).send().await.unwrap() + } + + pub async fn get_torrent(&self, info_hash: &str) -> TorrentResource { + let url = format!( + "http://{}/api/torrent/{}?token={}", + &self.connection_info.bind_address, &info_hash, &self.connection_info.api_token + ); + reqwest::Client::builder() + .build() + .unwrap() + .get(url) + .send() + .await + .unwrap() + .json::() + .await + .unwrap() + } + + pub async fn get_torrents(&self) -> Vec { + let url = format!( + "http://{}/api/torrents?token={}", + &self.connection_info.bind_address, &self.connection_info.api_token + ); + reqwest::Client::builder() + .build() + .unwrap() + .get(url) + .send() + .await + .unwrap() + .json::>() + .await + .unwrap() + } + + pub async fn get_tracker_statistics(&self) -> StatsResource { + let url = format!( + "http://{}/api/stats?token={}", + &self.connection_info.bind_address, &self.connection_info.api_token + ); + reqwest::Client::builder() + .build() + .unwrap() + .get(url) + .send() + .await + .unwrap() + .json::() + .await + .unwrap() + } } } From 7eb25a0d2c4524efdd6f6207a82d360607d4d92f Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 17:58:51 +0100 Subject: 
[PATCH 154/435] refactor: remove internal mod exports --- src/api/resources/auth_key_resource.rs | 6 ++-- src/api/resources/stats_resource.rs | 2 +- src/api/resources/torrent_resource.rs | 13 +++---- src/config.rs | 13 ++----- src/databases/database.rs | 2 +- src/databases/mysql.rs | 2 +- src/databases/sqlite.rs | 2 +- src/http/filters.rs | 6 ++-- src/http/handlers.rs | 13 +++---- src/http/mod.rs | 8 ----- src/http/request.rs | 2 +- src/http/response.rs | 2 +- src/http/routes.rs | 5 ++- src/http/server.rs | 11 +++--- src/jobs/http_tracker.rs | 3 +- src/jobs/torrent_cleanup.rs | 2 +- src/jobs/tracker_api.rs | 2 +- src/jobs/udp_tracker.rs | 3 +- src/lib.rs | 8 ----- src/logging.rs | 2 +- src/main.rs | 3 +- src/protocol/common.rs | 3 +- src/setup.rs | 2 +- src/stats.rs | 2 +- src/tracker/key.rs | 2 +- src/tracker/mod.rs | 8 ++--- src/tracker/peer.rs | 22 ++++++------ src/tracker/statistics.rs | 4 +-- src/tracker/torrent.rs | 10 +++--- src/udp/connection_cookie.rs | 2 +- src/udp/handlers.rs | 49 +++++++++++++------------- src/udp/mod.rs | 5 --- src/udp/request.rs | 2 +- src/udp/server.rs | 3 +- tests/api.rs | 6 ++-- tests/udp.rs | 3 +- 36 files changed, 108 insertions(+), 125 deletions(-) diff --git a/src/api/resources/auth_key_resource.rs b/src/api/resources/auth_key_resource.rs index c38b7cc18..4fc5d0cf9 100644 --- a/src/api/resources/auth_key_resource.rs +++ b/src/api/resources/auth_key_resource.rs @@ -2,10 +2,10 @@ use std::convert::From; use serde::{Deserialize, Serialize}; -use crate::key::AuthKey; use crate::protocol::clock::DurationSinceUnixEpoch; +use crate::tracker::key::AuthKey; -#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct AuthKeyResource { pub key: String, pub valid_until: Option, @@ -36,8 +36,8 @@ mod tests { use std::time::Duration; use super::AuthKeyResource; - use crate::key::AuthKey; use crate::protocol::clock::{DefaultClock, TimeNow}; + use crate::tracker::key::AuthKey; 
#[test] fn it_should_be_convertible_into_an_auth_key() { diff --git a/src/api/resources/stats_resource.rs b/src/api/resources/stats_resource.rs index 2fbaf42c1..e6f184897 100644 --- a/src/api/resources/stats_resource.rs +++ b/src/api/resources/stats_resource.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct StatsResource { pub torrents: u32, pub seeders: u32, diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs index 11e9d7196..784ffcb05 100644 --- a/src/api/resources/torrent_resource.rs +++ b/src/api/resources/torrent_resource.rs @@ -1,9 +1,9 @@ use serde::{Deserialize, Serialize}; -use crate::peer::TorrentPeer; -use crate::PeerId; +use crate::protocol::common::PeerId; +use crate::tracker::peer::TorrentPeer; -#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct TorrentResource { pub info_hash: String, pub seeders: u32, @@ -13,7 +13,7 @@ pub struct TorrentResource { pub peers: Option>, } -#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct TorrentListItemResource { pub info_hash: String, pub seeders: u32, @@ -23,7 +23,7 @@ pub struct TorrentListItemResource { pub peers: Option>, } -#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct TorrentPeerResource { pub peer_id: PeerIdResource, pub peer_addr: String, @@ -36,7 +36,7 @@ pub struct TorrentPeerResource { pub event: String, } -#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct PeerIdResource { pub id: Option, pub client: Option, @@ -52,6 +52,7 @@ impl From for PeerIdResource { } impl From for TorrentPeerResource { + #[allow(deprecated)] fn from(peer: TorrentPeer) -> Self { 
TorrentPeerResource { peer_id: PeerIdResource::from(peer.peer_id), diff --git a/src/config.rs b/src/config.rs index 8c17070d2..1afc55e54 100644 --- a/src/config.rs +++ b/src/config.rs @@ -10,7 +10,7 @@ use serde_with::{serde_as, NoneAsEmptyString}; use {std, toml}; use crate::databases::database::DatabaseDrivers; -use crate::mode::TrackerMode; +use crate::tracker::mode::TrackerMode; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct UdpTrackerConfig { @@ -161,6 +161,7 @@ impl Configuration { #[cfg(test)] mod tests { + use crate::config::{Configuration, ConfigurationError}; #[cfg(test)] fn default_config_toml() -> String { @@ -205,8 +206,6 @@ mod tests { #[test] fn configuration_should_have_default_values() { - use crate::Configuration; - let configuration = Configuration::default(); let toml = toml::to_string(&configuration).expect("Could not encode TOML value"); @@ -216,8 +215,6 @@ mod tests { #[test] fn configuration_should_contain_the_external_ip() { - use crate::Configuration; - let configuration = Configuration::default(); assert_eq!(configuration.external_ip, Option::Some(String::from("0.0.0.0"))); @@ -229,8 +226,6 @@ mod tests { use uuid::Uuid; - use crate::Configuration; - // Build temp config file path let temp_directory = env::temp_dir(); let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); @@ -275,8 +270,6 @@ mod tests { #[test] fn configuration_should_be_loaded_from_a_toml_config_file() { - use crate::Configuration; - let config_file_path = create_temp_config_file_with_default_config(); let configuration = Configuration::load_from_file(&config_file_path).expect("Could not load configuration from file"); @@ -286,8 +279,6 @@ mod tests { #[test] fn configuration_error_could_be_displayed() { - use crate::ConfigurationError; - let error = ConfigurationError::TrackerModeIncompatible; assert_eq!(format!("{}", error), "TrackerModeIncompatible"); diff --git a/src/databases/database.rs b/src/databases/database.rs 
index 795be0d45..52ca68291 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -4,8 +4,8 @@ use serde::{Deserialize, Serialize}; use crate::databases::mysql::MysqlDatabase; use crate::databases::sqlite::SqliteDatabase; +use crate::protocol::common::InfoHash; use crate::tracker::key::AuthKey; -use crate::InfoHash; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub enum DatabaseDrivers { diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index fc6ff5098..5e7410ac2 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -10,8 +10,8 @@ use r2d2_mysql::MysqlConnectionManager; use crate::databases::database; use crate::databases::database::{Database, Error}; +use crate::protocol::common::{InfoHash, AUTH_KEY_LENGTH}; use crate::tracker::key::AuthKey; -use crate::{InfoHash, AUTH_KEY_LENGTH}; pub struct MysqlDatabase { pool: Pool, diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 7a567b07e..cf710a7e1 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -8,8 +8,8 @@ use r2d2_sqlite::SqliteConnectionManager; use crate::databases::database; use crate::databases::database::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; +use crate::protocol::common::InfoHash; use crate::tracker::key::AuthKey; -use crate::InfoHash; pub struct SqliteDatabase { pool: Pool, diff --git a/src/http/filters.rs b/src/http/filters.rs index 42d1592ff..d8f5a81f8 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -5,10 +5,12 @@ use std::sync::Arc; use warp::{reject, Filter, Rejection}; -use crate::http::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError, WebResult}; +use super::errors::ServerError; +use super::request::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest}; +use super::WebResult; +use crate::protocol::common::{InfoHash, PeerId, MAX_SCRAPE_TORRENTS}; use crate::tracker::key::AuthKey; use crate::tracker::TorrentTracker; -use crate::{InfoHash, PeerId, 
MAX_SCRAPE_TORRENTS}; /// Pass Arc along pub fn with_tracker(tracker: Arc) -> impl Filter,), Error = Infallible> + Clone { diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 87d2d51f6..c8b33c6d0 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -7,16 +7,17 @@ use log::debug; use warp::http::Response; use warp::{reject, Rejection, Reply}; -use crate::http::{ - AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, - WebResult, -}; -use crate::peer::TorrentPeer; +use super::errors::ServerError; +use super::request::{AnnounceRequest, ScrapeRequest}; +use super::response::{AnnounceResponse, Peer, ScrapeResponse, ScrapeResponseEntry}; +use crate::http::response::ErrorResponse; +use crate::http::WebResult; +use crate::protocol::common::InfoHash; use crate::tracker::key::AuthKey; +use crate::tracker::peer::TorrentPeer; use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::{TorrentError, TorrentStats}; use crate::tracker::TorrentTracker; -use crate::InfoHash; /// Authenticate InfoHash using optional AuthKey pub async fn authenticate( diff --git a/src/http/mod.rs b/src/http/mod.rs index 4842c0a25..6e3ce7111 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -1,11 +1,3 @@ -pub use self::errors::*; -pub use self::filters::*; -pub use self::handlers::*; -pub use self::request::*; -pub use self::response::*; -pub use self::routes::*; -pub use self::server::*; - pub mod errors; pub mod filters; pub mod handlers; diff --git a/src/http/request.rs b/src/http/request.rs index 6dd025e8c..2d72a1a3c 100644 --- a/src/http/request.rs +++ b/src/http/request.rs @@ -3,7 +3,7 @@ use std::net::IpAddr; use serde::Deserialize; use crate::http::Bytes; -use crate::{InfoHash, PeerId}; +use crate::protocol::common::{InfoHash, PeerId}; #[derive(Deserialize)] pub struct AnnounceRequestQuery { diff --git a/src/http/response.rs b/src/http/response.rs index c87b5e0e8..44387a9f3 
100644 --- a/src/http/response.rs +++ b/src/http/response.rs @@ -6,7 +6,7 @@ use std::net::IpAddr; use serde; use serde::Serialize; -use crate::InfoHash; +use crate::protocol::common::InfoHash; #[derive(Serialize)] pub struct Peer { diff --git a/src/http/routes.rs b/src/http/routes.rs index 8bfaf5ed9..f82bf45bc 100644 --- a/src/http/routes.rs +++ b/src/http/routes.rs @@ -3,9 +3,8 @@ use std::sync::Arc; use warp::{Filter, Rejection}; -use crate::http::{ - handle_announce, handle_scrape, send_error, with_announce_request, with_auth_key, with_scrape_request, with_tracker, -}; +use super::filters::{with_announce_request, with_auth_key, with_scrape_request, with_tracker}; +use super::handlers::{handle_announce, handle_scrape, send_error}; use crate::tracker::TorrentTracker; /// All routes diff --git a/src/http/server.rs b/src/http/server.rs index 4e48f97e3..d60387346 100644 --- a/src/http/server.rs +++ b/src/http/server.rs @@ -1,7 +1,7 @@ use std::net::SocketAddr; use std::sync::Arc; -use crate::http::routes; +use super::routes; use crate::tracker::TorrentTracker; /// Server that listens on HTTP, needs a TorrentTracker @@ -17,9 +17,10 @@ impl HttpServer { /// Start the HttpServer pub fn start(&self, socket_addr: SocketAddr) -> impl warp::Future { - let (_addr, server) = warp::serve(routes(self.tracker.clone())).bind_with_graceful_shutdown(socket_addr, async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - }); + let (_addr, server) = + warp::serve(routes::routes(self.tracker.clone())).bind_with_graceful_shutdown(socket_addr, async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + }); server } @@ -31,7 +32,7 @@ impl HttpServer { ssl_cert_path: String, ssl_key_path: String, ) -> impl warp::Future { - let (_addr, server) = warp::serve(routes(self.tracker.clone())) + let (_addr, server) = warp::serve(routes::routes(self.tracker.clone())) .tls() .cert_path(ssl_cert_path) .key_path(ssl_key_path) 
diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index 2d8f307b4..8ae9eb3f5 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -4,8 +4,9 @@ use std::sync::Arc; use log::{info, warn}; use tokio::task::JoinHandle; +use crate::config::HttpTrackerConfig; +use crate::http::server::HttpServer; use crate::tracker::TorrentTracker; -use crate::{HttpServer, HttpTrackerConfig}; pub fn start_job(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.parse::().unwrap(); diff --git a/src/jobs/torrent_cleanup.rs b/src/jobs/torrent_cleanup.rs index 04b064043..3b572d780 100644 --- a/src/jobs/torrent_cleanup.rs +++ b/src/jobs/torrent_cleanup.rs @@ -4,8 +4,8 @@ use chrono::Utc; use log::info; use tokio::task::JoinHandle; +use crate::config::Configuration; use crate::tracker::TorrentTracker; -use crate::Configuration; pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(&tracker); diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index ba5b8a1fb..b0b315f44 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -5,8 +5,8 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use crate::api::server; +use crate::config::Configuration; use crate::tracker::TorrentTracker; -use crate::Configuration; #[derive(Debug)] pub struct ApiServerJobStarted(); diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index 00fdaddbe..90986455c 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -3,8 +3,9 @@ use std::sync::Arc; use log::{error, info, warn}; use tokio::task::JoinHandle; +use crate::config::UdpTrackerConfig; use crate::tracker::TorrentTracker; -use crate::{UdpServer, UdpTrackerConfig}; +use crate::udp::server::UdpServer; pub fn start_job(config: &UdpTrackerConfig, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.clone(); diff --git a/src/lib.rs b/src/lib.rs index 
cf830f108..7e4fe13a7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,11 +1,3 @@ -pub use api::server::*; -pub use http::server::*; -pub use protocol::common::*; -pub use udp::server::*; - -pub use self::config::*; -pub use self::tracker::*; - pub mod api; pub mod config; pub mod databases; diff --git a/src/logging.rs b/src/logging.rs index 5d0efa8a4..7682bace1 100644 --- a/src/logging.rs +++ b/src/logging.rs @@ -3,7 +3,7 @@ use std::sync::Once; use log::{info, LevelFilter}; -use crate::Configuration; +use crate::config::Configuration; static INIT: Once = Once::new(); diff --git a/src/main.rs b/src/main.rs index bf832dbf4..f64354fcf 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,9 +1,10 @@ use std::sync::Arc; use log::info; +use torrust_tracker::config::Configuration; use torrust_tracker::stats::setup_statistics; use torrust_tracker::tracker::TorrentTracker; -use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time, Configuration}; +use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time}; #[tokio::main] async fn main() { diff --git a/src/protocol/common.rs b/src/protocol/common.rs index ce1cbf253..efeb328c9 100644 --- a/src/protocol/common.rs +++ b/src/protocol/common.rs @@ -94,12 +94,13 @@ impl<'de> serde::de::Deserialize<'de> for InfoHash { #[cfg(test)] mod tests { + use std::str::FromStr; use serde::{Deserialize, Serialize}; use serde_json::json; - use crate::InfoHash; + use super::InfoHash; #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] struct ContainingInfoHash { diff --git a/src/setup.rs b/src/setup.rs index 9906a2d03..736f448b6 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -3,9 +3,9 @@ use std::sync::Arc; use log::warn; use tokio::task::JoinHandle; +use crate::config::Configuration; use crate::jobs::{http_tracker, torrent_cleanup, tracker_api, udp_tracker}; use crate::tracker::TorrentTracker; -use crate::Configuration; pub async fn setup(config: &Configuration, tracker: Arc) -> Vec> { let mut jobs: Vec> 
= Vec::new(); diff --git a/src/stats.rs b/src/stats.rs index 1f387a084..22b74c8d3 100644 --- a/src/stats.rs +++ b/src/stats.rs @@ -1,4 +1,4 @@ -use crate::statistics::{StatsRepository, StatsTracker, TrackerStatisticsEventSender}; +use crate::tracker::statistics::{StatsRepository, StatsTracker, TrackerStatisticsEventSender}; pub fn setup_statistics(tracker_usage_statistics: bool) -> (Option>, StatsRepository) { let mut stats_event_sender = None; diff --git a/src/tracker/key.rs b/src/tracker/key.rs index 1bf0557a1..6d3f3c320 100644 --- a/src/tracker/key.rs +++ b/src/tracker/key.rs @@ -7,7 +7,7 @@ use rand::{thread_rng, Rng}; use serde::Serialize; use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, Time, TimeNow}; -use crate::AUTH_KEY_LENGTH; +use crate::protocol::common::AUTH_KEY_LENGTH; pub fn generate_auth_key(lifetime: Duration) -> AuthKey { let key: String = thread_rng() diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index a3eecd427..f31347e3e 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -13,15 +13,15 @@ use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; +use self::mode::TrackerMode; +use self::peer::TorrentPeer; +use self::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender}; +use crate::config::Configuration; use crate::databases::database; use crate::databases::database::Database; -use crate::mode::TrackerMode; -use crate::peer::TorrentPeer; use crate::protocol::common::InfoHash; -use crate::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender}; use crate::tracker::key::AuthKey; use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; -use crate::Configuration; pub struct TorrentTracker { pub config: Arc, diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 42ef6a60b..77613e080 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -4,7 
+4,7 @@ use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde; use serde::Serialize; -use crate::http::AnnounceRequest; +use crate::http::request::AnnounceRequest; use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef, PeerId}; use crate::protocol::utils::ser_unix_time_value; @@ -95,9 +95,9 @@ mod test { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::peer::TorrentPeer; use crate::protocol::clock::{DefaultClock, Time}; - use crate::PeerId; + use crate::protocol::common::PeerId; + use crate::tracker::peer::TorrentPeer; #[test] fn it_should_be_serializable() { @@ -129,7 +129,7 @@ mod test { AnnounceEvent, AnnounceRequest, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, TransactionId, }; - use crate::peer::TorrentPeer; + use crate::tracker::peer::TorrentPeer; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; // todo: duplicate functions is PR 82. Remove duplication once both PR are merged. 
@@ -200,8 +200,8 @@ mod test { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::str::FromStr; - use crate::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; - use crate::peer::TorrentPeer; + use crate::tracker::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; + use crate::tracker::peer::TorrentPeer; #[test] fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { @@ -241,8 +241,8 @@ mod test { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::str::FromStr; - use crate::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; - use crate::peer::TorrentPeer; + use crate::tracker::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; + use crate::tracker::peer::TorrentPeer; #[test] fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { @@ -281,9 +281,9 @@ mod test { mod torrent_peer_constructor_from_for_http_requests { use std::net::{IpAddr, Ipv4Addr}; - use crate::http::AnnounceRequest; - use crate::peer::TorrentPeer; - use crate::{InfoHash, PeerId}; + use crate::http::request::AnnounceRequest; + use crate::protocol::common::{InfoHash, PeerId}; + use crate::tracker::peer::TorrentPeer; fn sample_http_announce_request(peer_addr: IpAddr, port: u16) -> AnnounceRequest { AnnounceRequest { diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index ac3889270..50804a5f4 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -271,7 +271,7 @@ impl StatsRepository { mod tests { mod stats_tracker { - use crate::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsEvent}; + use crate::tracker::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsEvent}; #[tokio::test] async fn should_contain_the_tracker_statistics() { @@ -295,7 +295,7 @@ mod tests { } mod event_handler { - use crate::statistics::{event_handler, 
StatsRepository, TrackerStatisticsEvent}; + use crate::tracker::statistics::{event_handler, StatsRepository, TrackerStatisticsEvent}; #[tokio::test] async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 335554006..f23858949 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -4,9 +4,9 @@ use std::time::Duration; use aquatic_udp_protocol::AnnounceEvent; use serde::{Deserialize, Serialize}; -use crate::peer::TorrentPeer; +use super::peer::TorrentPeer; use crate::protocol::clock::{DefaultClock, TimeNow}; -use crate::{PeerId, MAX_SCRAPE_TORRENTS}; +use crate::protocol::common::{PeerId, MAX_SCRAPE_TORRENTS}; #[derive(Serialize, Deserialize, Clone, Debug)] pub struct TorrentEntry { @@ -113,10 +113,10 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::peer::TorrentPeer; use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, WorkingClock}; - use crate::torrent::TorrentEntry; - use crate::PeerId; + use crate::protocol::common::PeerId; + use crate::tracker::peer::TorrentPeer; + use crate::tracker::torrent::TorrentEntry; struct TorrentPeerBuilder { peer: TorrentPeer, diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs index c40a56959..ef241245a 100644 --- a/src/udp/connection_cookie.rs +++ b/src/udp/connection_cookie.rs @@ -2,8 +2,8 @@ use std::net::SocketAddr; use aquatic_udp_protocol::ConnectionId; +use super::errors::ServerError; use crate::protocol::clock::time_extent::{Extent, TimeExtent}; -use crate::udp::ServerError; pub type Cookie = [u8; 8]; diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 5514bc1eb..30b33225c 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -7,13 +7,13 @@ use aquatic_udp_protocol::{ }; use super::connection_cookie::{check_connection_cookie, from_connection_id, into_connection_id, make_connection_cookie}; 
-use crate::peer::TorrentPeer; +use crate::protocol::common::{InfoHash, MAX_SCRAPE_TORRENTS}; +use crate::tracker::peer::TorrentPeer; use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::TorrentError; use crate::tracker::TorrentTracker; use crate::udp::errors::ServerError; use crate::udp::request::AnnounceRequestWrapper; -use crate::{InfoHash, MAX_SCRAPE_TORRENTS}; pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), ServerError> { match tracker.authenticate_request(info_hash, &None).await { @@ -252,12 +252,13 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::mode::TrackerMode; - use crate::peer::TorrentPeer; + use crate::config::Configuration; use crate::protocol::clock::{DefaultClock, Time}; - use crate::statistics::StatsTracker; + use crate::protocol::common::PeerId; + use crate::tracker::mode::TrackerMode; + use crate::tracker::peer::TorrentPeer; + use crate::tracker::statistics::StatsTracker; use crate::tracker::TorrentTracker; - use crate::{Configuration, PeerId}; fn default_tracker_config() -> Arc { Arc::new(Configuration::default()) @@ -373,10 +374,10 @@ mod tests { use mockall::predicate::eq; use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr}; - use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; + use crate::tracker::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; - use crate::udp::handle_connect; + use crate::udp::handlers::handle_connect; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; fn sample_connect_request() -> ConnectRequest { @@ -545,15 +546,15 @@ mod tests { }; use mockall::predicate::eq; - use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, 
TrackerStatisticsEvent}; + use crate::protocol::common::PeerId; + use crate::tracker::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; - use crate::udp::handle_announce; + use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ default_tracker_config, initialized_public_tracker, sample_ipv4_socket_address, TorrentPeerBuilder, }; - use crate::PeerId; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -716,11 +717,11 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use crate::protocol::common::PeerId; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; - use crate::udp::handle_announce; + use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{initialized_public_tracker, TorrentPeerBuilder}; - use crate::PeerId; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { @@ -770,15 +771,15 @@ mod tests { }; use mockall::predicate::eq; - use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; + use crate::protocol::common::PeerId; + use crate::tracker::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; - use crate::udp::handle_announce; + use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ default_tracker_config, initialized_public_tracker, sample_ipv6_remote_addr, 
TorrentPeerBuilder, }; - use crate::PeerId; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -951,10 +952,10 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::statistics::StatsTracker; + use crate::tracker::statistics::StatsTracker; use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; - use crate::udp::handle_announce; + use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::TrackerConfigurationBuilder; @@ -1013,11 +1014,11 @@ mod tests { }; use super::TorrentPeerBuilder; + use crate::protocol::common::PeerId; use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; - use crate::udp::handle_scrape; + use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; - use crate::PeerId; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { TorrentScrapeStatistics { @@ -1123,7 +1124,7 @@ mod tests { use aquatic_udp_protocol::InfoHash; - use crate::udp::handle_scrape; + use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::scrape_request::{ add_a_sample_seeder_and_scrape, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; @@ -1162,7 +1163,7 @@ mod tests { mod with_a_whitelisted_tracker { use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; - use crate::udp::handle_scrape; + use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::scrape_request::{ add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; @@ -1231,7 +1232,7 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; - use crate::statistics::{MockTrackerStatisticsEventSender, 
StatsRepository, TrackerStatisticsEvent}; + use crate::tracker::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; use crate::tracker::TorrentTracker; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr}; @@ -1264,7 +1265,7 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; - use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; + use crate::tracker::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; use crate::tracker::TorrentTracker; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr}; diff --git a/src/udp/mod.rs b/src/udp/mod.rs index 4c98875c5..327f03eed 100644 --- a/src/udp/mod.rs +++ b/src/udp/mod.rs @@ -1,8 +1,3 @@ -pub use self::errors::*; -pub use self::handlers::*; -pub use self::request::*; -pub use self::server::*; - pub mod connection_cookie; pub mod errors; pub mod handlers; diff --git a/src/udp/request.rs b/src/udp/request.rs index 6531f54b9..67aaeb57f 100644 --- a/src/udp/request.rs +++ b/src/udp/request.rs @@ -1,6 +1,6 @@ use aquatic_udp_protocol::AnnounceRequest; -use crate::InfoHash; +use crate::protocol::common::InfoHash; // struct AnnounceRequest { // pub connection_id: i64, diff --git a/src/udp/server.rs b/src/udp/server.rs index 2f41c3c4d..5c215f9ec 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -7,7 +7,8 @@ use log::{debug, info}; use tokio::net::UdpSocket; use crate::tracker::TorrentTracker; -use crate::udp::{handle_packet, MAX_PACKET_SIZE}; +use crate::udp::handlers::handle_packet; +use crate::udp::MAX_PACKET_SIZE; pub struct UdpServer { socket: Arc, diff --git a/tests/api.rs b/tests/api.rs index 475da9a24..a5ae79621 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -19,13 +19,15 @@ mod tracker_api { use 
torrust_tracker::api::resources::auth_key_resource::AuthKeyResource; use torrust_tracker::api::resources::stats_resource::StatsResource; use torrust_tracker::api::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; + use torrust_tracker::config::Configuration; use torrust_tracker::jobs::tracker_api; - use torrust_tracker::peer::TorrentPeer; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; + use torrust_tracker::protocol::common::{InfoHash, PeerId}; use torrust_tracker::tracker::key::AuthKey; + use torrust_tracker::tracker::peer::TorrentPeer; use torrust_tracker::tracker::statistics::StatsTracker; use torrust_tracker::tracker::TorrentTracker; - use torrust_tracker::{ephemeral_instance_keys, logging, static_time, Configuration, InfoHash, PeerId}; + use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; use crate::common::ephemeral_random_port; diff --git a/tests/udp.rs b/tests/udp.rs index ab96259c5..7a0d883a5 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -18,11 +18,12 @@ mod udp_tracker_server { }; use tokio::net::UdpSocket; use tokio::task::JoinHandle; + use torrust_tracker::config::Configuration; use torrust_tracker::jobs::udp_tracker; use torrust_tracker::tracker::statistics::StatsTracker; use torrust_tracker::tracker::TorrentTracker; use torrust_tracker::udp::MAX_PACKET_SIZE; - use torrust_tracker::{ephemeral_instance_keys, logging, static_time, Configuration}; + use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; use crate::common::ephemeral_random_port; From 81c41293cb6c029085efcced0ff6bcb40c586d90 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 17:19:34 +0100 Subject: [PATCH 155/435] vscode: clippy padantic warnings --- .vscode/settings.json | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.vscode/settings.json b/.vscode/settings.json index f1027e9bd..94f199bd6 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -3,4 +3,6 @@ 
"editor.formatOnSave": true }, "rust-analyzer.checkOnSave.command": "clippy", + "rust-analyzer.checkOnSave.allTargets": true, + "rust-analyzer.checkOnSave.extraArgs": ["--","-W","clippy::pedantic"], } \ No newline at end of file From 2b88ce50070ae8593e9d06b1788cbffce016139e Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 18:11:12 +0100 Subject: [PATCH 156/435] clippy: auto fix --- src/api/resources/mod.rs | 6 ++--- src/api/resources/torrent_resource.rs | 2 +- src/api/server.rs | 2 +- src/config.rs | 4 +++- src/databases/database.rs | 2 +- src/databases/sqlite.rs | 6 ++--- src/http/filters.rs | 20 +++++++++-------- src/http/handlers.rs | 4 ++-- src/http/response.rs | 4 +++- src/http/server.rs | 7 +++--- src/jobs/http_tracker.rs | 1 + src/jobs/torrent_cleanup.rs | 1 + src/jobs/tracker_api.rs | 4 +--- src/jobs/udp_tracker.rs | 1 + src/protocol/clock/mod.rs | 4 +++- src/protocol/clock/time_extent.rs | 32 +++++++++++++++------------ src/protocol/common.rs | 4 +++- src/protocol/crypto.rs | 4 ++-- src/stats.rs | 1 + src/tracker/key.rs | 3 +++ src/tracker/mod.rs | 6 ++--- src/tracker/peer.rs | 6 ++++- src/tracker/statistics.rs | 6 ++++- src/tracker/torrent.rs | 7 ++++-- src/udp/connection_cookie.rs | 3 +++ src/udp/handlers.rs | 2 +- src/udp/mod.rs | 2 +- src/udp/request.rs | 1 + tests/api.rs | 4 ++-- tests/udp.rs | 6 ++--- 30 files changed, 95 insertions(+), 60 deletions(-) diff --git a/src/api/resources/mod.rs b/src/api/resources/mod.rs index d214d8a59..2b3e4b886 100644 --- a/src/api/resources/mod.rs +++ b/src/api/resources/mod.rs @@ -2,9 +2,9 @@ //! //! WIP. Not all endpoints have their resource structs. //! -//! - [x] AuthKeys -//! - [ ] TorrentResource, TorrentListItemResource, TorrentPeerResource, PeerIdResource -//! - [ ] StatsResource +//! - [x] `AuthKeys` +//! - [ ] `TorrentResource`, `TorrentListItemResource`, `TorrentPeerResource`, `PeerIdResource` +//! - [ ] `StatsResource` //! - [ ] ... 
pub mod auth_key_resource; pub mod stats_resource; diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs index 784ffcb05..eb9620d23 100644 --- a/src/api/resources/torrent_resource.rs +++ b/src/api/resources/torrent_resource.rs @@ -46,7 +46,7 @@ impl From for PeerIdResource { fn from(peer_id: PeerId) -> Self { PeerIdResource { id: peer_id.get_id(), - client: peer_id.get_client_name().map(|client_name| client_name.to_string()), + client: peer_id.get_client_name().map(std::string::ToString::to_string), } } } diff --git a/src/api/server.rs b/src/api/server.rs index 41e6f7074..ce272b3ac 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -10,7 +10,7 @@ use warp::{filters, reply, serve, Filter}; use super::resources::auth_key_resource::AuthKeyResource; use super::resources::stats_resource::StatsResource; use super::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; -use crate::protocol::common::*; +use crate::protocol::common::InfoHash; use crate::tracker::TorrentTracker; #[derive(Deserialize, Debug)] diff --git a/src/config.rs b/src/config.rs index 1afc55e54..1199c7fe7 100644 --- a/src/config.rs +++ b/src/config.rs @@ -77,6 +77,7 @@ impl std::fmt::Display for ConfigurationError { impl std::error::Error for ConfigurationError {} impl Configuration { + #[must_use] pub fn get_ext_ip(&self) -> Option { match &self.external_ip { None => None, @@ -87,6 +88,7 @@ impl Configuration { } } + #[must_use] pub fn default() -> Configuration { let mut configuration = Configuration { log_level: Option::from(String::from("info")), @@ -198,7 +200,7 @@ mod tests { admin = "MyAccessToken" "# .lines() - .map(|line| line.trim_start()) + .map(str::trim_start) .collect::>() .join("\n"); config diff --git a/src/databases/database.rs b/src/databases/database.rs index 52ca68291..87a91ddeb 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -55,7 +55,7 @@ pub trait Database: Sync + Send { 
async fn remove_key_from_keys(&self, key: &str) -> Result; async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { - if let Err(e) = self.get_info_hash_from_whitelist(&info_hash.to_owned().to_string()).await { + if let Err(e) = self.get_info_hash_from_whitelist(&info_hash.clone().to_string()).await { if let Error::QueryReturnedNoRows = e { return Ok(false); } else { diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index cf710a7e1..19849f297 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -70,7 +70,7 @@ impl Database for SqliteDatabase { Ok((info_hash, completed)) })?; - let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(|x| x.ok()).collect(); + let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(std::result::Result::ok).collect(); Ok(torrents) } @@ -90,7 +90,7 @@ impl Database for SqliteDatabase { }) })?; - let keys: Vec = keys_iter.filter_map(|x| x.ok()).collect(); + let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); Ok(keys) } @@ -106,7 +106,7 @@ impl Database for SqliteDatabase { Ok(InfoHash::from_str(&info_hash).unwrap()) })?; - let info_hashes: Vec = info_hash_iter.filter_map(|x| x.ok()).collect(); + let info_hashes: Vec = info_hash_iter.filter_map(std::result::Result::ok).collect(); Ok(info_hashes) } diff --git a/src/http/filters.rs b/src/http/filters.rs index d8f5a81f8..d33acbcfa 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -13,6 +13,7 @@ use crate::tracker::key::AuthKey; use crate::tracker::TorrentTracker; /// Pass Arc along +#[must_use] pub fn with_tracker(tracker: Arc) -> impl Filter,), Error = Infallible> + Clone { warp::any().map(move || tracker.clone()) } @@ -22,19 +23,20 @@ pub fn with_info_hash() -> impl Filter,), Error = Rejec warp::filters::query::raw().and_then(info_hashes) } -/// Check for PeerId +/// Check for `PeerId` pub fn with_peer_id() -> impl Filter + Clone { warp::filters::query::raw().and_then(peer_id) } /// Pass Arc along 
+#[must_use] pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { warp::path::param::() .map(|key: String| AuthKey::from_string(&key)) .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) } -/// Check for PeerAddress +/// Check for `PeerAddress` pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter + Clone { warp::addr::remote() .and(warp::header::optional::("X-Forwarded-For")) @@ -44,7 +46,7 @@ pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter impl Filter + Clone { warp::filters::query::query::() .and(with_info_hash()) @@ -53,7 +55,7 @@ pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter impl Filter + Clone { warp::any() .and(with_info_hash()) @@ -61,7 +63,7 @@ pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter WebResult> { let split_raw_query: Vec<&str> = raw_query.split('&').collect(); let mut info_hashes: Vec = Vec::new(); @@ -86,7 +88,7 @@ async fn info_hashes(raw_query: String) -> WebResult> { } } -/// Parse PeerId from raw query string +/// Parse `PeerId` from raw query string async fn peer_id(raw_query: String) -> WebResult { // put all query params in a vec let split_raw_query: Vec<&str> = raw_query.split('&').collect(); @@ -123,7 +125,7 @@ async fn peer_id(raw_query: String) -> WebResult { } } -/// Get PeerAddress from RemoteAddress or Forwarded +/// Get `PeerAddress` from `RemoteAddress` or Forwarded async fn peer_addr( (on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option), ) -> WebResult { @@ -151,7 +153,7 @@ async fn peer_addr( } } -/// Parse AnnounceRequest from raw AnnounceRequestQuery, InfoHash and Option +/// Parse `AnnounceRequest` from raw `AnnounceRequestQuery`, `InfoHash` and Option async fn announce_request( announce_request_query: AnnounceRequestQuery, info_hashes: Vec, @@ -171,7 +173,7 @@ async fn announce_request( }) } -/// Parse ScrapeRequest from InfoHash +/// Parse `ScrapeRequest` from `InfoHash` async fn scrape_request(info_hashes: Vec, 
peer_addr: IpAddr) -> WebResult { Ok(ScrapeRequest { info_hashes, peer_addr }) } diff --git a/src/http/handlers.rs b/src/http/handlers.rs index c8b33c6d0..cd521b43b 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -19,7 +19,7 @@ use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::{TorrentError, TorrentStats}; use crate::tracker::TorrentTracker; -/// Authenticate InfoHash using optional AuthKey +/// Authenticate `InfoHash` using optional `AuthKey` pub async fn authenticate( info_hash: &InfoHash, auth_key: &Option, @@ -93,7 +93,7 @@ pub async fn handle_scrape( let mut files: HashMap = HashMap::new(); let db = tracker.get_torrents().await; - for info_hash in scrape_request.info_hashes.iter() { + for info_hash in &scrape_request.info_hashes { let scrape_entry = match db.get(info_hash) { Some(torrent_info) => { if authenticate(info_hash, &auth_key, tracker.clone()).await.is_ok() { diff --git a/src/http/response.rs b/src/http/response.rs index 44387a9f3..cb01068fa 100644 --- a/src/http/response.rs +++ b/src/http/response.rs @@ -27,6 +27,7 @@ pub struct AnnounceResponse { } impl AnnounceResponse { + #[must_use] pub fn write(&self) -> String { serde_bencode::to_string(&self).unwrap() } @@ -89,7 +90,7 @@ impl ScrapeResponse { bytes.write_all(b"d5:filesd")?; - for (info_hash, scrape_response_entry) in self.files.iter() { + for (info_hash, scrape_response_entry) in &self.files { bytes.write_all(b"20:")?; bytes.write_all(&info_hash.0)?; bytes.write_all(b"d8:completei")?; @@ -114,6 +115,7 @@ pub struct ErrorResponse { } impl ErrorResponse { + #[must_use] pub fn write(&self) -> String { serde_bencode::to_string(&self).unwrap() } diff --git a/src/http/server.rs b/src/http/server.rs index d60387346..97ec30aa0 100644 --- a/src/http/server.rs +++ b/src/http/server.rs @@ -4,18 +4,19 @@ use std::sync::Arc; use super::routes; use crate::tracker::TorrentTracker; -/// Server that listens on HTTP, needs a TorrentTracker +/// Server that 
listens on HTTP, needs a `TorrentTracker` #[derive(Clone)] pub struct HttpServer { tracker: Arc, } impl HttpServer { + #[must_use] pub fn new(tracker: Arc) -> HttpServer { HttpServer { tracker } } - /// Start the HttpServer + /// Start the `HttpServer` pub fn start(&self, socket_addr: SocketAddr) -> impl warp::Future { let (_addr, server) = warp::serve(routes::routes(self.tracker.clone())).bind_with_graceful_shutdown(socket_addr, async move { @@ -25,7 +26,7 @@ impl HttpServer { server } - /// Start the HttpServer in TLS mode + /// Start the `HttpServer` in TLS mode pub fn start_tls( &self, socket_addr: SocketAddr, diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index 8ae9eb3f5..6070e0d27 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -8,6 +8,7 @@ use crate::config::HttpTrackerConfig; use crate::http::server::HttpServer; use crate::tracker::TorrentTracker; +#[must_use] pub fn start_job(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.parse::().unwrap(); let ssl_enabled = config.ssl_enabled; diff --git a/src/jobs/torrent_cleanup.rs b/src/jobs/torrent_cleanup.rs index 3b572d780..3d7b49d6b 100644 --- a/src/jobs/torrent_cleanup.rs +++ b/src/jobs/torrent_cleanup.rs @@ -7,6 +7,7 @@ use tokio::task::JoinHandle; use crate::config::Configuration; use crate::tracker::TorrentTracker; +#[must_use] pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(&tracker); let interval = config.inactive_peer_cleanup_interval; diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index b0b315f44..ac7657858 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -26,9 +26,7 @@ pub async fn start_job(config: &Configuration, tracker: Arc) -> let join_handle = tokio::spawn(async move { let handel = server::start(bind_addr, tracker); - if tx.send(ApiServerJobStarted()).is_err() { - panic!("the start job dropped"); - } 
+ assert!(tx.send(ApiServerJobStarted()).is_ok(), "the start job dropped"); handel.await; }); diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index 90986455c..8bf839380 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -7,6 +7,7 @@ use crate::config::UdpTrackerConfig; use crate::tracker::TorrentTracker; use crate::udp::server::UdpServer; +#[must_use] pub fn start_job(config: &UdpTrackerConfig, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.clone(); diff --git a/src/protocol/clock/mod.rs b/src/protocol/clock/mod.rs index 4e15950e6..51197dba6 100644 --- a/src/protocol/clock/mod.rs +++ b/src/protocol/clock/mod.rs @@ -26,9 +26,11 @@ pub trait Time: Sized { } pub trait TimeNow: Time { + #[must_use] fn add(add_time: &Duration) -> Option { Self::now().checked_add(*add_time) } + #[must_use] fn sub(sub_time: &Duration) -> Option { Self::now().checked_sub(*sub_time) } @@ -240,7 +242,7 @@ mod stopped_clock { #[test] fn it_should_get_app_start_time() { - const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1662983731, 22312); + const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1_662_983_731, 22312); assert!(get_app_start_time() > TIME_AT_WRITING_THIS_TEST); } } diff --git a/src/protocol/clock/time_extent.rs b/src/protocol/clock/time_extent.rs index 3fa60de82..f975e9a04 100644 --- a/src/protocol/clock/time_extent.rs +++ b/src/protocol/clock/time_extent.rs @@ -37,6 +37,7 @@ pub const MAX: TimeExtent = TimeExtent { }; impl TimeExtent { + #[must_use] pub const fn from_sec(seconds: u64, amount: &TimeExtentMultiplier) -> Self { Self { increment: TimeExtentBase::from_secs(seconds), @@ -48,10 +49,10 @@ impl TimeExtent { fn checked_duration_from_nanos(time: u128) -> Result { const NANOS_PER_SEC: u32 = 1_000_000_000; - let secs = time.div_euclid(NANOS_PER_SEC as u128); - let nanos = time.rem_euclid(NANOS_PER_SEC as u128); + let secs = time.div_euclid(u128::from(NANOS_PER_SEC)); + let nanos = 
time.rem_euclid(u128::from(NANOS_PER_SEC)); - assert!(nanos < NANOS_PER_SEC as u128); + assert!(nanos < u128::from(NANOS_PER_SEC)); match u64::try_from(secs) { Err(error) => Err(error), @@ -94,14 +95,14 @@ impl Extent for TimeExtent { fn total(&self) -> Option> { self.increment .as_nanos() - .checked_mul(self.amount as u128) + .checked_mul(u128::from(self.amount)) .map(checked_duration_from_nanos) } fn total_next(&self) -> Option> { self.increment .as_nanos() - .checked_mul((self.amount as u128) + 1) + .checked_mul(u128::from(self.amount) + 1) .map(checked_duration_from_nanos) } } @@ -110,6 +111,7 @@ pub trait MakeTimeExtent: Sized where Clock: TimeNow, { + #[must_use] fn now(increment: &TimeExtentBase) -> Option> { Clock::now() .as_nanos() @@ -120,6 +122,7 @@ where }) } + #[must_use] fn now_after(increment: &TimeExtentBase, add_time: &Duration) -> Option> { match Clock::add(add_time) { None => None, @@ -134,6 +137,7 @@ where } } + #[must_use] fn now_before(increment: &TimeExtentBase, sub_time: &Duration) -> Option> { match Clock::sub(sub_time) { None => None, @@ -173,7 +177,7 @@ mod test { }; use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedTime}; - const TIME_EXTENT_VAL: TimeExtent = TimeExtent::from_sec(2, &239812388723); + const TIME_EXTENT_VAL: TimeExtent = TimeExtent::from_sec(2, &239_812_388_723); mod fn_checked_duration_from_nanos { use std::time::Duration; @@ -190,11 +194,11 @@ mod test { #[test] fn it_should_be_the_same_as_duration_implementation_for_u64_numbers() { assert_eq!( - checked_duration_from_nanos(1232143214343432).unwrap(), - Duration::from_nanos(1232143214343432) + checked_duration_from_nanos(1_232_143_214_343_432).unwrap(), + Duration::from_nanos(1_232_143_214_343_432) ); assert_eq!( - checked_duration_from_nanos(u64::MAX as u128).unwrap(), + checked_duration_from_nanos(u128::from(u64::MAX)).unwrap(), Duration::from_nanos(u64::MAX) ); } @@ -202,7 +206,7 @@ mod test { #[test] fn 
it_should_work_for_some_numbers_larger_than_u64() { assert_eq!( - checked_duration_from_nanos(TIME_EXTENT_VAL.amount as u128 * NANOS_PER_SEC as u128).unwrap(), + checked_duration_from_nanos(u128::from(TIME_EXTENT_VAL.amount) * u128::from(NANOS_PER_SEC)).unwrap(), Duration::from_secs(TIME_EXTENT_VAL.amount) ); } @@ -515,14 +519,14 @@ mod test { assert_eq!( DefaultTimeExtentMaker::now_before( - &TimeExtentBase::from_secs(u32::MAX as u64), - &Duration::from_secs(u32::MAX as u64) + &TimeExtentBase::from_secs(u64::from(u32::MAX)), + &Duration::from_secs(u64::from(u32::MAX)) ) .unwrap() .unwrap(), TimeExtent { - increment: TimeExtentBase::from_secs(u32::MAX as u64), - amount: 4294967296 + increment: TimeExtentBase::from_secs(u64::from(u32::MAX)), + amount: 4_294_967_296 } ); } diff --git a/src/protocol/common.rs b/src/protocol/common.rs index efeb328c9..c5c9b4578 100644 --- a/src/protocol/common.rs +++ b/src/protocol/common.rs @@ -233,14 +233,16 @@ impl std::fmt::Display for PeerId { } impl PeerId { + #[must_use] pub fn get_id(&self) -> Option { let buff_size = self.0.len() * 2; let mut tmp: Vec = vec![0; buff_size]; binascii::bin2hex(&self.0, &mut tmp).unwrap(); - std::str::from_utf8(&tmp).ok().map(|id| id.to_string()) + std::str::from_utf8(&tmp).ok().map(std::string::ToString::to_string) } + #[must_use] pub fn get_client_name(&self) -> Option<&'static str> { if self.0[0] == b'M' { return Some("BitTorrent"); diff --git a/src/protocol/crypto.rs b/src/protocol/crypto.rs index 18cfaf5e6..6e1517ef8 100644 --- a/src/protocol/crypto.rs +++ b/src/protocol/crypto.rs @@ -89,8 +89,8 @@ pub mod keys { #[test] fn it_should_have_a_large_random_seed() { - assert!(u128::from_ne_bytes((*RANDOM_SEED)[..16].try_into().unwrap()) > u64::MAX as u128); - assert!(u128::from_ne_bytes((*RANDOM_SEED)[16..].try_into().unwrap()) > u64::MAX as u128); + assert!(u128::from_ne_bytes((*RANDOM_SEED)[..16].try_into().unwrap()) > u128::from(u64::MAX)); + 
assert!(u128::from_ne_bytes((*RANDOM_SEED)[16..].try_into().unwrap()) > u128::from(u64::MAX)); } } } diff --git a/src/stats.rs b/src/stats.rs index 22b74c8d3..738909934 100644 --- a/src/stats.rs +++ b/src/stats.rs @@ -1,5 +1,6 @@ use crate::tracker::statistics::{StatsRepository, StatsTracker, TrackerStatisticsEventSender}; +#[must_use] pub fn setup_statistics(tracker_usage_statistics: bool) -> (Option>, StatsRepository) { let mut stats_event_sender = None; diff --git a/src/tracker/key.rs b/src/tracker/key.rs index 6d3f3c320..881dac877 100644 --- a/src/tracker/key.rs +++ b/src/tracker/key.rs @@ -9,6 +9,7 @@ use serde::Serialize; use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, Time, TimeNow}; use crate::protocol::common::AUTH_KEY_LENGTH; +#[must_use] pub fn generate_auth_key(lifetime: Duration) -> AuthKey { let key: String = thread_rng() .sample_iter(&Alphanumeric) @@ -43,6 +44,7 @@ pub struct AuthKey { } impl AuthKey { + #[must_use] pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { if let Ok(key) = String::from_utf8(Vec::from(key_buffer)) { Some(AuthKey { key, valid_until: None }) @@ -51,6 +53,7 @@ impl AuthKey { } } + #[must_use] pub fn from_string(key: &str) -> Option { if key.len() != AUTH_KEY_LENGTH { None diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index f31347e3e..6aae06a4b 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -201,7 +201,7 @@ impl TorrentTracker { match read_lock.get(info_hash) { None => vec![], - Some(entry) => entry.get_peers(Some(client_addr)).into_iter().cloned().collect(), + Some(entry) => entry.get_peers(Some(client_addr)).into_iter().copied().collect(), } } @@ -211,7 +211,7 @@ impl TorrentTracker { match read_lock.get(info_hash) { None => vec![], - Some(entry) => entry.get_peers(None).into_iter().cloned().collect(), + Some(entry) => entry.get_peers(None).into_iter().copied().collect(), } } @@ -236,9 +236,9 @@ impl TorrentTracker { let (seeders, completed, leechers) = 
torrent_entry.get_stats(); TorrentStats { + completed, seeders, leechers, - completed, } } diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 77613e080..a5f000eca 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -26,6 +26,7 @@ pub struct TorrentPeer { } impl TorrentPeer { + #[must_use] pub fn from_udp_announce_request( announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_ip: IpAddr, @@ -44,6 +45,7 @@ impl TorrentPeer { } } + #[must_use] pub fn from_http_announce_request( announce_request: &AnnounceRequest, remote_ip: IpAddr, @@ -63,7 +65,7 @@ impl TorrentPeer { }; TorrentPeer { - peer_id: announce_request.peer_id.clone(), + peer_id: announce_request.peer_id, peer_addr, updated: DefaultClock::now(), uploaded: NumberOfBytes(announce_request.uploaded as i64), @@ -74,6 +76,7 @@ impl TorrentPeer { } // potentially substitute localhost ip with external ip + #[must_use] pub fn peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip: IpAddr, host_opt_ip: Option, port: u16) -> SocketAddr { if let Some(host_ip) = host_opt_ip.filter(|_| remote_ip.is_loopback()) { SocketAddr::new(host_ip, port) @@ -82,6 +85,7 @@ impl TorrentPeer { } } + #[must_use] pub fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 50804a5f4..609f036aa 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use async_trait::async_trait; use log::debug; #[cfg(test)] -use mockall::{automock, predicate::*}; +use mockall::{automock, predicate::str}; use tokio::sync::mpsc::error::SendError; use tokio::sync::mpsc::{Receiver, Sender}; use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; @@ -47,6 +47,7 @@ impl Default for TrackerStatistics { } impl TrackerStatistics { + #[must_use] pub fn new() -> Self { Self { tcp4_connections_handled: 0, @@ -76,12 +77,14 @@ impl Default for StatsTracker { } impl StatsTracker { + #[must_use] 
pub fn new() -> Self { Self { stats_repository: StatsRepository::new(), } } + #[must_use] pub fn new_active_instance() -> (Box, StatsRepository) { let mut stats_tracker = Self::new(); @@ -184,6 +187,7 @@ impl Default for StatsRepository { } impl StatsRepository { + #[must_use] pub fn new() -> Self { Self { stats: Arc::new(RwLock::new(TrackerStatistics::new())), diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index f23858949..46608643d 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -16,6 +16,7 @@ pub struct TorrentEntry { } impl TorrentEntry { + #[must_use] pub fn new() -> TorrentEntry { TorrentEntry { peers: std::collections::BTreeMap::new(), @@ -47,6 +48,7 @@ impl TorrentEntry { did_torrent_stats_change } + #[must_use] pub fn get_peers(&self, client_addr: Option<&SocketAddr>) -> Vec<&TorrentPeer> { self.peers .values() @@ -70,6 +72,7 @@ impl TorrentEntry { .collect() } + #[must_use] pub fn get_stats(&self) -> (u32, u32, u32) { let seeders: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; let leechers: u32 = self.peers.len() as u32 - seeders; @@ -77,7 +80,7 @@ impl TorrentEntry { } pub fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { - let current_cutoff = DefaultClock::sub(&Duration::from_secs(max_peer_timeout as u64)).unwrap_or_default(); + let current_cutoff = DefaultClock::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); self.peers.retain(|_, peer| peer.updated > current_cutoff); } } @@ -358,7 +361,7 @@ mod tests { let now = WorkingClock::now(); StoppedClock::local_set(&now); - let timeout_seconds_before_now = now.sub(Duration::from_secs(timeout as u64)); + let timeout_seconds_before_now = now.sub(Duration::from_secs(u64::from(timeout))); let inactive_peer = TorrentPeerBuilder::default() .updated_at(timeout_seconds_before_now.sub(Duration::from_secs(1))) .into(); diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs index ef241245a..b18940dfc 
100644 --- a/src/udp/connection_cookie.rs +++ b/src/udp/connection_cookie.rs @@ -11,14 +11,17 @@ pub type SinceUnixEpochTimeExtent = TimeExtent; pub const COOKIE_LIFETIME: TimeExtent = TimeExtent::from_sec(2, &60); +#[must_use] pub fn from_connection_id(connection_id: &ConnectionId) -> Cookie { connection_id.0.to_le_bytes() } +#[must_use] pub fn into_connection_id(connection_cookie: &Cookie) -> ConnectionId { ConnectionId(i64::from_le_bytes(*connection_cookie)) } +#[must_use] pub fn make_connection_cookie(remote_address: &SocketAddr) -> Cookie { let time_extent = cookie_builder::get_last_time_extent(); diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 30b33225c..81578e9c3 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -188,7 +188,7 @@ pub async fn handle_scrape( let mut torrent_stats: Vec = Vec::new(); - for info_hash in request.info_hashes.iter() { + for info_hash in &request.info_hashes { let info_hash = InfoHash(info_hash.0); let scrape_entry = match db.get(&info_hash) { diff --git a/src/udp/mod.rs b/src/udp/mod.rs index 327f03eed..2a8d42d9f 100644 --- a/src/udp/mod.rs +++ b/src/udp/mod.rs @@ -9,4 +9,4 @@ pub type Port = u16; pub type TransactionId = i64; pub const MAX_PACKET_SIZE: usize = 1496; -pub const PROTOCOL_ID: i64 = 0x41727101980; +pub const PROTOCOL_ID: i64 = 0x0417_2710_1980; diff --git a/src/udp/request.rs b/src/udp/request.rs index 67aaeb57f..53d646f1a 100644 --- a/src/udp/request.rs +++ b/src/udp/request.rs @@ -23,6 +23,7 @@ pub struct AnnounceRequestWrapper { } impl AnnounceRequestWrapper { + #[must_use] pub fn new(announce_request: AnnounceRequest) -> Self { AnnounceRequestWrapper { announce_request: announce_request.clone(), diff --git a/tests/api.rs b/tests/api.rs index a5ae79621..14fefa50e 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -1,6 +1,6 @@ /// Integration tests for the tracker API /// -/// cargo test tracker_api -- --nocapture +/// cargo test `tracker_api` -- --nocapture extern crate rand; mod common; @@ 
-192,7 +192,7 @@ mod tracker_api { let torrent_peer = TorrentPeer { peer_id: PeerId(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1669397478934, 0), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), diff --git a/tests/udp.rs b/tests/udp.rs index 7a0d883a5..54caeaa68 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -1,6 +1,6 @@ /// Integration tests for UDP tracker server /// -/// cargo test udp_tracker_server -- --nocapture +/// cargo test `udp_tracker_server` -- --nocapture extern crate rand; mod common; @@ -116,7 +116,7 @@ mod udp_tracker_server { } } - /// Creates a new UdpClient connected to a Udp server + /// Creates a new `UdpClient` connected to a Udp server async fn new_connected_udp_client(remote_address: &str) -> UdpClient { let client = UdpClient::bind(&source_address(ephemeral_random_port())).await; client.connect(remote_address).await; @@ -155,7 +155,7 @@ mod udp_tracker_server { } } - /// Creates a new UdpTrackerClient connected to a Udp Tracker server + /// Creates a new `UdpTrackerClient` connected to a Udp Tracker server async fn new_connected_udp_tracker_client(remote_address: &str) -> UdpTrackerClient { let udp_client = new_connected_udp_client(remote_address).await; UdpTrackerClient { udp_client } From f74c93346b2d6aba776867a3db2777101c40a20f Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 18:24:56 +0100 Subject: [PATCH 157/435] clippy: fix src/http/response.rs --- src/http/handlers.rs | 14 +++++++------- src/http/response.rs | 29 ++++++++++++++++++++--------- 2 files changed, 27 insertions(+), 16 deletions(-) diff --git a/src/http/handlers.rs b/src/http/handlers.rs index cd521b43b..fc55c7c5b 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -9,9 +9,9 @@ use warp::{reject, Rejection, Reply}; use super::errors::ServerError; 
use super::request::{AnnounceRequest, ScrapeRequest}; -use super::response::{AnnounceResponse, Peer, ScrapeResponse, ScrapeResponseEntry}; -use crate::http::response::ErrorResponse; -use crate::http::WebResult; +use super::response::{Announce, Peer, Scrape, ScrapeResponseEntry}; +use super::WebResult; +use crate::http::response::Error; use crate::protocol::common::InfoHash; use crate::tracker::key::AuthKey; use crate::tracker::peer::TorrentPeer; @@ -151,7 +151,7 @@ fn send_announce_response( }) .collect(); - let res = AnnounceResponse { + let res = Announce { interval, interval_min, complete: torrent_stats.seeders, @@ -172,7 +172,7 @@ fn send_announce_response( /// Send scrape response fn send_scrape_response(files: HashMap) -> WebResult { - let res = ScrapeResponse { files }; + let res = Scrape { files }; match res.write() { Ok(body) => Ok(Response::new(body)), @@ -184,12 +184,12 @@ fn send_scrape_response(files: HashMap) -> WebRes pub async fn send_error(r: Rejection) -> std::result::Result { let body = if let Some(server_error) = r.find::() { debug!("{:?}", server_error); - ErrorResponse { + Error { failure_reason: server_error.to_string(), } .write() } else { - ErrorResponse { + Error { failure_reason: ServerError::InternalServerError.to_string(), } .write() diff --git a/src/http/response.rs b/src/http/response.rs index cb01068fa..98ea6fe73 100644 --- a/src/http/response.rs +++ b/src/http/response.rs @@ -1,5 +1,4 @@ use std::collections::HashMap; -use std::error::Error; use std::io::Write; use std::net::IpAddr; @@ -16,7 +15,7 @@ pub struct Peer { } #[derive(Serialize)] -pub struct AnnounceResponse { +pub struct Announce { pub interval: u32, #[serde(rename = "min interval")] pub interval_min: u32, @@ -26,13 +25,19 @@ pub struct AnnounceResponse { pub peers: Vec, } -impl AnnounceResponse { +impl Announce { + /// # Panics + /// + /// It would panic if the `Announce` struct would contain an inappropriate type. 
#[must_use] pub fn write(&self) -> String { serde_bencode::to_string(&self).unwrap() } - pub fn write_compact(&self) -> Result, Box> { + /// # Errors + /// + /// Will return `Err` if internally interrupted. + pub fn write_compact(&self) -> Result, Box> { let mut peers_v4: Vec = Vec::new(); let mut peers_v6: Vec = Vec::new(); @@ -80,12 +85,15 @@ pub struct ScrapeResponseEntry { } #[derive(Serialize)] -pub struct ScrapeResponse { +pub struct Scrape { pub files: HashMap, } -impl ScrapeResponse { - pub fn write(&self) -> Result, Box> { +impl Scrape { + /// # Errors + /// + /// Will return `Err` if internally interrupted. + pub fn write(&self) -> Result, Box> { let mut bytes: Vec = Vec::new(); bytes.write_all(b"d5:filesd")?; @@ -109,12 +117,15 @@ impl ScrapeResponse { } #[derive(Serialize)] -pub struct ErrorResponse { +pub struct Error { #[serde(rename = "failure reason")] pub failure_reason: String, } -impl ErrorResponse { +impl Error { + /// # Panics + /// + /// It would panic if the `Error` struct would contain an inappropriate type. 
#[must_use] pub fn write(&self) -> String { serde_bencode::to_string(&self).unwrap() From a9f760b8ac8ab9562473347335e10a99db53571d Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 18:25:56 +0100 Subject: [PATCH 158/435] clippy: fix src/http/request.rs --- src/http/filters.rs | 14 +++++++------- src/http/handlers.rs | 15 +++++++-------- src/http/request.rs | 4 ++-- src/tracker/peer.rs | 14 +++++--------- 4 files changed, 21 insertions(+), 26 deletions(-) diff --git a/src/http/filters.rs b/src/http/filters.rs index d33acbcfa..f28909c7f 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -6,7 +6,7 @@ use std::sync::Arc; use warp::{reject, Filter, Rejection}; use super::errors::ServerError; -use super::request::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest}; +use super::request::{Announce, AnnounceRequestQuery, Scrape}; use super::WebResult; use crate::protocol::common::{InfoHash, PeerId, MAX_SCRAPE_TORRENTS}; use crate::tracker::key::AuthKey; @@ -47,7 +47,7 @@ pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter impl Filter + Clone { +pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::filters::query::query::() .and(with_info_hash()) .and(with_peer_id()) @@ -56,7 +56,7 @@ pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter impl Filter + Clone { +pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::any() .and(with_info_hash()) .and(with_peer_addr(on_reverse_proxy)) @@ -159,8 +159,8 @@ async fn announce_request( info_hashes: Vec, peer_id: PeerId, peer_addr: IpAddr, -) -> WebResult { - Ok(AnnounceRequest { +) -> WebResult { + Ok(Announce { info_hash: info_hashes[0], peer_addr, downloaded: announce_request_query.downloaded.unwrap_or(0), @@ -174,6 +174,6 @@ async fn announce_request( } /// Parse `ScrapeRequest` from `InfoHash` -async fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { - Ok(ScrapeRequest { info_hashes, peer_addr }) 
+async fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { + Ok(Scrape { info_hashes, peer_addr }) } diff --git a/src/http/handlers.rs b/src/http/handlers.rs index fc55c7c5b..a312ff105 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -8,9 +8,8 @@ use warp::http::Response; use warp::{reject, Rejection, Reply}; use super::errors::ServerError; -use super::request::{AnnounceRequest, ScrapeRequest}; -use super::response::{Announce, Peer, Scrape, ScrapeResponseEntry}; -use super::WebResult; +use super::response::{self, Peer, ScrapeResponseEntry}; +use super::{request, WebResult}; use crate::http::response::Error; use crate::protocol::common::InfoHash; use crate::tracker::key::AuthKey; @@ -44,7 +43,7 @@ pub async fn authenticate( /// Handle announce request pub async fn handle_announce( - announce_request: AnnounceRequest, + announce_request: request::Announce, auth_key: Option, tracker: Arc, ) -> WebResult { @@ -86,7 +85,7 @@ pub async fn handle_announce( /// Handle scrape request pub async fn handle_scrape( - scrape_request: ScrapeRequest, + scrape_request: request::Scrape, auth_key: Option, tracker: Arc, ) -> WebResult { @@ -136,7 +135,7 @@ pub async fn handle_scrape( /// Send announce response fn send_announce_response( - announce_request: &AnnounceRequest, + announce_request: &request::Announce, torrent_stats: TorrentStats, peers: Vec, interval: u32, @@ -151,7 +150,7 @@ fn send_announce_response( }) .collect(); - let res = Announce { + let res = response::Announce { interval, interval_min, complete: torrent_stats.seeders, @@ -172,7 +171,7 @@ fn send_announce_response( /// Send scrape response fn send_scrape_response(files: HashMap) -> WebResult { - let res = Scrape { files }; + let res = response::Scrape { files }; match res.write() { Ok(body) => Ok(Response::new(body)), diff --git a/src/http/request.rs b/src/http/request.rs index 2d72a1a3c..b812e1173 100644 --- a/src/http/request.rs +++ b/src/http/request.rs @@ -17,7 +17,7 @@ pub 
struct AnnounceRequestQuery { } #[derive(Debug)] -pub struct AnnounceRequest { +pub struct Announce { pub info_hash: InfoHash, pub peer_addr: IpAddr, pub downloaded: Bytes, @@ -29,7 +29,7 @@ pub struct AnnounceRequest { pub compact: Option, } -pub struct ScrapeRequest { +pub struct Scrape { pub info_hashes: Vec, pub peer_addr: IpAddr, } diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index a5f000eca..a30723d00 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -4,7 +4,7 @@ use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde; use serde::Serialize; -use crate::http::request::AnnounceRequest; +use crate::http::request::Announce; use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef, PeerId}; use crate::protocol::utils::ser_unix_time_value; @@ -46,11 +46,7 @@ impl TorrentPeer { } #[must_use] - pub fn from_http_announce_request( - announce_request: &AnnounceRequest, - remote_ip: IpAddr, - host_opt_ip: Option, - ) -> Self { + pub fn from_http_announce_request(announce_request: &Announce, remote_ip: IpAddr, host_opt_ip: Option) -> Self { let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); let event: AnnounceEvent = if let Some(event) = &announce_request.event { @@ -285,12 +281,12 @@ mod test { mod torrent_peer_constructor_from_for_http_requests { use std::net::{IpAddr, Ipv4Addr}; - use crate::http::request::AnnounceRequest; + use crate::http::request::Announce; use crate::protocol::common::{InfoHash, PeerId}; use crate::tracker::peer::TorrentPeer; - fn sample_http_announce_request(peer_addr: IpAddr, port: u16) -> AnnounceRequest { - AnnounceRequest { + fn sample_http_announce_request(peer_addr: IpAddr, port: u16) -> Announce { + Announce { info_hash: InfoHash([0u8; 20]), peer_addr, downloaded: 0u64, From 21b6e777375d2007c27cdc0d9cd9820857809f97 Mon Sep 17 00:00:00 2001 From: Cameron 
Garnham Date: Wed, 23 Nov 2022 18:33:09 +0100 Subject: [PATCH 159/435] clippy: fix (ignore) src/config.rs --- cSpell.json | 1 + src/config.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/cSpell.json b/cSpell.json index a2c4235c4..cc3359d58 100644 --- a/cSpell.json +++ b/cSpell.json @@ -7,6 +7,7 @@ "bencode", "binascii", "Bitflu", + "bools", "bufs", "byteorder", "canonicalize", diff --git a/src/config.rs b/src/config.rs index 1199c7fe7..dbfb4a140 100644 --- a/src/config.rs +++ b/src/config.rs @@ -37,6 +37,7 @@ pub struct HttpApiConfig { pub access_tokens: HashMap, } +#[allow(clippy::struct_excessive_bools)] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct Configuration { pub log_level: Option, From 941e9825dcf0c1360212be87b90234e82199d983 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 18:44:35 +0100 Subject: [PATCH 160/435] clippy: fix src/api/resources/auth_key_resource.rs --- src/api/resources/auth_key_resource.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/api/resources/auth_key_resource.rs b/src/api/resources/auth_key_resource.rs index 4fc5d0cf9..3bc0cefb7 100644 --- a/src/api/resources/auth_key_resource.rs +++ b/src/api/resources/auth_key_resource.rs @@ -54,7 +54,7 @@ mod tests { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(DefaultClock::add(&Duration::new(duration_in_secs, 0)).unwrap()) } - ) + ); } #[test] @@ -72,7 +72,7 @@ mod tests { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(duration_in_secs) } - ) + ); } #[test] From a433c825efa2c4df59fd9e8375da623de5be3bf1 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 19:13:54 +0100 Subject: [PATCH 161/435] clippy: fix src/api/server.rs --- src/api/server.rs | 58 +++++++++++++++++++++-------------------- src/jobs/tracker_api.rs | 2 +- 2 files changed, 31 insertions(+), 29 deletions(-) diff --git a/src/api/server.rs 
b/src/api/server.rs index ce272b3ac..f9e5bc368 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -59,7 +59,8 @@ fn authenticate(tokens: HashMap) -> impl Filter) -> impl warp::Future { +#[allow(clippy::too_many_lines)] +pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl warp::Future { // GET /api/torrents?offset=:u32&limit=:u32 // View torrent list let api_torrents = tracker.clone(); @@ -124,31 +125,31 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let db = tracker.get_torrents().await; - let _: Vec<_> = db - .iter() - .map(|(_info_hash, torrent_entry)| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - results.seeders += seeders; - results.completed += completed; - results.leechers += leechers; - results.torrents += 1; - }) - .collect(); + db.values().for_each(|torrent_entry| { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + results.seeders += seeders; + results.completed += completed; + results.leechers += leechers; + results.torrents += 1; + }); let stats = tracker.get_stats().await; - results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; - results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; - results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; - results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; - results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; - results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; - results.udp4_connections_handled = stats.udp4_connections_handled as u32; - results.udp4_announces_handled = stats.udp4_announces_handled as u32; - results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; - results.udp6_connections_handled = stats.udp6_connections_handled as u32; - results.udp6_announces_handled = stats.udp6_announces_handled as u32; - results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; + #[allow(clippy::cast_possible_truncation)] + { + 
results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; + results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; + results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; + results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; + results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; + results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; + results.udp4_connections_handled = stats.udp4_connections_handled as u32; + results.udp4_announces_handled = stats.udp4_announces_handled as u32; + results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; + results.udp6_connections_handled = stats.udp6_connections_handled as u32; + results.udp6_announces_handled = stats.udp6_announces_handled as u32; + results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; + } Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) }); @@ -168,11 +169,12 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let db = tracker.get_torrents().await; let torrent_entry_option = db.get(&info_hash); - if torrent_entry_option.is_none() { - return Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")); - } - - let torrent_entry = torrent_entry_option.unwrap(); + let torrent_entry = match torrent_entry_option { + Some(torrent_entry) => torrent_entry, + None => { + return Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")); + } + }; let (seeders, completed, leechers) = torrent_entry.get_stats(); let peers = torrent_entry.get_peers(None); diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index ac7657858..4e2dcd0c9 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -24,7 +24,7 @@ pub async fn start_job(config: &Configuration, tracker: Arc) -> // Run the API server let join_handle = tokio::spawn(async move { - let handel = server::start(bind_addr, tracker); + let handel = server::start(bind_addr, 
&tracker); assert!(tx.send(ApiServerJobStarted()).is_ok(), "the start job dropped"); From 2ba748925cb748259ed92c6de692624c9bc68cdc Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 19:32:22 +0100 Subject: [PATCH 162/435] clippy: fix src/protocol/common.rs --- src/protocol/common.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/protocol/common.rs b/src/protocol/common.rs index c5c9b4578..d6a98cf03 100644 --- a/src/protocol/common.rs +++ b/src/protocol/common.rs @@ -212,9 +212,8 @@ impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { serde::de::Unexpected::Str(v), &"expected a hexadecimal string", )); - } else { - Ok(res) - } + }; + Ok(res) } } @@ -249,8 +248,7 @@ impl PeerId { } if self.0[0] == b'-' { let name = match &self.0[1..3] { - b"AG" => "Ares", - b"A~" => "Ares", + b"AG" | b"A~" => "Ares", b"AR" => "Arctic", b"AV" => "Avicora", b"AX" => "BitPump", @@ -333,6 +331,11 @@ impl Serialize for PeerId { client: Option<&'a str>, } + let buff_size = self.0.len() * 2; + let mut tmp: Vec = vec![0; buff_size]; + binascii::bin2hex(&self.0, &mut tmp).unwrap(); + let id = std::str::from_utf8(&tmp).ok(); + let obj = PeerIdInfo { id: self.get_id(), client: self.get_client_name(), From 8e3115f80d7f98b70204caf83c253337d640da03 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 19:45:29 +0100 Subject: [PATCH 163/435] clippy: fix src/config.rs --- src/config.rs | 46 +++++++++++++++++++++++++--------------- src/jobs/http_tracker.rs | 4 ++-- src/jobs/udp_tracker.rs | 4 ++-- 3 files changed, 33 insertions(+), 21 deletions(-) diff --git a/src/config.rs b/src/config.rs index dbfb4a140..ac15f96b3 100644 --- a/src/config.rs +++ b/src/config.rs @@ -13,14 +13,14 @@ use crate::databases::database::DatabaseDrivers; use crate::tracker::mode::TrackerMode; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] -pub struct UdpTrackerConfig { +pub struct UdpTracker { pub enabled: bool, pub bind_address: String, } 
#[serde_as] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] -pub struct HttpTrackerConfig { +pub struct HttpTracker { pub enabled: bool, pub bind_address: String, pub ssl_enabled: bool, @@ -31,7 +31,7 @@ pub struct HttpTrackerConfig { } #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] -pub struct HttpApiConfig { +pub struct HttpApi { pub enabled: bool, pub bind_address: String, pub access_tokens: HashMap, @@ -53,13 +53,15 @@ pub struct Configuration { pub persistent_torrent_completed_stat: bool, pub inactive_peer_cleanup_interval: u64, pub remove_peerless_torrents: bool, - pub udp_trackers: Vec, - pub http_trackers: Vec, - pub http_api: HttpApiConfig, + pub udp_trackers: Vec, + pub http_trackers: Vec, + pub http_api: HttpApi, } #[derive(Debug)] pub enum ConfigurationError { + Message(String), + ConfigError(ConfigError), IOError(std::io::Error), ParseError(toml::de::Error), TrackerModeIncompatible, @@ -68,9 +70,11 @@ pub enum ConfigurationError { impl std::fmt::Display for ConfigurationError { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { + ConfigurationError::Message(e) => e.fmt(f), + ConfigurationError::ConfigError(e) => e.fmt(f), ConfigurationError::IOError(e) => e.fmt(f), ConfigurationError::ParseError(e) => e.fmt(f), - _ => write!(f, "{:?}", self), + ConfigurationError::TrackerModeIncompatible => write!(f, "{:?}", self), } } } @@ -107,7 +111,7 @@ impl Configuration { remove_peerless_torrents: true, udp_trackers: Vec::new(), http_trackers: Vec::new(), - http_api: HttpApiConfig { + http_api: HttpApi { enabled: true, bind_address: String::from("127.0.0.1:1212"), access_tokens: [(String::from("admin"), String::from("MyAccessToken"))] @@ -116,11 +120,11 @@ impl Configuration { .collect(), }, }; - configuration.udp_trackers.push(UdpTrackerConfig { + configuration.udp_trackers.push(UdpTracker { enabled: false, bind_address: String::from("0.0.0.0:6969"), }); - configuration.http_trackers.push(HttpTrackerConfig { + 
configuration.http_trackers.push(HttpTracker { enabled: false, bind_address: String::from("0.0.0.0:6969"), ssl_enabled: false, @@ -130,31 +134,39 @@ impl Configuration { configuration } - pub fn load_from_file(path: &str) -> Result { + /// # Errors + /// + /// Will return `Err` if `path` does not exist or has a bad configuration. + pub fn load_from_file(path: &str) -> Result { let config_builder = Config::builder(); #[allow(unused_assignments)] let mut config = Config::default(); if Path::new(path).exists() { - config = config_builder.add_source(File::with_name(path)).build()?; + config = config_builder + .add_source(File::with_name(path)) + .build() + .map_err(ConfigurationError::ConfigError)?; } else { eprintln!("No config file found."); eprintln!("Creating config file.."); let config = Configuration::default(); - let _ = config.save_to_file(path); - return Err(ConfigError::Message( + config.save_to_file(path)?; + return Err(ConfigurationError::Message( "Please edit the config.TOML in the root folder and restart the tracker.".to_string(), )); } - let torrust_config: Configuration = config - .try_deserialize() - .map_err(|e| ConfigError::Message(format!("Errors while processing config: {}.", e)))?; + let torrust_config: Configuration = config.try_deserialize().map_err(ConfigurationError::ConfigError)?; Ok(torrust_config) } + /// # Errors + /// + /// Will return `Err` if `filename` does not exist or the user does not have + /// permission to read it. 
pub fn save_to_file(&self, path: &str) -> Result<(), ConfigurationError> { let toml_string = toml::to_string(self).expect("Could not encode TOML value"); fs::write(path, toml_string).expect("Could not write to file!"); diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index 6070e0d27..f6023a4e0 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -4,12 +4,12 @@ use std::sync::Arc; use log::{info, warn}; use tokio::task::JoinHandle; -use crate::config::HttpTrackerConfig; +use crate::config::HttpTracker; use crate::http::server::HttpServer; use crate::tracker::TorrentTracker; #[must_use] -pub fn start_job(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { +pub fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.parse::().unwrap(); let ssl_enabled = config.ssl_enabled; let ssl_cert_path = config.ssl_cert_path.clone(); diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index 8bf839380..1b4bc745c 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -3,12 +3,12 @@ use std::sync::Arc; use log::{error, info, warn}; use tokio::task::JoinHandle; -use crate::config::UdpTrackerConfig; +use crate::config::UdpTracker; use crate::tracker::TorrentTracker; use crate::udp::server::UdpServer; #[must_use] -pub fn start_job(config: &UdpTrackerConfig, tracker: Arc) -> JoinHandle<()> { +pub fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.clone(); tokio::spawn(async move { From 87160bdf2ffbfc91853037afda231c08bb2491bb Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 19:49:28 +0100 Subject: [PATCH 164/435] clippy: fix src/databases/database.rs --- src/config.rs | 6 +++--- src/databases/database.rs | 14 ++++++++++---- src/tracker/mod.rs | 2 +- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/src/config.rs b/src/config.rs index ac15f96b3..6eb83ad16 100644 --- a/src/config.rs 
+++ b/src/config.rs @@ -9,7 +9,7 @@ use serde::{Deserialize, Serialize}; use serde_with::{serde_as, NoneAsEmptyString}; use {std, toml}; -use crate::databases::database::DatabaseDrivers; +use crate::databases::database::Drivers; use crate::tracker::mode::TrackerMode; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] @@ -42,7 +42,7 @@ pub struct HttpApi { pub struct Configuration { pub log_level: Option, pub mode: TrackerMode, - pub db_driver: DatabaseDrivers, + pub db_driver: Drivers, pub db_path: String, pub announce_interval: u32, pub min_announce_interval: u32, @@ -98,7 +98,7 @@ impl Configuration { let mut configuration = Configuration { log_level: Option::from(String::from("info")), mode: TrackerMode::Public, - db_driver: DatabaseDrivers::Sqlite3, + db_driver: Drivers::Sqlite3, db_path: String::from("data.db"), announce_interval: 120, min_announce_interval: 120, diff --git a/src/databases/database.rs b/src/databases/database.rs index 87a91ddeb..212224b25 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -8,18 +8,21 @@ use crate::protocol::common::InfoHash; use crate::tracker::key::AuthKey; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] -pub enum DatabaseDrivers { +pub enum Drivers { Sqlite3, MySQL, } -pub fn connect_database(db_driver: &DatabaseDrivers, db_path: &str) -> Result, r2d2::Error> { +/// # Errors +/// +/// Will return `r2d2::Error` if `db_path` is not able to create a database. 
+pub fn connect(db_driver: &Drivers, db_path: &str) -> Result, r2d2::Error> { let database: Box = match db_driver { - DatabaseDrivers::Sqlite3 => { + Drivers::Sqlite3 => { let db = SqliteDatabase::new(db_path)?; Box::new(db) } - DatabaseDrivers::MySQL => { + Drivers::MySQL => { let db = MysqlDatabase::new(db_path)?; Box::new(db) } @@ -32,6 +35,9 @@ pub fn connect_database(db_driver: &DatabaseDrivers, db_path: &str) -> Result Result<(), Error>; async fn load_persistent_torrents(&self) -> Result, Error>; diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 6aae06a4b..680f2635d 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -40,7 +40,7 @@ impl TorrentTracker { stats_event_sender: Option>, stats_repository: StatsRepository, ) -> Result { - let database = database::connect_database(&config.db_driver, &config.db_path)?; + let database = database::connect(&config.db_driver, &config.db_path)?; Ok(TorrentTracker { config: config.clone(), From 38eabc4ae5647b6d885bcff8dfb32adc04c62b3b Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 19:56:53 +0100 Subject: [PATCH 165/435] clippy: fix src/databases/mysql.rs --- src/databases/database.rs | 4 ++-- src/databases/mysql.rs | 15 +++++++++------ 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/databases/database.rs b/src/databases/database.rs index 212224b25..7344010d8 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -2,7 +2,7 @@ use async_trait::async_trait; use derive_more::{Display, Error}; use serde::{Deserialize, Serialize}; -use crate::databases::mysql::MysqlDatabase; +use crate::databases::mysql::Mysql; use crate::databases::sqlite::SqliteDatabase; use crate::protocol::common::InfoHash; use crate::tracker::key::AuthKey; @@ -23,7 +23,7 @@ pub fn connect(db_driver: &Drivers, db_path: &str) -> Result, Box::new(db) } Drivers::MySQL => { - let db = MysqlDatabase::new(db_path)?; + let db = Mysql::new(db_path)?; Box::new(db) } }; diff --git 
a/src/databases/mysql.rs b/src/databases/mysql.rs index 5e7410ac2..5db358d5a 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -13,11 +13,14 @@ use crate::databases::database::{Database, Error}; use crate::protocol::common::{InfoHash, AUTH_KEY_LENGTH}; use crate::tracker::key::AuthKey; -pub struct MysqlDatabase { +pub struct Mysql { pool: Pool, } -impl MysqlDatabase { +impl Mysql { + /// # Errors + /// + /// Will return `r2d2::Error` if `db_path` is not able to create `MySQL` database. pub fn new(db_path: &str) -> Result { let opts = Opts::from_url(db_path).expect("Failed to connect to MySQL database."); let builder = OptsBuilder::from_opts(opts); @@ -31,7 +34,7 @@ impl MysqlDatabase { } #[async_trait] -impl Database for MysqlDatabase { +impl Database for Mysql { fn create_database_tables(&self) -> Result<(), database::Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( @@ -57,7 +60,7 @@ impl Database for MysqlDatabase { PRIMARY KEY (`id`), UNIQUE (`key`) );", - AUTH_KEY_LENGTH as i8 + i8::try_from(AUTH_KEY_LENGTH).expect("Auth Key Length Should fit within a i8!") ); let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; @@ -95,7 +98,7 @@ impl Database for MysqlDatabase { "SELECT `key`, valid_until FROM `keys`", |(key, valid_until): (String, i64)| AuthKey { key, - valid_until: Some(Duration::from_secs(valid_until as u64)), + valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }, ) .map_err(|_| database::Error::QueryReturnedNoRows)?; @@ -188,7 +191,7 @@ impl Database for MysqlDatabase { { Some((key, valid_until)) => Ok(AuthKey { key, - valid_until: Some(Duration::from_secs(valid_until as u64)), + valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }), None => Err(database::Error::InvalidQuery), } From be6676a6315022ce4a3d7d2a02482c2a40a67798 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 19:59:28 +0100 Subject: [PATCH 166/435] clippy: fix 
src/databases/sqlite.rs --- src/databases/database.rs | 4 ++-- src/databases/sqlite.rs | 19 +++++++++++-------- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/src/databases/database.rs b/src/databases/database.rs index 7344010d8..62105dee5 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -3,7 +3,7 @@ use derive_more::{Display, Error}; use serde::{Deserialize, Serialize}; use crate::databases::mysql::Mysql; -use crate::databases::sqlite::SqliteDatabase; +use crate::databases::sqlite::Sqlite; use crate::protocol::common::InfoHash; use crate::tracker::key::AuthKey; @@ -19,7 +19,7 @@ pub enum Drivers { pub fn connect(db_driver: &Drivers, db_path: &str) -> Result, r2d2::Error> { let database: Box = match db_driver { Drivers::Sqlite3 => { - let db = SqliteDatabase::new(db_path)?; + let db = Sqlite::new(db_path)?; Box::new(db) } Drivers::MySQL => { diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 19849f297..ee637049b 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -11,20 +11,23 @@ use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::common::InfoHash; use crate::tracker::key::AuthKey; -pub struct SqliteDatabase { +pub struct Sqlite { pool: Pool, } -impl SqliteDatabase { - pub fn new(db_path: &str) -> Result { +impl Sqlite { + /// # Errors + /// + /// Will return `r2d2::Error` if `db_path` is not able to create `SqLite` database. 
+ pub fn new(db_path: &str) -> Result { let cm = SqliteConnectionManager::file(db_path); let pool = Pool::new(cm).expect("Failed to create r2d2 SQLite connection pool."); - Ok(SqliteDatabase { pool }) + Ok(Sqlite { pool }) } } #[async_trait] -impl Database for SqliteDatabase { +impl Database for Sqlite { fn create_database_tables(&self) -> Result<(), database::Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( @@ -86,7 +89,7 @@ impl Database for SqliteDatabase { Ok(AuthKey { key, - valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until as u64)), + valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }) })?; @@ -191,11 +194,11 @@ impl Database for SqliteDatabase { if let Some(row) = rows.next()? { let key: String = row.get(0).unwrap(); - let valid_until_i64: i64 = row.get(1).unwrap(); + let valid_until: i64 = row.get(1).unwrap(); Ok(AuthKey { key, - valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until_i64 as u64)), + valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }) } else { Err(database::Error::QueryReturnedNoRows) From b5ce7e9f0cfc6f11a83e781f4e85b3c6c5e93a0d Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 21:36:53 +0100 Subject: [PATCH 167/435] clippy: fix src/http/filters.rs --- src/http/filters.rs | 66 ++++++++++++++++++++++++--------------------- 1 file changed, 36 insertions(+), 30 deletions(-) diff --git a/src/http/filters.rs b/src/http/filters.rs index f28909c7f..f2e214e87 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -19,13 +19,15 @@ pub fn with_tracker(tracker: Arc) -> impl Filter impl Filter,), Error = Rejection> + Clone { - warp::filters::query::raw().and_then(info_hashes) + warp::filters::query::raw().and_then(|q| async move { info_hashes(&q) }) } /// Check for `PeerId` +#[must_use] pub fn with_peer_id() -> impl Filter + Clone { - warp::filters::query::raw().and_then(peer_id) + 
warp::filters::query::raw().and_then(|q| async move { peer_id(&q) }) } /// Pass Arc along @@ -37,34 +39,38 @@ pub fn with_auth_key() -> impl Filter,), Error = Infa } /// Check for `PeerAddress` +#[must_use] pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter + Clone { warp::addr::remote() .and(warp::header::optional::("X-Forwarded-For")) .map(move |remote_addr: Option, x_forwarded_for: Option| { (on_reverse_proxy, remote_addr, x_forwarded_for) }) - .and_then(peer_addr) + .and_then(|q| async move { peer_addr(q) }) } /// Check for `AnnounceRequest` +#[must_use] pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::filters::query::query::() .and(with_info_hash()) .and(with_peer_id()) .and(with_peer_addr(on_reverse_proxy)) - .and_then(announce_request) + .and_then(|q, r, s, t| async move { announce_request(q, &r, s, t) }) } /// Check for `ScrapeRequest` +#[must_use] pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::any() .and(with_info_hash()) .and(with_peer_addr(on_reverse_proxy)) - .and_then(scrape_request) + .and_then(|q, r| async move { scrape_request(q, r) }) } /// Parse `InfoHash` from raw query string -async fn info_hashes(raw_query: String) -> WebResult> { +#[allow(clippy::ptr_arg)] +fn info_hashes(raw_query: &String) -> WebResult> { let split_raw_query: Vec<&str> = raw_query.split('&').collect(); let mut info_hashes: Vec = Vec::new(); @@ -89,7 +95,8 @@ async fn info_hashes(raw_query: String) -> WebResult> { } /// Parse `PeerId` from raw query string -async fn peer_id(raw_query: String) -> WebResult { +#[allow(clippy::ptr_arg)] +fn peer_id(raw_query: &String) -> WebResult { // put all query params in a vec let split_raw_query: Vec<&str> = raw_query.split('&').collect(); @@ -118,17 +125,14 @@ async fn peer_id(raw_query: String) -> WebResult { } } - if peer_id.is_none() { - Err(reject::custom(ServerError::InvalidPeerId)) - } else { - Ok(peer_id.unwrap()) + match peer_id { + Some(id) => Ok(id), 
+ None => Err(reject::custom(ServerError::InvalidPeerId)), } } /// Get `PeerAddress` from `RemoteAddress` or Forwarded -async fn peer_addr( - (on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option), -) -> WebResult { +fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option)) -> WebResult { if !on_reverse_proxy && remote_addr.is_none() { return Err(reject::custom(ServerError::AddressNotFound)); } @@ -137,26 +141,27 @@ async fn peer_addr( return Err(reject::custom(ServerError::AddressNotFound)); } - match on_reverse_proxy { - true => { - let mut x_forwarded_for_raw = x_forwarded_for.unwrap(); - // remove whitespace chars - x_forwarded_for_raw.retain(|c| !c.is_whitespace()); - // get all forwarded ip's in a vec - let x_forwarded_ips: Vec<&str> = x_forwarded_for_raw.split(',').collect(); - // set client ip to last forwarded ip - let x_forwarded_ip = *x_forwarded_ips.last().unwrap(); - - IpAddr::from_str(x_forwarded_ip).map_err(|_| reject::custom(ServerError::AddressNotFound)) - } - false => Ok(remote_addr.unwrap().ip()), + if on_reverse_proxy { + let mut x_forwarded_for_raw = x_forwarded_for.unwrap(); + // remove whitespace chars + x_forwarded_for_raw.retain(|c| !c.is_whitespace()); + // get all forwarded ip's in a vec + let x_forwarded_ips: Vec<&str> = x_forwarded_for_raw.split(',').collect(); + // set client ip to last forwarded ip + let x_forwarded_ip = *x_forwarded_ips.last().unwrap(); + + IpAddr::from_str(x_forwarded_ip).map_err(|_| reject::custom(ServerError::AddressNotFound)) + } else { + Ok(remote_addr.unwrap().ip()) } } /// Parse `AnnounceRequest` from raw `AnnounceRequestQuery`, `InfoHash` and Option -async fn announce_request( +#[allow(clippy::unnecessary_wraps)] +#[allow(clippy::ptr_arg)] +fn announce_request( announce_request_query: AnnounceRequestQuery, - info_hashes: Vec, + info_hashes: &Vec, peer_id: PeerId, peer_addr: IpAddr, ) -> WebResult { @@ -174,6 +179,7 @@ async fn announce_request( } /// Parse 
`ScrapeRequest` from `InfoHash` -async fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { +#[allow(clippy::unnecessary_wraps)] +fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { Ok(Scrape { info_hashes, peer_addr }) } From 75bef77799f46c281eb3a8adae947705c9d1186f Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 22:04:00 +0100 Subject: [PATCH 168/435] clippy: fix src/http/handlers.rs --- src/http/handlers.rs | 56 ++++++++++++++++++++++++++------------------ src/http/routes.rs | 5 +++- 2 files changed, 37 insertions(+), 24 deletions(-) diff --git a/src/http/handlers.rs b/src/http/handlers.rs index a312ff105..064047ba0 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -19,37 +19,38 @@ use crate::tracker::torrent::{TorrentError, TorrentStats}; use crate::tracker::TorrentTracker; /// Authenticate `InfoHash` using optional `AuthKey` +/// +/// # Errors +/// +/// Will return `ServerError` that wraps the `TorrentError` if unable to `authenticate_request`. 
pub async fn authenticate( info_hash: &InfoHash, auth_key: &Option, tracker: Arc, ) -> Result<(), ServerError> { - match tracker.authenticate_request(info_hash, auth_key).await { - Ok(_) => Ok(()), - Err(e) => { - let err = match e { - TorrentError::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, - TorrentError::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, - TorrentError::PeerKeyNotValid => ServerError::PeerKeyNotValid, - TorrentError::NoPeersFound => ServerError::NoPeersFound, - TorrentError::CouldNotSendResponse => ServerError::InternalServerError, - TorrentError::InvalidInfoHash => ServerError::InvalidInfoHash, - }; - - Err(err) - } - } + tracker.authenticate_request(info_hash, auth_key).await.map_err(|e| match e { + TorrentError::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, + TorrentError::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, + TorrentError::PeerKeyNotValid => ServerError::PeerKeyNotValid, + TorrentError::NoPeersFound => ServerError::NoPeersFound, + TorrentError::CouldNotSendResponse => ServerError::InternalServerError, + TorrentError::InvalidInfoHash => ServerError::InvalidInfoHash, + }) } /// Handle announce request +/// +/// # Errors +/// +/// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. 
pub async fn handle_announce( announce_request: request::Announce, auth_key: Option, tracker: Arc, ) -> WebResult { - if let Err(e) = authenticate(&announce_request.info_hash, &auth_key, tracker.clone()).await { - return Err(reject::custom(e)); - } + authenticate(&announce_request.info_hash, &auth_key, tracker.clone()) + .await + .map_err(reject::custom)?; debug!("{:?}", announce_request); @@ -76,14 +77,18 @@ pub async fn handle_announce( send_announce_response( &announce_request, - torrent_stats, - peers, + &torrent_stats, + &peers, announce_interval, tracker.config.min_announce_interval, ) } /// Handle scrape request +/// +/// # Errors +/// +/// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. pub async fn handle_scrape( scrape_request: request::Scrape, auth_key: Option, @@ -134,10 +139,11 @@ pub async fn handle_scrape( } /// Send announce response +#[allow(clippy::ptr_arg)] fn send_announce_response( announce_request: &request::Announce, - torrent_stats: TorrentStats, - peers: Vec, + torrent_stats: &TorrentStats, + peers: &Vec, interval: u32, interval_min: u32, ) -> WebResult { @@ -180,7 +186,11 @@ fn send_scrape_response(files: HashMap) -> WebRes } /// Handle all server errors and send error reply -pub async fn send_error(r: Rejection) -> std::result::Result { +/// +/// # Errors +/// +/// Will not return a error, `Infallible`, but instead convert the `ServerError` into a `Response`. 
+pub fn send_error(r: &Rejection) -> std::result::Result { let body = if let Some(server_error) = r.find::() { debug!("{:?}", server_error); Error { diff --git a/src/http/routes.rs b/src/http/routes.rs index f82bf45bc..992febc2c 100644 --- a/src/http/routes.rs +++ b/src/http/routes.rs @@ -8,8 +8,11 @@ use super::handlers::{handle_announce, handle_scrape, send_error}; use crate::tracker::TorrentTracker; /// All routes +#[must_use] pub fn routes(tracker: Arc) -> impl Filter + Clone { - announce(tracker.clone()).or(scrape(tracker)).recover(send_error) + announce(tracker.clone()) + .or(scrape(tracker)) + .recover(|q| async move { send_error(&q) }) } /// GET /announce or /announce/ From 208b10eaf1da30627c6503a6854f914c7de4eb6f Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 22:05:30 +0100 Subject: [PATCH 169/435] clippy: fix src/http/server.rs --- src/http/server.rs | 8 ++++---- src/jobs/http_tracker.rs | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/http/server.rs b/src/http/server.rs index 97ec30aa0..755fdc73a 100644 --- a/src/http/server.rs +++ b/src/http/server.rs @@ -6,14 +6,14 @@ use crate::tracker::TorrentTracker; /// Server that listens on HTTP, needs a `TorrentTracker` #[derive(Clone)] -pub struct HttpServer { +pub struct Http { tracker: Arc, } -impl HttpServer { +impl Http { #[must_use] - pub fn new(tracker: Arc) -> HttpServer { - HttpServer { tracker } + pub fn new(tracker: Arc) -> Http { + Http { tracker } } /// Start the `HttpServer` diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index f6023a4e0..d0c289e81 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -5,7 +5,7 @@ use log::{info, warn}; use tokio::task::JoinHandle; use crate::config::HttpTracker; -use crate::http::server::HttpServer; +use crate::http::server::Http; use crate::tracker::TorrentTracker; #[must_use] @@ -16,7 +16,7 @@ pub fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHand let ssl_key_path = 
config.ssl_key_path.clone(); tokio::spawn(async move { - let http_tracker = HttpServer::new(tracker); + let http_tracker = Http::new(tracker); if !ssl_enabled { info!("Starting HTTP server on: {}", bind_addr); From 577ddb97b0b25eb766bcdd99f222850b9375e013 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 22:07:52 +0100 Subject: [PATCH 170/435] clippy: fix src/jobs/http_tracker.rs --- src/jobs/http_tracker.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index d0c289e81..276da8099 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -8,6 +8,9 @@ use crate::config::HttpTracker; use crate::http::server::Http; use crate::tracker::TorrentTracker; +/// # Panics +/// +/// It would panic if the `config::HttpTracker` struct would contain an inappropriate values. #[must_use] pub fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.parse::().unwrap(); From d092580db3bd53206b44d98df820b0c3f7de391c Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 22:12:33 +0100 Subject: [PATCH 171/435] clippy: fix src/jobs/torrent_cleanup.rs --- src/jobs/torrent_cleanup.rs | 6 +++--- src/setup.rs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/jobs/torrent_cleanup.rs b/src/jobs/torrent_cleanup.rs index 3d7b49d6b..7bdfc1677 100644 --- a/src/jobs/torrent_cleanup.rs +++ b/src/jobs/torrent_cleanup.rs @@ -8,8 +8,8 @@ use crate::config::Configuration; use crate::tracker::TorrentTracker; #[must_use] -pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { - let weak_tracker = std::sync::Arc::downgrade(&tracker); +pub fn start_job(config: &Configuration, tracker: &Arc) -> JoinHandle<()> { + let weak_tracker = std::sync::Arc::downgrade(tracker); let interval = config.inactive_peer_cleanup_interval; tokio::spawn(async move { @@ -28,7 +28,7 @@ pub fn start_job(config: &Configuration, tracker: 
Arc) -> JoinHa let start_time = Utc::now().time(); info!("Cleaning up torrents.."); tracker.cleanup_torrents().await; - info!("Cleaned up torrents in: {}ms", (Utc::now().time() - start_time).num_milliseconds()) + info!("Cleaned up torrents in: {}ms", (Utc::now().time() - start_time).num_milliseconds()); } else { break; } diff --git a/src/setup.rs b/src/setup.rs index 736f448b6..804b6258a 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -54,7 +54,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Vec< // Remove torrents without peers, every interval if config.inactive_peer_cleanup_interval > 0 { - jobs.push(torrent_cleanup::start_job(config, tracker.clone())); + jobs.push(torrent_cleanup::start_job(config, &tracker)); } jobs From 9adbfd137fa83d90e4e3073ad9adc240afccbc04 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 22:13:58 +0100 Subject: [PATCH 172/435] clippy: fix src/logging.rs --- src/logging.rs | 4 ++-- src/main.rs | 2 +- tests/api.rs | 2 +- tests/udp.rs | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/logging.rs b/src/logging.rs index 7682bace1..4d16f7670 100644 --- a/src/logging.rs +++ b/src/logging.rs @@ -7,7 +7,7 @@ use crate::config::Configuration; static INIT: Once = Once::new(); -pub fn setup_logging(cfg: &Configuration) { +pub fn setup(cfg: &Configuration) { let level = config_level_or_default(&cfg.log_level); if level == log::LevelFilter::Off { @@ -35,7 +35,7 @@ fn stdout_config(level: LevelFilter) { record.target(), record.level(), message - )) + )); }) .level(level) .chain(std::io::stdout()) diff --git a/src/main.rs b/src/main.rs index f64354fcf..baffc6fa5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -36,7 +36,7 @@ async fn main() { }; // Initialize logging - logging::setup_logging(&config); + logging::setup(&config); // Run jobs let jobs = setup::setup(&config, tracker.clone()).await; diff --git a/tests/api.rs b/tests/api.rs index 14fefa50e..6cfcbc092 100644 --- a/tests/api.rs +++ 
b/tests/api.rs @@ -288,7 +288,7 @@ mod tracker_api { self.tracker = Some(tracker.clone()); // Initialize logging - logging::setup_logging(&configuration); + logging::setup(&configuration); // Start the HTTP API job self.job = Some(tracker_api::start_job(&configuration, tracker).await); diff --git a/tests/udp.rs b/tests/udp.rs index 54caeaa68..b365c4fc6 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -69,7 +69,7 @@ mod udp_tracker_server { }; // Initialize logging - logging::setup_logging(&configuration); + logging::setup(&configuration); let udp_tracker_config = &configuration.udp_trackers[0]; From c78404ff33915057cc2cbc70a041e324fb30ea43 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 22:27:38 +0100 Subject: [PATCH 173/435] clippy: fix src/protocol/clock/mod.rs --- src/api/resources/auth_key_resource.rs | 6 +- src/protocol/clock/mod.rs | 94 ++++++++++++++------------ src/protocol/clock/time_extent.rs | 24 +++---- src/tracker/key.rs | 14 ++-- src/tracker/peer.rs | 10 +-- src/tracker/torrent.rs | 12 ++-- src/udp/connection_cookie.rs | 10 +-- src/udp/handlers.rs | 4 +- 8 files changed, 91 insertions(+), 83 deletions(-) diff --git a/src/api/resources/auth_key_resource.rs b/src/api/resources/auth_key_resource.rs index 3bc0cefb7..9bcfca596 100644 --- a/src/api/resources/auth_key_resource.rs +++ b/src/api/resources/auth_key_resource.rs @@ -36,7 +36,7 @@ mod tests { use std::time::Duration; use super::AuthKeyResource; - use crate::protocol::clock::{DefaultClock, TimeNow}; + use crate::protocol::clock::{Current, TimeNow}; use crate::tracker::key::AuthKey; #[test] @@ -52,7 +52,7 @@ mod tests { AuthKey::from(auth_key_resource), AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: Some(DefaultClock::add(&Duration::new(duration_in_secs, 0)).unwrap()) + valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()) } ); } @@ -63,7 +63,7 @@ mod tests { let auth_key = AuthKey { key: 
"IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: Some(DefaultClock::add(&Duration::new(duration_in_secs, 0)).unwrap()), + valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()), }; assert_eq!( diff --git a/src/protocol/clock/mod.rs b/src/protocol/clock/mod.rs index 51197dba6..7868d4c5e 100644 --- a/src/protocol/clock/mod.rs +++ b/src/protocol/clock/mod.rs @@ -4,7 +4,7 @@ use std::time::Duration; pub type DurationSinceUnixEpoch = Duration; #[derive(Debug)] -pub enum ClockType { +pub enum Type { WorkingClock, StoppedClock, } @@ -12,14 +12,14 @@ pub enum ClockType { #[derive(Debug)] pub struct Clock; -pub type WorkingClock = Clock<{ ClockType::WorkingClock as usize }>; -pub type StoppedClock = Clock<{ ClockType::StoppedClock as usize }>; +pub type Working = Clock<{ Type::WorkingClock as usize }>; +pub type Stopped = Clock<{ Type::StoppedClock as usize }>; #[cfg(not(test))] -pub type DefaultClock = WorkingClock; +pub type Current = Working; #[cfg(test)] -pub type DefaultClock = StoppedClock; +pub type Current = Stopped; pub trait Time: Sized { fn now() -> DurationSinceUnixEpoch; @@ -40,44 +40,52 @@ pub trait TimeNow: Time { mod tests { use std::any::TypeId; - use crate::protocol::clock::{DefaultClock, StoppedClock, Time, WorkingClock}; + use crate::protocol::clock::{Current, Stopped, Time, Working}; #[test] fn it_should_be_the_stopped_clock_as_default_when_testing() { // We are testing, so we should default to the fixed time. 
- assert_eq!(TypeId::of::(), TypeId::of::()); - assert_eq!(StoppedClock::now(), DefaultClock::now()) + assert_eq!(TypeId::of::(), TypeId::of::()); + assert_eq!(Stopped::now(), Current::now()); } #[test] fn it_should_have_different_times() { - assert_ne!(TypeId::of::(), TypeId::of::()); - assert_ne!(StoppedClock::now(), WorkingClock::now()) + assert_ne!(TypeId::of::(), TypeId::of::()); + assert_ne!(Stopped::now(), Working::now()); } } mod working_clock { use std::time::SystemTime; - use super::{DurationSinceUnixEpoch, Time, TimeNow, WorkingClock}; + use super::{DurationSinceUnixEpoch, Time, TimeNow, Working}; - impl Time for WorkingClock { + impl Time for Working { fn now() -> DurationSinceUnixEpoch { SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap() } } - impl TimeNow for WorkingClock {} + impl TimeNow for Working {} } pub trait StoppedTime: TimeNow { fn local_set(unix_time: &DurationSinceUnixEpoch); fn local_set_to_unix_epoch() { - Self::local_set(&DurationSinceUnixEpoch::ZERO) + Self::local_set(&DurationSinceUnixEpoch::ZERO); } fn local_set_to_app_start_time(); fn local_set_to_system_time_now(); + + /// # Errors + /// + /// Will return `IntErrorKind` if `duration` would overflow the internal `Duration`. fn local_add(duration: &Duration) -> Result<(), IntErrorKind>; + + /// # Errors + /// + /// Will return `IntErrorKind` if `duration` would underflow the internal `Duration`. 
fn local_sub(duration: &Duration) -> Result<(), IntErrorKind>; fn local_reset(); } @@ -86,9 +94,9 @@ mod stopped_clock { use std::num::IntErrorKind; use std::time::Duration; - use super::{DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow}; + use super::{DurationSinceUnixEpoch, Stopped, StoppedTime, Time, TimeNow}; - impl Time for StoppedClock { + impl Time for Stopped { fn now() -> DurationSinceUnixEpoch { detail::FIXED_TIME.with(|time| { return *time.borrow(); @@ -96,21 +104,21 @@ mod stopped_clock { } } - impl TimeNow for StoppedClock {} + impl TimeNow for Stopped {} - impl StoppedTime for StoppedClock { + impl StoppedTime for Stopped { fn local_set(unix_time: &DurationSinceUnixEpoch) { detail::FIXED_TIME.with(|time| { *time.borrow_mut() = *unix_time; - }) + }); } fn local_set_to_app_start_time() { - Self::local_set(&detail::get_app_start_time()) + Self::local_set(&detail::get_app_start_time()); } fn local_set_to_system_time_now() { - Self::local_set(&detail::get_app_start_time()) + Self::local_set(&detail::get_app_start_time()); } fn local_add(duration: &Duration) -> Result<(), IntErrorKind> { @@ -140,7 +148,7 @@ mod stopped_clock { } fn local_reset() { - Self::local_set(&detail::get_default_fixed_time()) + Self::local_set(&detail::get_default_fixed_time()); } } @@ -149,58 +157,58 @@ mod stopped_clock { use std::thread; use std::time::Duration; - use crate::protocol::clock::{DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow, WorkingClock}; + use crate::protocol::clock::{DurationSinceUnixEpoch, Stopped, StoppedTime, Time, TimeNow, Working}; #[test] fn it_should_default_to_zero_when_testing() { - assert_eq!(StoppedClock::now(), DurationSinceUnixEpoch::ZERO) + assert_eq!(Stopped::now(), DurationSinceUnixEpoch::ZERO); } #[test] fn it_should_possible_to_set_the_time() { // Check we start with ZERO. 
- assert_eq!(StoppedClock::now(), Duration::ZERO); + assert_eq!(Stopped::now(), Duration::ZERO); // Set to Current Time and Check - let timestamp = WorkingClock::now(); - StoppedClock::local_set(×tamp); - assert_eq!(StoppedClock::now(), timestamp); + let timestamp = Working::now(); + Stopped::local_set(×tamp); + assert_eq!(Stopped::now(), timestamp); // Elapse the Current Time and Check - StoppedClock::local_add(×tamp).unwrap(); - assert_eq!(StoppedClock::now(), timestamp + timestamp); + Stopped::local_add(×tamp).unwrap(); + assert_eq!(Stopped::now(), timestamp + timestamp); // Reset to ZERO and Check - StoppedClock::local_reset(); - assert_eq!(StoppedClock::now(), Duration::ZERO); + Stopped::local_reset(); + assert_eq!(Stopped::now(), Duration::ZERO); } #[test] fn it_should_default_to_zero_on_thread_exit() { - assert_eq!(StoppedClock::now(), Duration::ZERO); - let after5 = WorkingClock::add(&Duration::from_secs(5)).unwrap(); - StoppedClock::local_set(&after5); - assert_eq!(StoppedClock::now(), after5); + assert_eq!(Stopped::now(), Duration::ZERO); + let after5 = Working::add(&Duration::from_secs(5)).unwrap(); + Stopped::local_set(&after5); + assert_eq!(Stopped::now(), after5); let t = thread::spawn(move || { // each thread starts out with the initial value of ZERO - assert_eq!(StoppedClock::now(), Duration::ZERO); + assert_eq!(Stopped::now(), Duration::ZERO); // and gets set to the current time. 
- let timestamp = WorkingClock::now(); - StoppedClock::local_set(×tamp); - assert_eq!(StoppedClock::now(), timestamp); + let timestamp = Working::now(); + Stopped::local_set(×tamp); + assert_eq!(Stopped::now(), timestamp); }); // wait for the thread to complete and bail out on panic t.join().unwrap(); // we retain our original value of current time + 5sec despite the child thread - assert_eq!(StoppedClock::now(), after5); + assert_eq!(Stopped::now(), after5); // Reset to ZERO and Check - StoppedClock::local_reset(); - assert_eq!(StoppedClock::now(), Duration::ZERO); + Stopped::local_reset(); + assert_eq!(Stopped::now(), Duration::ZERO); } } diff --git a/src/protocol/clock/time_extent.rs b/src/protocol/clock/time_extent.rs index f975e9a04..0ff74400b 100644 --- a/src/protocol/clock/time_extent.rs +++ b/src/protocol/clock/time_extent.rs @@ -1,7 +1,7 @@ use std::num::{IntErrorKind, TryFromIntError}; use std::time::Duration; -use super::{ClockType, StoppedClock, TimeNow, WorkingClock}; +use super::{Stopped, TimeNow, Type, Working}; pub trait Extent: Sized + Default { type Base; @@ -156,11 +156,11 @@ where #[derive(Debug)] pub struct TimeExtentMaker {} -pub type WorkingTimeExtentMaker = TimeExtentMaker<{ ClockType::WorkingClock as usize }>; -pub type StoppedTimeExtentMaker = TimeExtentMaker<{ ClockType::StoppedClock as usize }>; +pub type WorkingTimeExtentMaker = TimeExtentMaker<{ Type::WorkingClock as usize }>; +pub type StoppedTimeExtentMaker = TimeExtentMaker<{ Type::StoppedClock as usize }>; -impl MakeTimeExtent for WorkingTimeExtentMaker {} -impl MakeTimeExtent for StoppedTimeExtentMaker {} +impl MakeTimeExtent for WorkingTimeExtentMaker {} +impl MakeTimeExtent for StoppedTimeExtentMaker {} #[cfg(not(test))] pub type DefaultTimeExtentMaker = WorkingTimeExtentMaker; @@ -175,7 +175,7 @@ mod test { checked_duration_from_nanos, DefaultTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent, TimeExtentBase, TimeExtentMultiplier, TimeExtentProduct, MAX, ZERO, }; - use 
crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedTime}; + use crate::protocol::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; const TIME_EXTENT_VAL: TimeExtent = TimeExtent::from_sec(2, &239_812_388_723); @@ -443,7 +443,7 @@ mod test { } ); - DefaultClock::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); + Current::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); assert_eq!( DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), @@ -458,7 +458,7 @@ mod test { #[test] fn it_should_fail_if_amount_exceeds_bounds() { - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + Current::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now(&TimeExtentBase::from_millis(1)) .unwrap() @@ -493,13 +493,13 @@ mod test { None ); - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + Current::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!(DefaultTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::MAX), None); } #[test] fn it_should_fail_if_amount_exceeds_bounds() { - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + Current::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now_after(&TimeExtentBase::from_millis(1), &Duration::ZERO) .unwrap() @@ -515,7 +515,7 @@ mod test { #[test] fn it_should_give_a_time_extent() { - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + Current::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now_before( @@ -546,7 +546,7 @@ mod test { #[test] fn it_should_fail_if_amount_exceeds_bounds() { - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + Current::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now_before(&TimeExtentBase::from_millis(1), &Duration::ZERO) .unwrap() diff --git a/src/tracker/key.rs b/src/tracker/key.rs index 881dac877..2b6e71223 100644 --- a/src/tracker/key.rs +++ b/src/tracker/key.rs @@ 
-6,7 +6,7 @@ use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use serde::Serialize; -use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, Time, TimeNow}; +use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time, TimeNow}; use crate::protocol::common::AUTH_KEY_LENGTH; #[must_use] @@ -21,12 +21,12 @@ pub fn generate_auth_key(lifetime: Duration) -> AuthKey { AuthKey { key, - valid_until: Some(DefaultClock::add(&lifetime).unwrap()), + valid_until: Some(Current::add(&lifetime).unwrap()), } } pub fn verify_auth_key(auth_key: &AuthKey) -> Result<(), Error> { - let current_time: DurationSinceUnixEpoch = DefaultClock::now(); + let current_time: DurationSinceUnixEpoch = Current::now(); if auth_key.valid_until.is_none() { return Err(Error::KeyInvalid); } @@ -88,7 +88,7 @@ impl From for Error { mod tests { use std::time::Duration; - use crate::protocol::clock::{DefaultClock, StoppedTime}; + use crate::protocol::clock::{Current, StoppedTime}; use crate::tracker::key; #[test] @@ -121,18 +121,18 @@ mod tests { #[test] fn generate_and_check_expired_auth_key() { // Set the time to the current time. - DefaultClock::local_set_to_system_time_now(); + Current::local_set_to_system_time_now(); // Make key that is valid for 19 seconds. let auth_key = key::generate_auth_key(Duration::from_secs(19)); // Mock the time has passed 10 sec. - DefaultClock::local_add(&Duration::from_secs(10)).unwrap(); + Current::local_add(&Duration::from_secs(10)).unwrap(); assert!(key::verify_auth_key(&auth_key).is_ok()); // Mock the time has passed another 10 sec. 
- DefaultClock::local_add(&Duration::from_secs(10)).unwrap(); + Current::local_add(&Duration::from_secs(10)).unwrap(); assert!(key::verify_auth_key(&auth_key).is_err()); } diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index a30723d00..115a2bfb9 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -5,7 +5,7 @@ use serde; use serde::Serialize; use crate::http::request::Announce; -use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; +use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time}; use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef, PeerId}; use crate::protocol::utils::ser_unix_time_value; @@ -37,7 +37,7 @@ impl TorrentPeer { TorrentPeer { peer_id: PeerId(announce_request.peer_id.0), peer_addr, - updated: DefaultClock::now(), + updated: Current::now(), uploaded: announce_request.bytes_uploaded, downloaded: announce_request.bytes_downloaded, left: announce_request.bytes_left, @@ -63,7 +63,7 @@ impl TorrentPeer { TorrentPeer { peer_id: announce_request.peer_id, peer_addr, - updated: DefaultClock::now(), + updated: Current::now(), uploaded: NumberOfBytes(announce_request.uploaded as i64), downloaded: NumberOfBytes(announce_request.downloaded as i64), left: NumberOfBytes(announce_request.left as i64), @@ -95,7 +95,7 @@ mod test { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::protocol::clock::{DefaultClock, Time}; + use crate::protocol::clock::{Current, Time}; use crate::protocol::common::PeerId; use crate::tracker::peer::TorrentPeer; @@ -104,7 +104,7 @@ mod test { let torrent_peer = TorrentPeer { peer_id: PeerId(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DefaultClock::now(), + updated: Current::now(), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 46608643d..4007976c9 100644 --- a/src/tracker/torrent.rs +++ 
b/src/tracker/torrent.rs @@ -5,7 +5,7 @@ use aquatic_udp_protocol::AnnounceEvent; use serde::{Deserialize, Serialize}; use super::peer::TorrentPeer; -use crate::protocol::clock::{DefaultClock, TimeNow}; +use crate::protocol::clock::{Current, TimeNow}; use crate::protocol::common::{PeerId, MAX_SCRAPE_TORRENTS}; #[derive(Serialize, Deserialize, Clone, Debug)] @@ -80,7 +80,7 @@ impl TorrentEntry { } pub fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { - let current_cutoff = DefaultClock::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); + let current_cutoff = Current::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); self.peers.retain(|_, peer| peer.updated > current_cutoff); } } @@ -116,7 +116,7 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, WorkingClock}; + use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; use crate::protocol::common::PeerId; use crate::tracker::peer::TorrentPeer; use crate::tracker::torrent::TorrentEntry; @@ -130,7 +130,7 @@ mod tests { let default_peer = TorrentPeer { peer_id: PeerId([0u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), - updated: DefaultClock::now(), + updated: Current::now(), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), @@ -358,8 +358,8 @@ mod tests { let timeout = 120u32; - let now = WorkingClock::now(); - StoppedClock::local_set(&now); + let now = Working::now(); + Stopped::local_set(&now); let timeout_seconds_before_now = now.sub(Duration::from_secs(u64::from(timeout))); let inactive_peer = TorrentPeerBuilder::default() diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs index b18940dfc..1b77d47e2 100644 --- a/src/udp/connection_cookie.rs +++ b/src/udp/connection_cookie.rs @@ -84,7 +84,7 @@ mod 
tests { use super::cookie_builder::{self}; use crate::protocol::clock::time_extent::{self, Extent}; - use crate::protocol::clock::{StoppedClock, StoppedTime}; + use crate::protocol::clock::{Stopped, StoppedTime}; use crate::udp::connection_cookie::{check_connection_cookie, make_connection_cookie, Cookie, COOKIE_LIFETIME}; // #![feature(const_socketaddr)] @@ -195,7 +195,7 @@ mod tests { let cookie = make_connection_cookie(&remote_address); - StoppedClock::local_add(&COOKIE_LIFETIME.increment).unwrap(); + Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); let cookie_next = make_connection_cookie(&remote_address); @@ -217,7 +217,7 @@ mod tests { let cookie = make_connection_cookie(&remote_address); - StoppedClock::local_add(&COOKIE_LIFETIME.increment).unwrap(); + Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); check_connection_cookie(&remote_address, &cookie).unwrap(); } @@ -228,7 +228,7 @@ mod tests { let cookie = make_connection_cookie(&remote_address); - StoppedClock::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); + Stopped::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); check_connection_cookie(&remote_address, &cookie).unwrap(); } @@ -240,7 +240,7 @@ mod tests { let cookie = make_connection_cookie(&remote_address); - StoppedClock::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap()); + Stopped::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap()); check_connection_cookie(&remote_address, &cookie).unwrap(); } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 81578e9c3..679a11ffc 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -253,7 +253,7 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use crate::config::Configuration; - use crate::protocol::clock::{DefaultClock, Time}; + use crate::protocol::clock::{Current, Time}; use crate::protocol::common::PeerId; use crate::tracker::mode::TrackerMode; use crate::tracker::peer::TorrentPeer; @@ -309,7 +309,7 @@ mod tests { let 
default_peer = TorrentPeer { peer_id: PeerId([255u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DefaultClock::now(), + updated: Current::now(), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), From 5ea7c0d8d047316a90235c945607f16ec7eb77fe Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 22:30:17 +0100 Subject: [PATCH 174/435] clippy: fix src/protocol/clock/time_extent.rs --- src/protocol/clock/time_extent.rs | 156 ++++++++++++++---------------- src/udp/connection_cookie.rs | 2 +- 2 files changed, 74 insertions(+), 84 deletions(-) diff --git a/src/protocol/clock/time_extent.rs b/src/protocol/clock/time_extent.rs index 0ff74400b..b4c20cd70 100644 --- a/src/protocol/clock/time_extent.rs +++ b/src/protocol/clock/time_extent.rs @@ -10,37 +10,44 @@ pub trait Extent: Sized + Default { fn new(unit: &Self::Base, count: &Self::Multiplier) -> Self; + /// # Errors + /// + /// Will return `IntErrorKind` if `add` would overflow the internal `Duration`. fn increase(&self, add: Self::Multiplier) -> Result; + + /// # Errors + /// + /// Will return `IntErrorKind` if `sub` would underflow the internal `Duration`. 
fn decrease(&self, sub: Self::Multiplier) -> Result; fn total(&self) -> Option>; fn total_next(&self) -> Option>; } -pub type TimeExtentBase = Duration; -pub type TimeExtentMultiplier = u64; -pub type TimeExtentProduct = TimeExtentBase; +pub type Base = Duration; +pub type Multiplier = u64; +pub type Product = Base; #[derive(Debug, Default, Hash, PartialEq, Eq)] pub struct TimeExtent { - pub increment: TimeExtentBase, - pub amount: TimeExtentMultiplier, + pub increment: Base, + pub amount: Multiplier, } pub const ZERO: TimeExtent = TimeExtent { - increment: TimeExtentBase::ZERO, - amount: TimeExtentMultiplier::MIN, + increment: Base::ZERO, + amount: Multiplier::MIN, }; pub const MAX: TimeExtent = TimeExtent { - increment: TimeExtentBase::MAX, - amount: TimeExtentMultiplier::MAX, + increment: Base::MAX, + amount: Multiplier::MAX, }; impl TimeExtent { #[must_use] - pub const fn from_sec(seconds: u64, amount: &TimeExtentMultiplier) -> Self { + pub const fn from_sec(seconds: u64, amount: &Multiplier) -> Self { Self { - increment: TimeExtentBase::from_secs(seconds), + increment: Base::from_secs(seconds), amount: *amount, } } @@ -61,9 +68,9 @@ fn checked_duration_from_nanos(time: u128) -> Result } impl Extent for TimeExtent { - type Base = TimeExtentBase; - type Multiplier = TimeExtentMultiplier; - type Product = TimeExtentProduct; + type Base = Base; + type Multiplier = Multiplier; + type Product = Product; fn new(increment: &Self::Base, amount: &Self::Multiplier) -> Self { Self { @@ -107,60 +114,58 @@ impl Extent for TimeExtent { } } -pub trait MakeTimeExtent: Sized +pub trait Make: Sized where Clock: TimeNow, { #[must_use] - fn now(increment: &TimeExtentBase) -> Option> { + fn now(increment: &Base) -> Option> { Clock::now() .as_nanos() .checked_div((*increment).as_nanos()) - .map(|amount| match TimeExtentMultiplier::try_from(amount) { + .map(|amount| match Multiplier::try_from(amount) { Err(error) => Err(error), Ok(amount) => Ok(TimeExtent::new(increment, &amount)), 
}) } #[must_use] - fn now_after(increment: &TimeExtentBase, add_time: &Duration) -> Option> { + fn now_after(increment: &Base, add_time: &Duration) -> Option> { match Clock::add(add_time) { None => None, - Some(time) => { - time.as_nanos() - .checked_div(increment.as_nanos()) - .map(|amount| match TimeExtentMultiplier::try_from(amount) { - Err(error) => Err(error), - Ok(amount) => Ok(TimeExtent::new(increment, &amount)), - }) - } + Some(time) => time + .as_nanos() + .checked_div(increment.as_nanos()) + .map(|amount| match Multiplier::try_from(amount) { + Err(error) => Err(error), + Ok(amount) => Ok(TimeExtent::new(increment, &amount)), + }), } } #[must_use] - fn now_before(increment: &TimeExtentBase, sub_time: &Duration) -> Option> { + fn now_before(increment: &Base, sub_time: &Duration) -> Option> { match Clock::sub(sub_time) { None => None, - Some(time) => { - time.as_nanos() - .checked_div(increment.as_nanos()) - .map(|amount| match TimeExtentMultiplier::try_from(amount) { - Err(error) => Err(error), - Ok(amount) => Ok(TimeExtent::new(increment, &amount)), - }) - } + Some(time) => time + .as_nanos() + .checked_div(increment.as_nanos()) + .map(|amount| match Multiplier::try_from(amount) { + Err(error) => Err(error), + Ok(amount) => Ok(TimeExtent::new(increment, &amount)), + }), } } } #[derive(Debug)] -pub struct TimeExtentMaker {} +pub struct Maker {} -pub type WorkingTimeExtentMaker = TimeExtentMaker<{ Type::WorkingClock as usize }>; -pub type StoppedTimeExtentMaker = TimeExtentMaker<{ Type::StoppedClock as usize }>; +pub type WorkingTimeExtentMaker = Maker<{ Type::WorkingClock as usize }>; +pub type StoppedTimeExtentMaker = Maker<{ Type::StoppedClock as usize }>; -impl MakeTimeExtent for WorkingTimeExtentMaker {} -impl MakeTimeExtent for StoppedTimeExtentMaker {} +impl Make for WorkingTimeExtentMaker {} +impl Make for StoppedTimeExtentMaker {} #[cfg(not(test))] pub type DefaultTimeExtentMaker = WorkingTimeExtentMaker; @@ -172,8 +177,7 @@ pub type 
DefaultTimeExtentMaker = StoppedTimeExtentMaker; mod test { use crate::protocol::clock::time_extent::{ - checked_duration_from_nanos, DefaultTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent, TimeExtentBase, - TimeExtentMultiplier, TimeExtentProduct, MAX, ZERO, + checked_duration_from_nanos, Base, DefaultTimeExtentMaker, Extent, Make, Multiplier, Product, TimeExtent, MAX, ZERO, }; use crate::protocol::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; @@ -238,7 +242,7 @@ mod test { #[test] fn it_should_make_empty_for_zero() { - assert_eq!(TimeExtent::from_sec(u64::MIN, &TimeExtentMultiplier::MIN), ZERO); + assert_eq!(TimeExtent::from_sec(u64::MIN, &Multiplier::MIN), ZERO); } #[test] fn it_should_make_from_seconds() { @@ -254,15 +258,15 @@ mod test { #[test] fn it_should_make_empty_for_zero() { - assert_eq!(TimeExtent::new(&TimeExtentBase::ZERO, &TimeExtentMultiplier::MIN), ZERO); + assert_eq!(TimeExtent::new(&Base::ZERO, &Multiplier::MIN), ZERO); } #[test] fn it_should_make_new() { assert_eq!( - TimeExtent::new(&TimeExtentBase::from_millis(2), &TIME_EXTENT_VAL.amount), + TimeExtent::new(&Base::from_millis(2), &TIME_EXTENT_VAL.amount), TimeExtent { - increment: TimeExtentBase::from_millis(2), + increment: Base::from_millis(2), amount: TIME_EXTENT_VAL.amount } ); @@ -328,30 +332,27 @@ mod test { #[test] fn it_should_be_zero_for_zero() { - assert_eq!(ZERO.total().unwrap().unwrap(), TimeExtentProduct::ZERO); + assert_eq!(ZERO.total().unwrap().unwrap(), Product::ZERO); } #[test] fn it_should_give_a_total() { assert_eq!( TIME_EXTENT_VAL.total().unwrap().unwrap(), - TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) + Product::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) ); assert_eq!( - TimeExtent::new(&TimeExtentBase::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) + TimeExtent::new(&Base::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) .total() .unwrap() .unwrap(), - 
TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) + Product::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) ); assert_eq!( - TimeExtent::new(&TimeExtentBase::from_secs(1), &(u64::MAX)) - .total() - .unwrap() - .unwrap(), - TimeExtentProduct::from_secs(u64::MAX) + TimeExtent::new(&Base::from_secs(1), &(u64::MAX)).total().unwrap().unwrap(), + Product::from_secs(u64::MAX) ); } @@ -378,33 +379,33 @@ mod test { #[test] fn it_should_be_zero_for_zero() { - assert_eq!(ZERO.total_next().unwrap().unwrap(), TimeExtentProduct::ZERO); + assert_eq!(ZERO.total_next().unwrap().unwrap(), Product::ZERO); } #[test] fn it_should_give_a_total() { assert_eq!( TIME_EXTENT_VAL.total_next().unwrap().unwrap(), - TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount + 1)) + Product::from_secs(TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount + 1)) ); assert_eq!( - TimeExtent::new(&TimeExtentBase::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) + TimeExtent::new(&Base::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) .total_next() .unwrap() .unwrap(), - TimeExtentProduct::new( + Product::new( TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount), - TimeExtentBase::from_millis(2).as_nanos().try_into().unwrap() + Base::from_millis(2).as_nanos().try_into().unwrap() ) ); assert_eq!( - TimeExtent::new(&TimeExtentBase::from_secs(1), &(u64::MAX - 1)) + TimeExtent::new(&Base::from_secs(1), &(u64::MAX - 1)) .total_next() .unwrap() .unwrap(), - TimeExtentProduct::from_secs(u64::MAX) + Product::from_secs(u64::MAX) ); } @@ -453,16 +454,14 @@ mod test { #[test] fn it_should_fail_for_zero() { - assert_eq!(DefaultTimeExtentMaker::now(&TimeExtentBase::ZERO), None); + assert_eq!(DefaultTimeExtentMaker::now(&Base::ZERO), None); } #[test] fn it_should_fail_if_amount_exceeds_bounds() { Current::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( - 
DefaultTimeExtentMaker::now(&TimeExtentBase::from_millis(1)) - .unwrap() - .unwrap_err(), + DefaultTimeExtentMaker::now(&Base::from_millis(1)).unwrap().unwrap_err(), u64::try_from(u128::MAX).unwrap_err() ); } @@ -488,20 +487,17 @@ mod test { #[test] fn it_should_fail_for_zero() { - assert_eq!( - DefaultTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::ZERO), - None - ); + assert_eq!(DefaultTimeExtentMaker::now_after(&Base::ZERO, &Duration::ZERO), None); Current::local_set(&DurationSinceUnixEpoch::MAX); - assert_eq!(DefaultTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::MAX), None); + assert_eq!(DefaultTimeExtentMaker::now_after(&Base::ZERO, &Duration::MAX), None); } #[test] fn it_should_fail_if_amount_exceeds_bounds() { Current::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( - DefaultTimeExtentMaker::now_after(&TimeExtentBase::from_millis(1), &Duration::ZERO) + DefaultTimeExtentMaker::now_after(&Base::from_millis(1), &Duration::ZERO) .unwrap() .unwrap_err(), u64::try_from(u128::MAX).unwrap_err() @@ -519,13 +515,13 @@ mod test { assert_eq!( DefaultTimeExtentMaker::now_before( - &TimeExtentBase::from_secs(u64::from(u32::MAX)), + &Base::from_secs(u64::from(u32::MAX)), &Duration::from_secs(u64::from(u32::MAX)) ) .unwrap() .unwrap(), TimeExtent { - increment: TimeExtentBase::from_secs(u64::from(u32::MAX)), + increment: Base::from_secs(u64::from(u32::MAX)), amount: 4_294_967_296 } ); @@ -533,22 +529,16 @@ mod test { #[test] fn it_should_fail_for_zero() { - assert_eq!( - DefaultTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::ZERO), - None - ); + assert_eq!(DefaultTimeExtentMaker::now_before(&Base::ZERO, &Duration::ZERO), None); - assert_eq!( - DefaultTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::MAX), - None - ); + assert_eq!(DefaultTimeExtentMaker::now_before(&Base::ZERO, &Duration::MAX), None); } #[test] fn it_should_fail_if_amount_exceeds_bounds() { Current::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( 
- DefaultTimeExtentMaker::now_before(&TimeExtentBase::from_millis(1), &Duration::ZERO) + DefaultTimeExtentMaker::now_before(&Base::from_millis(1), &Duration::ZERO) .unwrap() .unwrap_err(), u64::try_from(u128::MAX).unwrap_err() diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs index 1b77d47e2..5a1e564dd 100644 --- a/src/udp/connection_cookie.rs +++ b/src/udp/connection_cookie.rs @@ -54,7 +54,7 @@ mod cookie_builder { use std::net::SocketAddr; use super::{Cookie, SinceUnixEpochTimeExtent, COOKIE_LIFETIME}; - use crate::protocol::clock::time_extent::{DefaultTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent}; + use crate::protocol::clock::time_extent::{DefaultTimeExtentMaker, Extent, Make, TimeExtent}; use crate::protocol::crypto::keys::seeds::{DefaultSeed, SeedKeeper}; pub(super) fn get_last_time_extent() -> SinceUnixEpochTimeExtent { From d03269ad9d48a776c7390f18c2a71efa784f1538 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 22:34:11 +0100 Subject: [PATCH 175/435] clippy: fix src/protocol/utils.rs --- src/protocol/utils.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index ac20aa41e..cec02ceaf 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -1,5 +1,9 @@ use super::clock::DurationSinceUnixEpoch; +/// # Errors +/// +/// Will return `serde::Serializer::Error` if unable to serialize the `unix_time_value`. 
pub fn ser_unix_time_value(unix_time_value: &DurationSinceUnixEpoch, ser: S) -> Result { + #[allow(clippy::cast_possible_truncation)] ser.serialize_u64(unix_time_value.as_millis() as u64) } From efed1bc2c9729c1fcf434db0804570510047359a Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 24 Nov 2022 19:37:14 +0100 Subject: [PATCH 176/435] clippy: fix src/setup.rs --- src/setup.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/setup.rs b/src/setup.rs index 804b6258a..cfca5eb9e 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -35,7 +35,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Vec< udp_tracker_config.bind_address, config.mode ); } else { - jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone())) + jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone())); } } From 58e5909379203f32ee5d628414f68c143649ffea Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 24 Nov 2022 19:51:36 +0100 Subject: [PATCH 177/435] clippy: fix src/tracker/key.rs --- src/api/resources/auth_key_resource.rs | 18 ++++---- src/databases/database.rs | 8 ++-- src/databases/mysql.rs | 14 +++--- src/databases/sqlite.rs | 14 +++--- src/http/filters.rs | 8 ++-- src/http/handlers.rs | 8 ++-- src/tracker/key.rs | 63 +++++++++++++++----------- src/tracker/mod.rs | 14 +++--- tests/api.rs | 4 +- 9 files changed, 81 insertions(+), 70 deletions(-) diff --git a/src/api/resources/auth_key_resource.rs b/src/api/resources/auth_key_resource.rs index 9bcfca596..9b3cc9646 100644 --- a/src/api/resources/auth_key_resource.rs +++ b/src/api/resources/auth_key_resource.rs @@ -3,7 +3,7 @@ use std::convert::From; use serde::{Deserialize, Serialize}; use crate::protocol::clock::DurationSinceUnixEpoch; -use crate::tracker::key::AuthKey; +use crate::tracker::key::Auth; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct AuthKeyResource { @@ -11,9 +11,9 @@ pub struct AuthKeyResource { pub valid_until: Option, } -impl From for 
AuthKey { +impl From for Auth { fn from(auth_key_resource: AuthKeyResource) -> Self { - AuthKey { + Auth { key: auth_key_resource.key, valid_until: auth_key_resource .valid_until @@ -22,8 +22,8 @@ impl From for AuthKey { } } -impl From for AuthKeyResource { - fn from(auth_key: AuthKey) -> Self { +impl From for AuthKeyResource { + fn from(auth_key: Auth) -> Self { AuthKeyResource { key: auth_key.key, valid_until: auth_key.valid_until.map(|valid_until| valid_until.as_secs()), @@ -37,7 +37,7 @@ mod tests { use super::AuthKeyResource; use crate::protocol::clock::{Current, TimeNow}; - use crate::tracker::key::AuthKey; + use crate::tracker::key::Auth; #[test] fn it_should_be_convertible_into_an_auth_key() { @@ -49,8 +49,8 @@ mod tests { }; assert_eq!( - AuthKey::from(auth_key_resource), - AuthKey { + Auth::from(auth_key_resource), + Auth { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()) } @@ -61,7 +61,7 @@ mod tests { fn it_should_be_convertible_from_an_auth_key() { let duration_in_secs = 60; - let auth_key = AuthKey { + let auth_key = Auth { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()), }; diff --git a/src/databases/database.rs b/src/databases/database.rs index 62105dee5..5186f96b3 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::databases::mysql::Mysql; use crate::databases::sqlite::Sqlite; use crate::protocol::common::InfoHash; -use crate::tracker::key::AuthKey; +use crate::tracker::key::Auth; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub enum Drivers { @@ -42,7 +42,7 @@ pub trait Database: Sync + Send { async fn load_persistent_torrents(&self) -> Result, Error>; - async fn load_keys(&self) -> Result, Error>; + async fn load_keys(&self) -> Result, Error>; 
async fn load_whitelist(&self) -> Result, Error>; @@ -54,9 +54,9 @@ pub trait Database: Sync + Send { async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; - async fn get_key_from_keys(&self, key: &str) -> Result; + async fn get_key_from_keys(&self, key: &str) -> Result; - async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result; + async fn add_key_to_keys(&self, auth_key: &Auth) -> Result; async fn remove_key_from_keys(&self, key: &str) -> Result; diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 5db358d5a..4fd00e31e 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -11,7 +11,7 @@ use r2d2_mysql::MysqlConnectionManager; use crate::databases::database; use crate::databases::database::{Database, Error}; use crate::protocol::common::{InfoHash, AUTH_KEY_LENGTH}; -use crate::tracker::key::AuthKey; +use crate::tracker::key::Auth; pub struct Mysql { pool: Pool, @@ -90,13 +90,13 @@ impl Database for Mysql { Ok(torrents) } - async fn load_keys(&self) -> Result, Error> { + async fn load_keys(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - let keys: Vec = conn + let keys: Vec = conn .query_map( "SELECT `key`, valid_until FROM `keys`", - |(key, valid_until): (String, i64)| AuthKey { + |(key, valid_until): (String, i64)| Auth { key, valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }, @@ -182,14 +182,14 @@ impl Database for Mysql { } } - async fn get_key_from_keys(&self, key: &str) -> Result { + async fn get_key_from_keys(&self, key: &str) -> Result { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn .exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }) .map_err(|_| database::Error::QueryReturnedNoRows)? 
{ - Some((key, valid_until)) => Ok(AuthKey { + Some((key, valid_until)) => Ok(Auth { key, valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }), @@ -197,7 +197,7 @@ impl Database for Mysql { } } - async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result { + async fn add_key_to_keys(&self, auth_key: &Auth) -> Result { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let key = auth_key.key.to_string(); diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index ee637049b..159da9922 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -9,7 +9,7 @@ use crate::databases::database; use crate::databases::database::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::common::InfoHash; -use crate::tracker::key::AuthKey; +use crate::tracker::key::Auth; pub struct Sqlite { pool: Pool, @@ -78,7 +78,7 @@ impl Database for Sqlite { Ok(torrents) } - async fn load_keys(&self) -> Result, Error> { + async fn load_keys(&self) -> Result, Error> { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?; @@ -87,13 +87,13 @@ impl Database for Sqlite { let key = row.get(0)?; let valid_until: i64 = row.get(1)?; - Ok(AuthKey { + Ok(Auth { key, valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }) })?; - let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); + let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); Ok(keys) } @@ -186,7 +186,7 @@ impl Database for Sqlite { } } - async fn get_key_from_keys(&self, key: &str) -> Result { + async fn get_key_from_keys(&self, key: &str) -> Result { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; @@ -196,7 +196,7 @@ impl Database for Sqlite { let key: String = row.get(0).unwrap(); let 
valid_until: i64 = row.get(1).unwrap(); - Ok(AuthKey { + Ok(Auth { key, valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }) @@ -205,7 +205,7 @@ impl Database for Sqlite { } } - async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result { + async fn add_key_to_keys(&self, auth_key: &Auth) -> Result { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.execute( diff --git a/src/http/filters.rs b/src/http/filters.rs index f2e214e87..3375c781f 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -9,7 +9,7 @@ use super::errors::ServerError; use super::request::{Announce, AnnounceRequestQuery, Scrape}; use super::WebResult; use crate::protocol::common::{InfoHash, PeerId, MAX_SCRAPE_TORRENTS}; -use crate::tracker::key::AuthKey; +use crate::tracker::key::Auth; use crate::tracker::TorrentTracker; /// Pass Arc along @@ -32,10 +32,10 @@ pub fn with_peer_id() -> impl Filter + C /// Pass Arc along #[must_use] -pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { +pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { warp::path::param::() - .map(|key: String| AuthKey::from_string(&key)) - .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) + .map(|key: String| Auth::from_string(&key)) + .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) } /// Check for `PeerAddress` diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 064047ba0..793de9ef5 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -12,7 +12,7 @@ use super::response::{self, Peer, ScrapeResponseEntry}; use super::{request, WebResult}; use crate::http::response::Error; use crate::protocol::common::InfoHash; -use crate::tracker::key::AuthKey; +use crate::tracker::key::Auth; use crate::tracker::peer::TorrentPeer; use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::{TorrentError, TorrentStats}; @@ -25,7 +25,7 @@ use 
crate::tracker::TorrentTracker; /// Will return `ServerError` that wraps the `TorrentError` if unable to `authenticate_request`. pub async fn authenticate( info_hash: &InfoHash, - auth_key: &Option, + auth_key: &Option, tracker: Arc, ) -> Result<(), ServerError> { tracker.authenticate_request(info_hash, auth_key).await.map_err(|e| match e { @@ -45,7 +45,7 @@ pub async fn authenticate( /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. pub async fn handle_announce( announce_request: request::Announce, - auth_key: Option, + auth_key: Option, tracker: Arc, ) -> WebResult { authenticate(&announce_request.info_hash, &auth_key, tracker.clone()) @@ -91,7 +91,7 @@ pub async fn handle_announce( /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. pub async fn handle_scrape( scrape_request: request::Scrape, - auth_key: Option, + auth_key: Option, tracker: Arc, ) -> WebResult { let mut files: HashMap = HashMap::new(); diff --git a/src/tracker/key.rs b/src/tracker/key.rs index 2b6e71223..673780ad0 100644 --- a/src/tracker/key.rs +++ b/src/tracker/key.rs @@ -10,7 +10,10 @@ use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time, TimeNow}; use crate::protocol::common::AUTH_KEY_LENGTH; #[must_use] -pub fn generate_auth_key(lifetime: Duration) -> AuthKey { +/// # Panics +/// +/// It would panic if the `lifetime: Duration` + Duration is more than `Duration::MAX`. 
+pub fn generate(lifetime: Duration) -> Auth { let key: String = thread_rng() .sample_iter(&Alphanumeric) .take(AUTH_KEY_LENGTH) @@ -19,49 +22,57 @@ pub fn generate_auth_key(lifetime: Duration) -> AuthKey { debug!("Generated key: {}, valid for: {:?} seconds", key, lifetime); - AuthKey { + Auth { key, valid_until: Some(Current::add(&lifetime).unwrap()), } } -pub fn verify_auth_key(auth_key: &AuthKey) -> Result<(), Error> { +/// # Errors +/// +/// Will return `Error::KeyExpired` if `auth_key.valid_until` is past the `current_time`. +/// +/// Will return `Error::KeyInvalid` if `auth_key.valid_until` is past the `None`. +pub fn verify(auth_key: &Auth) -> Result<(), Error> { let current_time: DurationSinceUnixEpoch = Current::now(); - if auth_key.valid_until.is_none() { - return Err(Error::KeyInvalid); - } - if auth_key.valid_until.unwrap() < current_time { - return Err(Error::KeyExpired); - } - Ok(()) + match auth_key.valid_until { + Some(valid_untill) => { + if valid_untill < current_time { + Err(Error::KeyExpired) + } else { + Ok(()) + } + } + None => Err(Error::KeyInvalid), + } } #[derive(Serialize, Debug, Eq, PartialEq, Clone)] -pub struct AuthKey { +pub struct Auth { pub key: String, pub valid_until: Option, } -impl AuthKey { +impl Auth { #[must_use] - pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { + pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { if let Ok(key) = String::from_utf8(Vec::from(key_buffer)) { - Some(AuthKey { key, valid_until: None }) + Some(Auth { key, valid_until: None }) } else { None } } #[must_use] - pub fn from_string(key: &str) -> Option { - if key.len() != AUTH_KEY_LENGTH { - None - } else { - Some(AuthKey { + pub fn from_string(key: &str) -> Option { + if key.len() == AUTH_KEY_LENGTH { + Some(Auth { key: key.to_string(), valid_until: None, }) + } else { + None } } } @@ -93,7 +104,7 @@ mod tests { #[test] fn auth_key_from_buffer() { - let auth_key = key::AuthKey::from_buffer([ + let auth_key = 
key::Auth::from_buffer([ 89, 90, 83, 108, 52, 108, 77, 90, 117, 112, 82, 117, 79, 112, 83, 82, 67, 51, 107, 114, 73, 75, 82, 53, 66, 80, 66, 49, 52, 110, 114, 74, ]); @@ -105,7 +116,7 @@ mod tests { #[test] fn auth_key_from_string() { let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key = key::AuthKey::from_string(key_string); + let auth_key = key::Auth::from_string(key_string); assert!(auth_key.is_some()); assert_eq!(auth_key.unwrap().key, key_string); @@ -113,9 +124,9 @@ mod tests { #[test] fn generate_valid_auth_key() { - let auth_key = key::generate_auth_key(Duration::new(9999, 0)); + let auth_key = key::generate(Duration::new(9999, 0)); - assert!(key::verify_auth_key(&auth_key).is_ok()); + assert!(key::verify(&auth_key).is_ok()); } #[test] @@ -124,16 +135,16 @@ mod tests { Current::local_set_to_system_time_now(); // Make key that is valid for 19 seconds. - let auth_key = key::generate_auth_key(Duration::from_secs(19)); + let auth_key = key::generate(Duration::from_secs(19)); // Mock the time has passed 10 sec. Current::local_add(&Duration::from_secs(10)).unwrap(); - assert!(key::verify_auth_key(&auth_key).is_ok()); + assert!(key::verify(&auth_key).is_ok()); // Mock the time has passed another 10 sec. 
Current::local_add(&Duration::from_secs(10)).unwrap(); - assert!(key::verify_auth_key(&auth_key).is_err()); + assert!(key::verify(&auth_key).is_err()); } } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 680f2635d..1e24326da 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -20,13 +20,13 @@ use crate::config::Configuration; use crate::databases::database; use crate::databases::database::Database; use crate::protocol::common::InfoHash; -use crate::tracker::key::AuthKey; +use crate::tracker::key::Auth; use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; pub struct TorrentTracker { pub config: Arc, mode: TrackerMode, - keys: RwLock>, + keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, stats_event_sender: Option>, @@ -66,8 +66,8 @@ impl TorrentTracker { self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed } - pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { - let auth_key = key::generate_auth_key(lifetime); + pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { + let auth_key = key::generate(lifetime); self.database.add_key_to_keys(&auth_key).await?; self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); Ok(auth_key) @@ -79,10 +79,10 @@ impl TorrentTracker { Ok(()) } - pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key::Error> { + pub async fn verify_auth_key(&self, auth_key: &Auth) -> Result<(), key::Error> { match self.keys.read().await.get(&auth_key.key) { None => Err(key::Error::KeyInvalid), - Some(key) => key::verify_auth_key(key), + Some(key) => key::verify(key), } } @@ -145,7 +145,7 @@ impl TorrentTracker { Ok(()) } - pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { + pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { // no authentication needed in public mode if self.is_public() { return Ok(()); diff 
--git a/tests/api.rs b/tests/api.rs index 6cfcbc092..380ab90ca 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -23,7 +23,7 @@ mod tracker_api { use torrust_tracker::jobs::tracker_api; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; use torrust_tracker::protocol::common::{InfoHash, PeerId}; - use torrust_tracker::tracker::key::AuthKey; + use torrust_tracker::tracker::key::Auth; use torrust_tracker::tracker::peer::TorrentPeer; use torrust_tracker::tracker::statistics::StatsTracker; use torrust_tracker::tracker::TorrentTracker; @@ -45,7 +45,7 @@ mod tracker_api { assert!(api_server .tracker .unwrap() - .verify_auth_key(&AuthKey::from(auth_key)) + .verify_auth_key(&Auth::from(auth_key)) .await .is_ok()); } From 363b21a19814762321f4401886cc0744e7573eda Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 24 Nov 2022 19:52:30 +0100 Subject: [PATCH 178/435] clippy: fix src/tracker/mode.rs --- src/config.rs | 6 +++--- src/tracker/mod.rs | 9 ++++----- src/tracker/mode.rs | 2 +- src/udp/handlers.rs | 15 +++++++++------ 4 files changed, 17 insertions(+), 15 deletions(-) diff --git a/src/config.rs b/src/config.rs index 6eb83ad16..9f6ca7092 100644 --- a/src/config.rs +++ b/src/config.rs @@ -10,7 +10,7 @@ use serde_with::{serde_as, NoneAsEmptyString}; use {std, toml}; use crate::databases::database::Drivers; -use crate::tracker::mode::TrackerMode; +use crate::tracker::mode; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct UdpTracker { @@ -41,7 +41,7 @@ pub struct HttpApi { #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct Configuration { pub log_level: Option, - pub mode: TrackerMode, + pub mode: mode::Tracker, pub db_driver: Drivers, pub db_path: String, pub announce_interval: u32, @@ -97,7 +97,7 @@ impl Configuration { pub fn default() -> Configuration { let mut configuration = Configuration { log_level: Option::from(String::from("info")), - mode: TrackerMode::Public, + mode: mode::Tracker::Public, db_driver: 
Drivers::Sqlite3, db_path: String::from("data.db"), announce_interval: 120, diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 1e24326da..0312ac3e2 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -13,7 +13,6 @@ use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; -use self::mode::TrackerMode; use self::peer::TorrentPeer; use self::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender}; use crate::config::Configuration; @@ -25,7 +24,7 @@ use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; pub struct TorrentTracker { pub config: Arc, - mode: TrackerMode, + mode: mode::Tracker, keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, @@ -55,15 +54,15 @@ impl TorrentTracker { } pub fn is_public(&self) -> bool { - self.mode == TrackerMode::Public + self.mode == mode::Tracker::Public } pub fn is_private(&self) -> bool { - self.mode == TrackerMode::Private || self.mode == TrackerMode::PrivateListed + self.mode == mode::Tracker::Private || self.mode == mode::Tracker::PrivateListed } pub fn is_whitelisted(&self) -> bool { - self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed + self.mode == mode::Tracker::Listed || self.mode == mode::Tracker::PrivateListed } pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { diff --git a/src/tracker/mode.rs b/src/tracker/mode.rs index f444b4523..f1fff169e 100644 --- a/src/tracker/mode.rs +++ b/src/tracker/mode.rs @@ -2,7 +2,7 @@ use serde; use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] -pub enum TrackerMode { +pub enum Tracker { // Will track every new info hash and serve every peer. 
#[serde(rename = "public")] Public, diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 679a11ffc..ecf1beae0 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -255,27 +255,30 @@ mod tests { use crate::config::Configuration; use crate::protocol::clock::{Current, Time}; use crate::protocol::common::PeerId; - use crate::tracker::mode::TrackerMode; use crate::tracker::peer::TorrentPeer; use crate::tracker::statistics::StatsTracker; - use crate::tracker::TorrentTracker; + use crate::tracker::{mode, TorrentTracker}; fn default_tracker_config() -> Arc { Arc::new(Configuration::default()) } fn initialized_public_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Public).into()); + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Tracker::Public).into()); initialized_tracker(configuration) } fn initialized_private_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Private).into()); + let configuration = Arc::new( + TrackerConfigurationBuilder::default() + .with_mode(mode::Tracker::Private) + .into(), + ); initialized_tracker(configuration) } fn initialized_whitelisted_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Listed).into()); + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Tracker::Listed).into()); initialized_tracker(configuration) } @@ -355,7 +358,7 @@ mod tests { self } - pub fn with_mode(mut self, mode: TrackerMode) -> Self { + pub fn with_mode(mut self, mode: mode::Tracker) -> Self { self.configuration.mode = mode; self } From 0f281c3ed336ee17446d9ec56a167d82480d8c79 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 24 Nov 2022 20:07:55 +0100 Subject: [PATCH 179/435] clippy: fix src/tracker/peer.rs --- src/http/handlers.rs | 7 +++---- src/tracker/mod.rs | 7 +++---- 
src/tracker/peer.rs | 9 +++++---- src/tracker/torrent.rs | 20 ++++++++++---------- src/udp/handlers.rs | 14 ++++++-------- 5 files changed, 27 insertions(+), 30 deletions(-) diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 793de9ef5..5dab842e2 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -13,10 +13,9 @@ use super::{request, WebResult}; use crate::http::response::Error; use crate::protocol::common::InfoHash; use crate::tracker::key::Auth; -use crate::tracker::peer::TorrentPeer; use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::{TorrentError, TorrentStats}; -use crate::tracker::TorrentTracker; +use crate::tracker::{peer, TorrentTracker}; /// Authenticate `InfoHash` using optional `AuthKey` /// @@ -55,7 +54,7 @@ pub async fn handle_announce( debug!("{:?}", announce_request); let peer = - TorrentPeer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); + peer::TorrentPeer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); let torrent_stats = tracker .update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer) .await; @@ -143,7 +142,7 @@ pub async fn handle_scrape( fn send_announce_response( announce_request: &request::Announce, torrent_stats: &TorrentStats, - peers: &Vec, + peers: &Vec, interval: u32, interval_min: u32, ) -> WebResult { diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 0312ac3e2..fab254663 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -13,7 +13,6 @@ use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; -use self::peer::TorrentPeer; use self::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender}; use crate::config::Configuration; use crate::databases::database; @@ -195,7 +194,7 @@ impl TorrentTracker { } /// Get all torrent peers for a given 
torrent filtering out the peer with the client address - pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { + pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { @@ -205,7 +204,7 @@ impl TorrentTracker { } /// Get all torrent peers for a given torrent - pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { + pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { @@ -214,7 +213,7 @@ impl TorrentTracker { } } - pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &TorrentPeer) -> TorrentStats { + pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::TorrentPeer) -> TorrentStats { let mut torrents = self.torrents.write().await; let torrent_entry = match torrents.entry(*info_hash) { diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 115a2bfb9..d590b590d 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -60,13 +60,14 @@ impl TorrentPeer { AnnounceEvent::None }; + #[allow(clippy::cast_possible_truncation)] TorrentPeer { - peer_id: announce_request.peer_id, + peer_id: announce_request.peer_id.clone(), peer_addr, updated: Current::now(), - uploaded: NumberOfBytes(announce_request.uploaded as i64), - downloaded: NumberOfBytes(announce_request.downloaded as i64), - left: NumberOfBytes(announce_request.left as i64), + uploaded: NumberOfBytes(i128::from(announce_request.uploaded) as i64), + downloaded: NumberOfBytes(i128::from(announce_request.downloaded) as i64), + left: NumberOfBytes(i128::from(announce_request.left) as i64), event, } } diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 4007976c9..734e7a66c 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -4,14 +4,14 @@ use 
std::time::Duration; use aquatic_udp_protocol::AnnounceEvent; use serde::{Deserialize, Serialize}; -use super::peer::TorrentPeer; +use super::peer; use crate::protocol::clock::{Current, TimeNow}; use crate::protocol::common::{PeerId, MAX_SCRAPE_TORRENTS}; #[derive(Serialize, Deserialize, Clone, Debug)] pub struct TorrentEntry { #[serde(skip)] - pub peers: std::collections::BTreeMap, + pub peers: std::collections::BTreeMap, pub completed: u32, } @@ -25,7 +25,7 @@ impl TorrentEntry { } // Update peer and return completed (times torrent has been downloaded) - pub fn update_peer(&mut self, peer: &TorrentPeer) -> bool { + pub fn update_peer(&mut self, peer: &peer::TorrentPeer) -> bool { let mut did_torrent_stats_change: bool = false; match peer.event { @@ -49,7 +49,7 @@ impl TorrentEntry { } #[must_use] - pub fn get_peers(&self, client_addr: Option<&SocketAddr>) -> Vec<&TorrentPeer> { + pub fn get_peers(&self, client_addr: Option<&SocketAddr>) -> Vec<&peer::TorrentPeer> { self.peers .values() .filter(|peer| match client_addr { @@ -118,16 +118,16 @@ mod tests { use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; use crate::protocol::common::PeerId; - use crate::tracker::peer::TorrentPeer; + use crate::tracker::peer; use crate::tracker::torrent::TorrentEntry; struct TorrentPeerBuilder { - peer: TorrentPeer, + peer: peer::TorrentPeer, } impl TorrentPeerBuilder { pub fn default() -> TorrentPeerBuilder { - let default_peer = TorrentPeer { + let default_peer = peer::TorrentPeer { peer_id: PeerId([0u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), updated: Current::now(), @@ -164,14 +164,14 @@ mod tests { self } - pub fn into(self) -> TorrentPeer { + pub fn into(self) -> peer::TorrentPeer { self.peer } } /// A torrent seeder is a peer with 0 bytes left to download which /// has not announced it has stopped - fn a_torrent_seeder() -> TorrentPeer { + fn a_torrent_seeder() -> peer::TorrentPeer { 
TorrentPeerBuilder::default() .with_number_of_bytes_left(0) .with_event_completed() @@ -180,7 +180,7 @@ mod tests { /// A torrent leecher is a peer that is not a seeder. /// Leecher: left > 0 OR event = Stopped - fn a_torrent_leecher() -> TorrentPeer { + fn a_torrent_leecher() -> peer::TorrentPeer { TorrentPeerBuilder::default() .with_number_of_bytes_left(1) .with_event_completed() diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index ecf1beae0..d1ae72924 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -8,10 +8,9 @@ use aquatic_udp_protocol::{ use super::connection_cookie::{check_connection_cookie, from_connection_id, into_connection_id, make_connection_cookie}; use crate::protocol::common::{InfoHash, MAX_SCRAPE_TORRENTS}; -use crate::tracker::peer::TorrentPeer; use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::TorrentError; -use crate::tracker::TorrentTracker; +use crate::tracker::{peer, TorrentTracker}; use crate::udp::errors::ServerError; use crate::udp::request::AnnounceRequestWrapper; @@ -106,7 +105,7 @@ pub async fn handle_announce( authenticate(&wrapped_announce_request.info_hash, tracker.clone()).await?; - let peer = TorrentPeer::from_udp_announce_request( + let peer = peer::TorrentPeer::from_udp_announce_request( &wrapped_announce_request.announce_request, remote_addr.ip(), tracker.config.get_ext_ip(), @@ -255,9 +254,8 @@ mod tests { use crate::config::Configuration; use crate::protocol::clock::{Current, Time}; use crate::protocol::common::PeerId; - use crate::tracker::peer::TorrentPeer; use crate::tracker::statistics::StatsTracker; - use crate::tracker::{mode, TorrentTracker}; + use crate::tracker::{mode, peer, TorrentTracker}; fn default_tracker_config() -> Arc { Arc::new(Configuration::default()) @@ -304,12 +302,12 @@ mod tests { } struct TorrentPeerBuilder { - peer: TorrentPeer, + peer: peer::TorrentPeer, } impl TorrentPeerBuilder { pub fn default() -> TorrentPeerBuilder { - let default_peer = 
TorrentPeer { + let default_peer = peer::TorrentPeer { peer_id: PeerId([255u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: Current::now(), @@ -336,7 +334,7 @@ mod tests { self } - pub fn into(self) -> TorrentPeer { + pub fn into(self) -> peer::TorrentPeer { self.peer } } From 3c2232388fa0bd79fe6b2e9068e6c2375202e5ed Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 24 Nov 2022 21:49:03 +0100 Subject: [PATCH 180/435] clippy: fix src/tracker/statistics.rs --- src/tracker/mod.rs | 4 ++-- src/tracker/statistics.rs | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index fab254663..5877c7f21 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -13,7 +13,7 @@ use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; -use self::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender}; +use self::statistics::{Metrics, StatsRepository, TrackerStatisticsEvent, TrackerStatisticsEventSender}; use crate::config::Configuration; use crate::databases::database; use crate::databases::database::Database; @@ -244,7 +244,7 @@ impl TorrentTracker { self.torrents.read().await } - pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { + pub async fn get_stats(&self) -> RwLockReadGuard<'_, Metrics> { self.stats_repository.get_stats().await } diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index 609f036aa..fd830fa88 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -25,7 +25,7 @@ pub enum TrackerStatisticsEvent { } #[derive(Debug)] -pub struct TrackerStatistics { +pub struct Metrics { pub tcp4_connections_handled: u64, pub tcp4_announces_handled: u64, pub tcp4_scrapes_handled: u64, @@ -40,13 +40,13 @@ pub struct TrackerStatistics { pub udp6_scrapes_handled: u64, } -impl Default for TrackerStatistics 
{ +impl Default for Metrics { fn default() -> Self { Self::new() } } -impl TrackerStatistics { +impl Metrics { #[must_use] pub fn new() -> Self { Self { @@ -177,7 +177,7 @@ impl TrackerStatisticsEventSender for StatsEventSender { #[derive(Clone)] pub struct StatsRepository { - pub stats: Arc>, + pub stats: Arc>, } impl Default for StatsRepository { @@ -190,11 +190,11 @@ impl StatsRepository { #[must_use] pub fn new() -> Self { Self { - stats: Arc::new(RwLock::new(TrackerStatistics::new())), + stats: Arc::new(RwLock::new(Metrics::new())), } } - pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { + pub async fn get_stats(&self) -> RwLockReadGuard<'_, Metrics> { self.stats.read().await } @@ -275,7 +275,7 @@ impl StatsRepository { mod tests { mod stats_tracker { - use crate::tracker::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsEvent}; + use crate::tracker::statistics::{Metrics, StatsTracker, TrackerStatisticsEvent}; #[tokio::test] async fn should_contain_the_tracker_statistics() { @@ -283,7 +283,7 @@ mod tests { let stats = stats_tracker.stats_repository.get_stats().await; - assert_eq!(stats.tcp4_announces_handled, TrackerStatistics::new().tcp4_announces_handled); + assert_eq!(stats.tcp4_announces_handled, Metrics::new().tcp4_announces_handled); } #[tokio::test] From 143a11e18b5970420a391f848449f19dab7f82da Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 24 Nov 2022 22:18:48 +0100 Subject: [PATCH 181/435] refactor: cleanup src/tracker/statistics.rs naming --- src/http/handlers.rs | 11 ++- src/stats.rs | 8 +-- src/tracker/mod.rs | 13 ++-- src/tracker/statistics.rs | 137 +++++++++++++++++++------------------- src/udp/handlers.rs | 75 ++++++++++----------- tests/api.rs | 4 +- tests/udp.rs | 4 +- 7 files changed, 120 insertions(+), 132 deletions(-) diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 5dab842e2..5256ef291 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -13,9 +13,8 @@ use 
super::{request, WebResult}; use crate::http::response::Error; use crate::protocol::common::InfoHash; use crate::tracker::key::Auth; -use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::{TorrentError, TorrentStats}; -use crate::tracker::{peer, TorrentTracker}; +use crate::tracker::{peer, statistics, TorrentTracker}; /// Authenticate `InfoHash` using optional `AuthKey` /// @@ -67,10 +66,10 @@ pub async fn handle_announce( // send stats event match announce_request.peer_addr { IpAddr::V4(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Tcp4Announce).await; + tracker.send_stats_event(statistics::Event::Tcp4Announce).await; } IpAddr::V6(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Tcp6Announce).await; + tracker.send_stats_event(statistics::Event::Tcp6Announce).await; } } @@ -127,10 +126,10 @@ pub async fn handle_scrape( // send stats event match scrape_request.peer_addr { IpAddr::V4(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Tcp4Scrape).await; + tracker.send_stats_event(statistics::Event::Tcp4Scrape).await; } IpAddr::V6(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Tcp6Scrape).await; + tracker.send_stats_event(statistics::Event::Tcp6Scrape).await; } } diff --git a/src/stats.rs b/src/stats.rs index 738909934..8f87c01a3 100644 --- a/src/stats.rs +++ b/src/stats.rs @@ -1,16 +1,16 @@ -use crate::tracker::statistics::{StatsRepository, StatsTracker, TrackerStatisticsEventSender}; +use crate::tracker::statistics; #[must_use] -pub fn setup_statistics(tracker_usage_statistics: bool) -> (Option>, StatsRepository) { +pub fn setup_statistics(tracker_usage_statistics: bool) -> (Option>, statistics::Repo) { let mut stats_event_sender = None; - let mut stats_tracker = StatsTracker::new(); + let mut stats_tracker = statistics::Keeper::new(); if tracker_usage_statistics { stats_event_sender = Some(stats_tracker.run_event_listener()); } - (stats_event_sender, stats_tracker.stats_repository) + 
(stats_event_sender, stats_tracker.repository) } #[cfg(test)] diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 5877c7f21..d0ab3e514 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -13,7 +13,6 @@ use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; -use self::statistics::{Metrics, StatsRepository, TrackerStatisticsEvent, TrackerStatisticsEventSender}; use crate::config::Configuration; use crate::databases::database; use crate::databases::database::Database; @@ -27,16 +26,16 @@ pub struct TorrentTracker { keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, - stats_event_sender: Option>, - stats_repository: StatsRepository, + stats_event_sender: Option>, + stats_repository: statistics::Repo, database: Box, } impl TorrentTracker { pub fn new( config: Arc, - stats_event_sender: Option>, - stats_repository: StatsRepository, + stats_event_sender: Option>, + stats_repository: statistics::Repo, ) -> Result { let database = database::connect(&config.db_driver, &config.db_path)?; @@ -244,11 +243,11 @@ impl TorrentTracker { self.torrents.read().await } - pub async fn get_stats(&self) -> RwLockReadGuard<'_, Metrics> { + pub async fn get_stats(&self) -> RwLockReadGuard<'_, statistics::Metrics> { self.stats_repository.get_stats().await } - pub async fn send_stats_event(&self, event: TrackerStatisticsEvent) -> Option>> { + pub async fn send_stats_event(&self, event: statistics::Event) -> Option>> { match &self.stats_event_sender { None => None, Some(stats_event_sender) => stats_event_sender.send_event(event).await, diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index fd830fa88..b787e1267 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -5,13 +5,12 @@ use log::debug; #[cfg(test)] use mockall::{automock, predicate::str}; use tokio::sync::mpsc::error::SendError; -use tokio::sync::mpsc::{Receiver, Sender}; use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; const 
CHANNEL_BUFFER_SIZE: usize = 65_535; #[derive(Debug, PartialEq, Eq)] -pub enum TrackerStatisticsEvent { +pub enum Event { Tcp4Announce, Tcp4Scrape, Tcp6Announce, @@ -66,91 +65,89 @@ impl Metrics { } } -pub struct StatsTracker { - pub stats_repository: StatsRepository, +pub struct Keeper { + pub repository: Repo, } -impl Default for StatsTracker { +impl Default for Keeper { fn default() -> Self { Self::new() } } -impl StatsTracker { +impl Keeper { #[must_use] pub fn new() -> Self { - Self { - stats_repository: StatsRepository::new(), - } + Self { repository: Repo::new() } } #[must_use] - pub fn new_active_instance() -> (Box, StatsRepository) { + pub fn new_active_instance() -> (Box, Repo) { let mut stats_tracker = Self::new(); let stats_event_sender = stats_tracker.run_event_listener(); - (stats_event_sender, stats_tracker.stats_repository) + (stats_event_sender, stats_tracker.repository) } - pub fn run_event_listener(&mut self) -> Box { - let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); + pub fn run_event_listener(&mut self) -> Box { + let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); - let stats_repository = self.stats_repository.clone(); + let stats_repository = self.repository.clone(); tokio::spawn(async move { event_listener(receiver, stats_repository).await }); - Box::new(StatsEventSender { sender }) + Box::new(Sender { sender }) } } -async fn event_listener(mut receiver: Receiver, stats_repository: StatsRepository) { +async fn event_listener(mut receiver: mpsc::Receiver, stats_repository: Repo) { while let Some(event) = receiver.recv().await { event_handler(event, &stats_repository).await; } } -async fn event_handler(event: TrackerStatisticsEvent, stats_repository: &StatsRepository) { +async fn event_handler(event: Event, stats_repository: &Repo) { match event { // TCP4 - TrackerStatisticsEvent::Tcp4Announce => { + Event::Tcp4Announce => { stats_repository.increase_tcp4_announces().await; 
stats_repository.increase_tcp4_connections().await; } - TrackerStatisticsEvent::Tcp4Scrape => { + Event::Tcp4Scrape => { stats_repository.increase_tcp4_scrapes().await; stats_repository.increase_tcp4_connections().await; } // TCP6 - TrackerStatisticsEvent::Tcp6Announce => { + Event::Tcp6Announce => { stats_repository.increase_tcp6_announces().await; stats_repository.increase_tcp6_connections().await; } - TrackerStatisticsEvent::Tcp6Scrape => { + Event::Tcp6Scrape => { stats_repository.increase_tcp6_scrapes().await; stats_repository.increase_tcp6_connections().await; } // UDP4 - TrackerStatisticsEvent::Udp4Connect => { + Event::Udp4Connect => { stats_repository.increase_udp4_connections().await; } - TrackerStatisticsEvent::Udp4Announce => { + Event::Udp4Announce => { stats_repository.increase_udp4_announces().await; } - TrackerStatisticsEvent::Udp4Scrape => { + Event::Udp4Scrape => { stats_repository.increase_udp4_scrapes().await; } // UDP6 - TrackerStatisticsEvent::Udp6Connect => { + Event::Udp6Connect => { stats_repository.increase_udp6_connections().await; } - TrackerStatisticsEvent::Udp6Announce => { + Event::Udp6Announce => { stats_repository.increase_udp6_announces().await; } - TrackerStatisticsEvent::Udp6Scrape => { + Event::Udp6Scrape => { stats_repository.increase_udp6_scrapes().await; } } @@ -160,33 +157,33 @@ async fn event_handler(event: TrackerStatisticsEvent, stats_repository: &StatsRe #[async_trait] #[cfg_attr(test, automock)] -pub trait TrackerStatisticsEventSender: Sync + Send { - async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>>; +pub trait EventSender: Sync + Send { + async fn send_event(&self, event: Event) -> Option>>; } -pub struct StatsEventSender { - sender: Sender, +pub struct Sender { + sender: mpsc::Sender, } #[async_trait] -impl TrackerStatisticsEventSender for StatsEventSender { - async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>> { +impl EventSender for Sender { + async fn send_event(&self, event: 
Event) -> Option>> { Some(self.sender.send(event).await) } } #[derive(Clone)] -pub struct StatsRepository { +pub struct Repo { pub stats: Arc>, } -impl Default for StatsRepository { +impl Default for Repo { fn default() -> Self { Self::new() } } -impl StatsRepository { +impl Repo { #[must_use] pub fn new() -> Self { Self { @@ -275,37 +272,37 @@ impl StatsRepository { mod tests { mod stats_tracker { - use crate::tracker::statistics::{Metrics, StatsTracker, TrackerStatisticsEvent}; + use crate::tracker::statistics::{Event, Keeper, Metrics}; #[tokio::test] async fn should_contain_the_tracker_statistics() { - let stats_tracker = StatsTracker::new(); + let stats_tracker = Keeper::new(); - let stats = stats_tracker.stats_repository.get_stats().await; + let stats = stats_tracker.repository.get_stats().await; assert_eq!(stats.tcp4_announces_handled, Metrics::new().tcp4_announces_handled); } #[tokio::test] async fn should_create_an_event_sender_to_send_statistical_events() { - let mut stats_tracker = StatsTracker::new(); + let mut stats_tracker = Keeper::new(); let event_sender = stats_tracker.run_event_listener(); - let result = event_sender.send_event(TrackerStatisticsEvent::Udp4Connect).await; + let result = event_sender.send_event(Event::Udp4Connect).await; assert!(result.is_some()); } } mod event_handler { - use crate::tracker::statistics::{event_handler, StatsRepository, TrackerStatisticsEvent}; + use crate::tracker::statistics::{event_handler, Event, Repo}; #[tokio::test] async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp4Announce, &stats_repository).await; + event_handler(Event::Tcp4Announce, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -314,9 +311,9 @@ mod tests { #[tokio::test] async fn 
should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_announce_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp4Announce, &stats_repository).await; + event_handler(Event::Tcp4Announce, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -325,9 +322,9 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp4_scrapes_counter_when_it_receives_a_tcp4_scrape_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp4Scrape, &stats_repository).await; + event_handler(Event::Tcp4Scrape, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -336,9 +333,9 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_scrape_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp4Scrape, &stats_repository).await; + event_handler(Event::Tcp4Scrape, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -347,9 +344,9 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp6_announces_counter_when_it_receives_a_tcp6_announce_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp6Announce, &stats_repository).await; + event_handler(Event::Tcp6Announce, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -358,9 +355,9 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_announce_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp6Announce, &stats_repository).await; + event_handler(Event::Tcp6Announce, &stats_repository).await; let stats = 
stats_repository.get_stats().await; @@ -369,9 +366,9 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp6_scrapes_counter_when_it_receives_a_tcp6_scrape_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp6Scrape, &stats_repository).await; + event_handler(Event::Tcp6Scrape, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -380,9 +377,9 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_scrape_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp6Scrape, &stats_repository).await; + event_handler(Event::Tcp6Scrape, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -391,9 +388,9 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_connections_counter_when_it_receives_a_udp4_connect_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Udp4Connect, &stats_repository).await; + event_handler(Event::Udp4Connect, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -402,9 +399,9 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_announces_counter_when_it_receives_a_udp4_announce_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Udp4Announce, &stats_repository).await; + event_handler(Event::Udp4Announce, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -413,9 +410,9 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_scrapes_counter_when_it_receives_a_udp4_scrape_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Udp4Scrape, 
&stats_repository).await; + event_handler(Event::Udp4Scrape, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -424,9 +421,9 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_connections_counter_when_it_receives_a_udp6_connect_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Udp6Connect, &stats_repository).await; + event_handler(Event::Udp6Connect, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -435,9 +432,9 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_announces_counter_when_it_receives_a_udp6_announce_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Udp6Announce, &stats_repository).await; + event_handler(Event::Udp6Announce, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -446,9 +443,9 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_scrapes_counter_when_it_receives_a_udp6_scrape_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Udp6Scrape, &stats_repository).await; + event_handler(Event::Udp6Scrape, &stats_repository).await; let stats = stats_repository.get_stats().await; diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index d1ae72924..f460c1b7e 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -8,9 +8,8 @@ use aquatic_udp_protocol::{ use super::connection_cookie::{check_connection_cookie, from_connection_id, into_connection_id, make_connection_cookie}; use crate::protocol::common::{InfoHash, MAX_SCRAPE_TORRENTS}; -use crate::tracker::statistics::TrackerStatisticsEvent; use crate::tracker::torrent::TorrentError; -use crate::tracker::{peer, TorrentTracker}; +use crate::tracker::{peer, statistics, TorrentTracker}; use crate::udp::errors::ServerError; use 
crate::udp::request::AnnounceRequestWrapper; @@ -79,10 +78,10 @@ pub async fn handle_connect( // send stats event match remote_addr { SocketAddr::V4(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Udp4Connect).await; + tracker.send_stats_event(statistics::Event::Udp4Connect).await; } SocketAddr::V6(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Udp6Connect).await; + tracker.send_stats_event(statistics::Event::Udp6Connect).await; } } @@ -167,10 +166,10 @@ pub async fn handle_announce( // send stats event match remote_addr { SocketAddr::V4(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Udp4Announce).await; + tracker.send_stats_event(statistics::Event::Udp4Announce).await; } SocketAddr::V6(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Udp6Announce).await; + tracker.send_stats_event(statistics::Event::Udp6Announce).await; } } @@ -223,10 +222,10 @@ pub async fn handle_scrape( // send stats event match remote_addr { SocketAddr::V4(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Udp4Scrape).await; + tracker.send_stats_event(statistics::Event::Udp4Scrape).await; } SocketAddr::V6(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Udp6Scrape).await; + tracker.send_stats_event(statistics::Event::Udp6Scrape).await; } } @@ -254,8 +253,7 @@ mod tests { use crate::config::Configuration; use crate::protocol::clock::{Current, Time}; use crate::protocol::common::PeerId; - use crate::tracker::statistics::StatsTracker; - use crate::tracker::{mode, peer, TorrentTracker}; + use crate::tracker::{mode, peer, statistics, TorrentTracker}; fn default_tracker_config() -> Arc { Arc::new(Configuration::default()) @@ -281,7 +279,7 @@ mod tests { } fn initialized_tracker(configuration: Arc) -> Arc { - let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); + let (stats_event_sender, stats_repository) = statistics::Keeper::new_active_instance(); Arc::new(TorrentTracker::new(configuration, 
Some(stats_event_sender), stats_repository).unwrap()) } @@ -375,8 +373,7 @@ mod tests { use mockall::predicate::eq; use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr}; - use crate::tracker::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::TorrentTracker; + use crate::tracker::{statistics, TorrentTracker}; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handlers::handle_connect; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; @@ -427,10 +424,10 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { - let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + let mut stats_event_sender_mock = statistics::MockEventSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(TrackerStatisticsEvent::Udp4Connect)) + .with(eq(statistics::Event::Udp4Connect)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); @@ -438,7 +435,7 @@ mod tests { let client_socket_address = sample_ipv4_socket_address(); let torrent_tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_connect(client_socket_address, &sample_connect_request(), torrent_tracker) .await @@ -447,16 +444,16 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { - let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + let mut stats_event_sender_mock = statistics::MockEventSender::new(); stats_event_sender_mock .expect_send_event() - 
.with(eq(TrackerStatisticsEvent::Udp6Connect)) + .with(eq(statistics::Event::Udp6Connect)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); let torrent_tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), torrent_tracker) .await @@ -548,8 +545,7 @@ mod tests { use mockall::predicate::eq; use crate::protocol::common::PeerId; - use crate::tracker::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::TorrentTracker; + use crate::tracker::{statistics, TorrentTracker}; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -692,16 +688,16 @@ mod tests { #[tokio::test] async fn should_send_the_upd4_announce_event() { - let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + let mut stats_event_sender_mock = statistics::MockEventSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(TrackerStatisticsEvent::Udp4Announce)) + .with(eq(statistics::Event::Udp4Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_announce( @@ -773,8 +769,7 @@ mod tests { use mockall::predicate::eq; use crate::protocol::common::PeerId; - use crate::tracker::statistics::{MockTrackerStatisticsEventSender, 
StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::TorrentTracker; + use crate::tracker::{statistics, TorrentTracker}; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -924,16 +919,16 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_announce_event() { - let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + let mut stats_event_sender_mock = statistics::MockEventSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(TrackerStatisticsEvent::Udp6Announce)) + .with(eq(statistics::Event::Udp6Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); let remote_addr = sample_ipv6_remote_addr(); @@ -953,7 +948,7 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::tracker::statistics::StatsTracker; + use crate::tracker::statistics::Keeper; use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handlers::handle_announce; @@ -963,7 +958,7 @@ mod tests { #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); let tracker = Arc::new(TorrentTracker::new(configuration, Some(stats_event_sender), 
stats_repository).unwrap()); @@ -1233,24 +1228,23 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; - use crate::tracker::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::TorrentTracker; + use crate::tracker::{statistics, TorrentTracker}; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr}; #[tokio::test] async fn should_send_the_upd4_scrape_event() { - let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + let mut stats_event_sender_mock = statistics::MockEventSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(TrackerStatisticsEvent::Udp4Scrape)) + .with(eq(statistics::Event::Udp4Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); let remote_addr = sample_ipv4_remote_addr(); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) @@ -1266,24 +1260,23 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; - use crate::tracker::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::TorrentTracker; + use crate::tracker::{statistics, TorrentTracker}; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr}; #[tokio::test] async fn should_send_the_upd6_scrape_event() { - let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + let mut stats_event_sender_mock = statistics::MockEventSender::new(); stats_event_sender_mock .expect_send_event() - 
.with(eq(TrackerStatisticsEvent::Udp6Scrape)) + .with(eq(statistics::Event::Udp6Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); let remote_addr = sample_ipv6_remote_addr(); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) diff --git a/tests/api.rs b/tests/api.rs index 380ab90ca..a4043fe7c 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -25,7 +25,7 @@ mod tracker_api { use torrust_tracker::protocol::common::{InfoHash, PeerId}; use torrust_tracker::tracker::key::Auth; use torrust_tracker::tracker::peer::TorrentPeer; - use torrust_tracker::tracker::statistics::StatsTracker; + use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::tracker::TorrentTracker; use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; @@ -276,7 +276,7 @@ mod tracker_api { lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); // Initialize stats tracker - let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); // Initialize Torrust tracker let tracker = match TorrentTracker::new(configuration.clone(), Some(stats_event_sender), stats_repository) { diff --git a/tests/udp.rs b/tests/udp.rs index b365c4fc6..fabca137a 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -20,7 +20,7 @@ mod udp_tracker_server { use tokio::task::JoinHandle; use torrust_tracker::config::Configuration; use torrust_tracker::jobs::udp_tracker; - use torrust_tracker::tracker::statistics::StatsTracker; + use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::tracker::TorrentTracker; use 
torrust_tracker::udp::MAX_PACKET_SIZE; use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; @@ -58,7 +58,7 @@ mod udp_tracker_server { lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); // Initialize stats tracker - let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); // Initialize Torrust tracker let tracker = match TorrentTracker::new(configuration.clone(), Some(stats_event_sender), stats_repository) { From 81e72da07fe4359384c058540c3c33c6353b1ad2 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 24 Nov 2022 22:26:32 +0100 Subject: [PATCH 182/435] clippy: fix src/tracker/torrent.rs --- src/http/handlers.rs | 19 ++++++++--------- src/tracker/mod.rs | 21 +++++++++---------- src/tracker/torrent.rs | 47 +++++++++++++++++++++--------------------- src/udp/handlers.rs | 15 +++++++------- 4 files changed, 50 insertions(+), 52 deletions(-) diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 5256ef291..ace20ada9 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -13,26 +13,25 @@ use super::{request, WebResult}; use crate::http::response::Error; use crate::protocol::common::InfoHash; use crate::tracker::key::Auth; -use crate::tracker::torrent::{TorrentError, TorrentStats}; -use crate::tracker::{peer, statistics, TorrentTracker}; +use crate::tracker::{peer, statistics, torrent, TorrentTracker}; /// Authenticate `InfoHash` using optional `AuthKey` /// /// # Errors /// -/// Will return `ServerError` that wraps the `TorrentError` if unable to `authenticate_request`. +/// Will return `ServerError` that wraps the `Error` if unable to `authenticate_request`. 
pub async fn authenticate( info_hash: &InfoHash, auth_key: &Option, tracker: Arc, ) -> Result<(), ServerError> { tracker.authenticate_request(info_hash, auth_key).await.map_err(|e| match e { - TorrentError::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, - TorrentError::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, - TorrentError::PeerKeyNotValid => ServerError::PeerKeyNotValid, - TorrentError::NoPeersFound => ServerError::NoPeersFound, - TorrentError::CouldNotSendResponse => ServerError::InternalServerError, - TorrentError::InvalidInfoHash => ServerError::InvalidInfoHash, + torrent::Error::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, + torrent::Error::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, + torrent::Error::PeerKeyNotValid => ServerError::PeerKeyNotValid, + torrent::Error::NoPeersFound => ServerError::NoPeersFound, + torrent::Error::CouldNotSendResponse => ServerError::InternalServerError, + torrent::Error::InvalidInfoHash => ServerError::InvalidInfoHash, }) } @@ -140,7 +139,7 @@ pub async fn handle_scrape( #[allow(clippy::ptr_arg)] fn send_announce_response( announce_request: &request::Announce, - torrent_stats: &TorrentStats, + torrent_stats: &torrent::Stats, peers: &Vec, interval: u32, interval_min: u32, diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index d0ab3e514..b3a7ab6d6 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -18,14 +18,13 @@ use crate::databases::database; use crate::databases::database::Database; use crate::protocol::common::InfoHash; use crate::tracker::key::Auth; -use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; pub struct TorrentTracker { pub config: Arc, mode: mode::Tracker, keys: RwLock>, whitelist: RwLock>, - torrents: RwLock>, + torrents: RwLock>, stats_event_sender: Option>, stats_repository: statistics::Repo, database: Box, @@ -142,7 +141,7 @@ impl TorrentTracker { Ok(()) } - pub async fn authenticate_request(&self, info_hash: 
&InfoHash, key: &Option) -> Result<(), TorrentError> { + pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), torrent::Error> { // no authentication needed in public mode if self.is_public() { return Ok(()); @@ -153,18 +152,18 @@ impl TorrentTracker { match key { Some(key) => { if self.verify_auth_key(key).await.is_err() { - return Err(TorrentError::PeerKeyNotValid); + return Err(torrent::Error::PeerKeyNotValid); } } None => { - return Err(TorrentError::PeerNotAuthenticated); + return Err(torrent::Error::PeerNotAuthenticated); } } } // check if info_hash is whitelisted if self.is_whitelisted() && !self.is_info_hash_whitelisted(info_hash).await { - return Err(TorrentError::TorrentNotWhitelisted); + return Err(torrent::Error::TorrentNotWhitelisted); } Ok(()) @@ -181,7 +180,7 @@ impl TorrentTracker { continue; } - let torrent_entry = TorrentEntry { + let torrent_entry = torrent::Entry { peers: Default::default(), completed, }; @@ -212,11 +211,11 @@ impl TorrentTracker { } } - pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::TorrentPeer) -> TorrentStats { + pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::TorrentPeer) -> torrent::Stats { let mut torrents = self.torrents.write().await; let torrent_entry = match torrents.entry(*info_hash) { - Entry::Vacant(vacant) => vacant.insert(TorrentEntry::new()), + Entry::Vacant(vacant) => vacant.insert(torrent::Entry::new()), Entry::Occupied(entry) => entry.into_mut(), }; @@ -232,14 +231,14 @@ impl TorrentTracker { let (seeders, completed, leechers) = torrent_entry.get_stats(); - TorrentStats { + torrent::Stats { completed, seeders, leechers, } } - pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap> { + pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap> { self.torrents.read().await } diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 734e7a66c..21bcfc513 100644 
--- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -9,16 +9,16 @@ use crate::protocol::clock::{Current, TimeNow}; use crate::protocol::common::{PeerId, MAX_SCRAPE_TORRENTS}; #[derive(Serialize, Deserialize, Clone, Debug)] -pub struct TorrentEntry { +pub struct Entry { #[serde(skip)] pub peers: std::collections::BTreeMap, pub completed: u32, } -impl TorrentEntry { +impl Entry { #[must_use] - pub fn new() -> TorrentEntry { - TorrentEntry { + pub fn new() -> Entry { + Entry { peers: std::collections::BTreeMap::new(), completed: 0, } @@ -72,6 +72,7 @@ impl TorrentEntry { .collect() } + #[allow(clippy::cast_possible_truncation)] #[must_use] pub fn get_stats(&self) -> (u32, u32, u32) { let seeders: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; @@ -85,21 +86,21 @@ impl TorrentEntry { } } -impl Default for TorrentEntry { +impl Default for Entry { fn default() -> Self { Self::new() } } #[derive(Debug)] -pub struct TorrentStats { +pub struct Stats { pub completed: u32, pub seeders: u32, pub leechers: u32, } #[derive(Debug)] -pub enum TorrentError { +pub enum Error { TorrentNotWhitelisted, PeerNotAuthenticated, PeerKeyNotValid, @@ -119,7 +120,7 @@ mod tests { use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; use crate::protocol::common::PeerId; use crate::tracker::peer; - use crate::tracker::torrent::TorrentEntry; + use crate::tracker::torrent::Entry; struct TorrentPeerBuilder { peer: peer::TorrentPeer, @@ -189,14 +190,14 @@ mod tests { #[test] fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { - let torrent_entry = TorrentEntry::new(); + let torrent_entry = Entry::new(); assert_eq!(torrent_entry.get_peers(None).len(), 0); } #[test] fn a_new_peer_can_be_added_to_a_torrent_entry() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.update_peer(&torrent_peer); // 
Add the peer @@ -207,7 +208,7 @@ mod tests { #[test] fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.update_peer(&torrent_peer); // Add the peer @@ -217,7 +218,7 @@ mod tests { #[test] fn a_peer_can_be_updated_in_a_torrent_entry() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.update_peer(&torrent_peer); // Add the peer @@ -229,7 +230,7 @@ mod tests { #[test] fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.update_peer(&torrent_peer); // Add the peer @@ -241,7 +242,7 @@ mod tests { #[test] fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.update_peer(&torrent_peer); // Add the peer @@ -255,7 +256,7 @@ mod tests { #[test] fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( ) { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Add a peer that did not exist before in the entry @@ -266,7 +267,7 @@ mod tests { #[test] fn a_torrent_entry_could_filter_out_peers_with_a_given_socket_address() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let peer_socket_address = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); torrent_entry.update_peer(&torrent_peer); // Add peer @@ -287,7 +288,7 @@ mod tests { #[test] fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); // We add one more peer than the scrape limit for peer_number in 1..=74 + 1 { @@ -299,12 +300,12 @@ mod tests { let peers = torrent_entry.get_peers(None); - assert_eq!(peers.len(), 74) + assert_eq!(peers.len(), 74); } #[test] fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let torrent_seeder = a_torrent_seeder(); torrent_entry.update_peer(&torrent_seeder); // Add seeder @@ -314,7 +315,7 @@ mod tests { #[test] fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let torrent_leecher = a_torrent_leecher(); torrent_entry.update_peer(&torrent_leecher); // Add leecher @@ -325,7 +326,7 @@ mod tests { #[test] fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( ) { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.update_peer(&torrent_peer); // Add the peer @@ -340,7 +341,7 @@ mod tests { #[test] fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Announce "Completed" torrent download event. 
@@ -354,7 +355,7 @@ mod tests { #[test] fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let timeout = 120u32; diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index f460c1b7e..632180a92 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -8,8 +8,7 @@ use aquatic_udp_protocol::{ use super::connection_cookie::{check_connection_cookie, from_connection_id, into_connection_id, make_connection_cookie}; use crate::protocol::common::{InfoHash, MAX_SCRAPE_TORRENTS}; -use crate::tracker::torrent::TorrentError; -use crate::tracker::{peer, statistics, TorrentTracker}; +use crate::tracker::{peer, statistics, torrent, TorrentTracker}; use crate::udp::errors::ServerError; use crate::udp::request::AnnounceRequestWrapper; @@ -18,12 +17,12 @@ pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Ok(_) => Ok(()), Err(e) => { let err = match e { - TorrentError::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, - TorrentError::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, - TorrentError::PeerKeyNotValid => ServerError::PeerKeyNotValid, - TorrentError::NoPeersFound => ServerError::NoPeersFound, - TorrentError::CouldNotSendResponse => ServerError::InternalServerError, - TorrentError::InvalidInfoHash => ServerError::InvalidInfoHash, + torrent::Error::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, + torrent::Error::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, + torrent::Error::PeerKeyNotValid => ServerError::PeerKeyNotValid, + torrent::Error::NoPeersFound => ServerError::NoPeersFound, + torrent::Error::CouldNotSendResponse => ServerError::InternalServerError, + torrent::Error::InvalidInfoHash => ServerError::InvalidInfoHash, }; Err(err) From 78221b6174a435ea815f9aefa1f84aa6d9ea4f8e Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 24 Nov 2022 22:41:46 +0100 Subject: [PATCH 183/435] 
clippy: fix src/tracker/mod.rs --- src/api/server.rs | 22 ++++++------- src/http/filters.rs | 10 +++--- src/http/handlers.rs | 8 ++--- src/http/routes.rs | 8 ++--- src/http/server.rs | 8 ++--- src/jobs/http_tracker.rs | 4 +-- src/jobs/torrent_cleanup.rs | 4 +-- src/jobs/tracker_api.rs | 4 +-- src/jobs/udp_tracker.rs | 4 +-- src/main.rs | 5 ++- src/setup.rs | 4 +-- src/tracker/mod.rs | 64 +++++++++++++++++++++++++++-------- src/udp/handlers.rs | 66 ++++++++++++++++++------------------- src/udp/server.rs | 6 ++-- tests/api.rs | 7 ++-- tests/udp.rs | 5 ++- 16 files changed, 133 insertions(+), 96 deletions(-) diff --git a/src/api/server.rs b/src/api/server.rs index f9e5bc368..fac25e297 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -11,7 +11,7 @@ use super::resources::auth_key_resource::AuthKeyResource; use super::resources::stats_resource::StatsResource; use super::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; use crate::protocol::common::InfoHash; -use crate::tracker::TorrentTracker; +use crate::tracker; #[derive(Deserialize, Debug)] struct TorrentInfoQuery { @@ -60,7 +60,7 @@ fn authenticate(tokens: HashMap) -> impl Filter) -> impl warp::Future { +pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl warp::Future { // GET /api/torrents?offset=:u32&limit=:u32 // View torrent list let api_torrents = tracker.clone(); @@ -72,7 +72,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl war let tracker = api_torrents.clone(); (limits, tracker) }) - .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| async move { + .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| async move { let offset = limits.offset.unwrap_or(0); let limit = min(limits.limit.unwrap_or(1000), 4000); @@ -103,7 +103,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl war .and(filters::path::path("stats")) .and(filters::path::end()) .map(move || api_stats.clone()) - .and_then(|tracker: Arc| async move { + 
.and_then(|tracker: Arc| async move { let mut results = StatsResource { torrents: 0, seeders: 0, @@ -165,7 +165,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl war let tracker = t2.clone(); (info_hash, tracker) }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { let db = tracker.get_torrents().await; let torrent_entry_option = db.get(&info_hash); @@ -201,7 +201,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl war let tracker = t3.clone(); (info_hash, tracker) }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { match tracker.remove_torrent_from_whitelist(&info_hash).await { Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), Err(_) => Err(warp::reject::custom(ActionStatus::Err { @@ -221,7 +221,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl war let tracker = t4.clone(); (info_hash, tracker) }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { match tracker.add_torrent_to_whitelist(&info_hash).await { Ok(..) => Ok(warp::reply::json(&ActionStatus::Ok)), Err(..) => Err(warp::reject::custom(ActionStatus::Err { @@ -241,7 +241,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl war let tracker = t5.clone(); (seconds_valid, tracker) }) - .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { + .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { Ok(auth_key) => Ok(warp::reply::json(&AuthKeyResource::from(auth_key))), Err(..) 
=> Err(warp::reject::custom(ActionStatus::Err { @@ -261,7 +261,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl war let tracker = t6.clone(); (key, tracker) }) - .and_then(|(key, tracker): (String, Arc)| async move { + .and_then(|(key, tracker): (String, Arc)| async move { match tracker.remove_auth_key(&key).await { Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), Err(_) => Err(warp::reject::custom(ActionStatus::Err { @@ -278,7 +278,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl war .and(filters::path::path("reload")) .and(filters::path::end()) .map(move || t7.clone()) - .and_then(|tracker: Arc| async move { + .and_then(|tracker: Arc| async move { match tracker.load_whitelist().await { Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), Err(_) => Err(warp::reject::custom(ActionStatus::Err { @@ -295,7 +295,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl war .and(filters::path::path("reload")) .and(filters::path::end()) .map(move || t8.clone()) - .and_then(|tracker: Arc| async move { + .and_then(|tracker: Arc| async move { match tracker.load_keys().await { Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), Err(_) => Err(warp::reject::custom(ActionStatus::Err { diff --git a/src/http/filters.rs b/src/http/filters.rs index 3375c781f..2c3ab626d 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -9,12 +9,14 @@ use super::errors::ServerError; use super::request::{Announce, AnnounceRequestQuery, Scrape}; use super::WebResult; use crate::protocol::common::{InfoHash, PeerId, MAX_SCRAPE_TORRENTS}; +use crate::tracker; use crate::tracker::key::Auth; -use crate::tracker::TorrentTracker; -/// Pass Arc along +/// Pass Arc along #[must_use] -pub fn with_tracker(tracker: Arc) -> impl Filter,), Error = Infallible> + Clone { +pub fn with_tracker( + tracker: Arc, +) -> impl Filter,), Error = Infallible> + Clone { warp::any().map(move || tracker.clone()) } @@ -30,7 +32,7 @@ pub fn with_peer_id() -> impl Filter + C 
warp::filters::query::raw().and_then(|q| async move { peer_id(&q) }) } -/// Pass Arc along +/// Pass Arc along #[must_use] pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { warp::path::param::() diff --git a/src/http/handlers.rs b/src/http/handlers.rs index ace20ada9..d4ae76e65 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -13,7 +13,7 @@ use super::{request, WebResult}; use crate::http::response::Error; use crate::protocol::common::InfoHash; use crate::tracker::key::Auth; -use crate::tracker::{peer, statistics, torrent, TorrentTracker}; +use crate::tracker::{self, peer, statistics, torrent}; /// Authenticate `InfoHash` using optional `AuthKey` /// @@ -23,7 +23,7 @@ use crate::tracker::{peer, statistics, torrent, TorrentTracker}; pub async fn authenticate( info_hash: &InfoHash, auth_key: &Option, - tracker: Arc, + tracker: Arc, ) -> Result<(), ServerError> { tracker.authenticate_request(info_hash, auth_key).await.map_err(|e| match e { torrent::Error::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, @@ -43,7 +43,7 @@ pub async fn authenticate( pub async fn handle_announce( announce_request: request::Announce, auth_key: Option, - tracker: Arc, + tracker: Arc, ) -> WebResult { authenticate(&announce_request.info_hash, &auth_key, tracker.clone()) .await @@ -89,7 +89,7 @@ pub async fn handle_announce( pub async fn handle_scrape( scrape_request: request::Scrape, auth_key: Option, - tracker: Arc, + tracker: Arc, ) -> WebResult { let mut files: HashMap = HashMap::new(); let db = tracker.get_torrents().await; diff --git a/src/http/routes.rs b/src/http/routes.rs index 992febc2c..c46c502e4 100644 --- a/src/http/routes.rs +++ b/src/http/routes.rs @@ -5,18 +5,18 @@ use warp::{Filter, Rejection}; use super::filters::{with_announce_request, with_auth_key, with_scrape_request, with_tracker}; use super::handlers::{handle_announce, handle_scrape, send_error}; -use crate::tracker::TorrentTracker; +use crate::tracker; /// All routes 
#[must_use] -pub fn routes(tracker: Arc) -> impl Filter + Clone { +pub fn routes(tracker: Arc) -> impl Filter + Clone { announce(tracker.clone()) .or(scrape(tracker)) .recover(|q| async move { send_error(&q) }) } /// GET /announce or /announce/ -fn announce(tracker: Arc) -> impl Filter + Clone { +fn announce(tracker: Arc) -> impl Filter + Clone { warp::path::path("announce") .and(warp::filters::method::get()) .and(with_announce_request(tracker.config.on_reverse_proxy)) @@ -26,7 +26,7 @@ fn announce(tracker: Arc) -> impl Filter -fn scrape(tracker: Arc) -> impl Filter + Clone { +fn scrape(tracker: Arc) -> impl Filter + Clone { warp::path::path("scrape") .and(warp::filters::method::get()) .and(with_scrape_request(tracker.config.on_reverse_proxy)) diff --git a/src/http/server.rs b/src/http/server.rs index 755fdc73a..894d3e911 100644 --- a/src/http/server.rs +++ b/src/http/server.rs @@ -2,17 +2,17 @@ use std::net::SocketAddr; use std::sync::Arc; use super::routes; -use crate::tracker::TorrentTracker; +use crate::tracker; -/// Server that listens on HTTP, needs a `TorrentTracker` +/// Server that listens on HTTP, needs a `tracker::TorrentTracker` #[derive(Clone)] pub struct Http { - tracker: Arc, + tracker: Arc, } impl Http { #[must_use] - pub fn new(tracker: Arc) -> Http { + pub fn new(tracker: Arc) -> Http { Http { tracker } } diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index 276da8099..b8f031f5a 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -6,13 +6,13 @@ use tokio::task::JoinHandle; use crate::config::HttpTracker; use crate::http::server::Http; -use crate::tracker::TorrentTracker; +use crate::tracker; /// # Panics /// /// It would panic if the `config::HttpTracker` struct would contain an inappropriate values. 
#[must_use] -pub fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { +pub fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.parse::().unwrap(); let ssl_enabled = config.ssl_enabled; let ssl_cert_path = config.ssl_cert_path.clone(); diff --git a/src/jobs/torrent_cleanup.rs b/src/jobs/torrent_cleanup.rs index 7bdfc1677..073ceda61 100644 --- a/src/jobs/torrent_cleanup.rs +++ b/src/jobs/torrent_cleanup.rs @@ -5,10 +5,10 @@ use log::info; use tokio::task::JoinHandle; use crate::config::Configuration; -use crate::tracker::TorrentTracker; +use crate::tracker; #[must_use] -pub fn start_job(config: &Configuration, tracker: &Arc) -> JoinHandle<()> { +pub fn start_job(config: &Configuration, tracker: &Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(tracker); let interval = config.inactive_peer_cleanup_interval; diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index 4e2dcd0c9..7787ea3f4 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -6,12 +6,12 @@ use tokio::task::JoinHandle; use crate::api::server; use crate::config::Configuration; -use crate::tracker::TorrentTracker; +use crate::tracker; #[derive(Debug)] pub struct ApiServerJobStarted(); -pub async fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { +pub async fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { let bind_addr = config .http_api .bind_address diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index 1b4bc745c..d5fdae4c1 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -4,11 +4,11 @@ use log::{error, info, warn}; use tokio::task::JoinHandle; use crate::config::UdpTracker; -use crate::tracker::TorrentTracker; +use crate::tracker; use crate::udp::server::UdpServer; #[must_use] -pub fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHandle<()> { +pub fn start_job(config: &UdpTracker, tracker: Arc) -> 
JoinHandle<()> { let bind_addr = config.bind_address.clone(); tokio::spawn(async move { diff --git a/src/main.rs b/src/main.rs index baffc6fa5..a7316cef2 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,8 +3,7 @@ use std::sync::Arc; use log::info; use torrust_tracker::config::Configuration; use torrust_tracker::stats::setup_statistics; -use torrust_tracker::tracker::TorrentTracker; -use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time}; +use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time, tracker}; #[tokio::main] async fn main() { @@ -28,7 +27,7 @@ async fn main() { let (stats_event_sender, stats_repository) = setup_statistics(config.tracker_usage_statistics); // Initialize Torrust tracker - let tracker = match TorrentTracker::new(config.clone(), stats_event_sender, stats_repository) { + let tracker = match tracker::Tracker::new(&config.clone(), stats_event_sender, stats_repository) { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) diff --git a/src/setup.rs b/src/setup.rs index cfca5eb9e..a7b7c5a82 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -5,9 +5,9 @@ use tokio::task::JoinHandle; use crate::config::Configuration; use crate::jobs::{http_tracker, torrent_cleanup, tracker_api, udp_tracker}; -use crate::tracker::TorrentTracker; +use crate::tracker; -pub async fn setup(config: &Configuration, tracker: Arc) -> Vec> { +pub async fn setup(config: &Configuration, tracker: Arc) -> Vec> { let mut jobs: Vec> = Vec::new(); // Load peer keys diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index b3a7ab6d6..fcd9ebe2d 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -19,7 +19,7 @@ use crate::databases::database::Database; use crate::protocol::common::InfoHash; use crate::tracker::key::Auth; -pub struct TorrentTracker { +pub struct Tracker { pub config: Arc, mode: mode::Tracker, keys: RwLock>, @@ -30,15 +30,18 @@ pub struct TorrentTracker { database: Box, } -impl TorrentTracker { +impl Tracker 
{ + /// # Errors + /// + /// Will return a `r2d2::Error` if unable to connect to database. pub fn new( - config: Arc, + config: &Arc, stats_event_sender: Option>, stats_repository: statistics::Repo, - ) -> Result { + ) -> Result { let database = database::connect(&config.db_driver, &config.db_path)?; - Ok(TorrentTracker { + Ok(Tracker { config: config.clone(), mode: config.mode, keys: RwLock::new(std::collections::HashMap::new()), @@ -62,6 +65,9 @@ impl TorrentTracker { self.mode == mode::Tracker::Listed || self.mode == mode::Tracker::PrivateListed } + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the database. pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { let auth_key = key::generate(lifetime); self.database.add_key_to_keys(&auth_key).await?; @@ -69,12 +75,18 @@ impl TorrentTracker { Ok(auth_key) } + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `key` to the database. pub async fn remove_auth_key(&self, key: &str) -> Result<(), database::Error> { self.database.remove_key_from_keys(key).await?; self.keys.write().await.remove(key); Ok(()) } + /// # Errors + /// + /// Will return a `key::Error` if unable to get any `auth_key`. pub async fn verify_auth_key(&self, auth_key: &Auth) -> Result<(), key::Error> { match self.keys.read().await.get(&auth_key.key) { None => Err(key::Error::KeyInvalid), @@ -82,6 +94,9 @@ impl TorrentTracker { } } + /// # Errors + /// + /// Will return a `database::Error` if unable to `load_keys` from the database. pub async fn load_keys(&self) -> Result<(), database::Error> { let keys_from_database = self.database.load_keys().await?; let mut keys = self.keys.write().await; @@ -89,13 +104,17 @@ impl TorrentTracker { keys.clear(); for key in keys_from_database { - let _ = keys.insert(key.key.clone(), key); + keys.insert(key.key.clone(), key); } Ok(()) } - // Adding torrents is not relevant to public trackers. 
+ /// Adding torrents is not relevant to public trackers. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { self.add_torrent_to_database_whitelist(info_hash).await?; self.add_torrent_to_memory_whitelist(info_hash).await; @@ -117,7 +136,11 @@ impl TorrentTracker { self.whitelist.write().await.insert(*info_hash) } - // Removing torrents is not relevant to public trackers. + /// Removing torrents is not relevant to public trackers. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { self.database.remove_info_hash_from_whitelist(*info_hash).await?; self.whitelist.write().await.remove(info_hash); @@ -128,6 +151,9 @@ impl TorrentTracker { self.whitelist.read().await.contains(info_hash) } + /// # Errors + /// + /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. pub async fn load_whitelist(&self) -> Result<(), database::Error> { let whitelisted_torrents_from_database = self.database.load_whitelist().await?; let mut whitelist = self.whitelist.write().await; @@ -141,6 +167,13 @@ impl TorrentTracker { Ok(()) } + /// # Errors + /// + /// Will return a `torrent::Error::PeerKeyNotValid` if the `key` is not valid. + /// + /// Will return a `torrent::Error::PeerNotAuthenticated` if the `key` is `None`. + /// + /// Will return a `torrent::Error::TorrentNotWhitelisted` if the the Tracker is in listed mode and the `info_hash` is not whitelisted. 
pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), torrent::Error> { // no authentication needed in public mode if self.is_public() { @@ -169,7 +202,11 @@ impl TorrentTracker { Ok(()) } - // Loading the torrents from database into memory + /// Loading the torrents from database into memory + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; let mut torrents = self.torrents.write().await; @@ -181,7 +218,7 @@ impl TorrentTracker { } let torrent_entry = torrent::Entry { - peers: Default::default(), + peers: BTreeMap::default(), completed, }; @@ -262,9 +299,10 @@ impl TorrentTracker { torrents_lock.retain(|_, torrent_entry| { torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); - match self.config.persistent_torrent_completed_stat { - true => torrent_entry.completed > 0 || !torrent_entry.peers.is_empty(), - false => !torrent_entry.peers.is_empty(), + if self.config.persistent_torrent_completed_stat { + torrent_entry.completed > 0 || !torrent_entry.peers.is_empty() + } else { + !torrent_entry.peers.is_empty() } }); } else { diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 632180a92..bf34326c6 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -8,11 +8,11 @@ use aquatic_udp_protocol::{ use super::connection_cookie::{check_connection_cookie, from_connection_id, into_connection_id, make_connection_cookie}; use crate::protocol::common::{InfoHash, MAX_SCRAPE_TORRENTS}; -use crate::tracker::{peer, statistics, torrent, TorrentTracker}; +use crate::tracker::{self, peer, statistics, torrent}; use crate::udp::errors::ServerError; use crate::udp::request::AnnounceRequestWrapper; -pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), ServerError> { 
+pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), ServerError> { match tracker.authenticate_request(info_hash, &None).await { Ok(_) => Ok(()), Err(e) => { @@ -30,7 +30,7 @@ pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> } } -pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: Arc) -> Response { +pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: Arc) -> Response { match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|_| ServerError::InternalServerError) { Ok(request) => { let transaction_id = match &request { @@ -52,7 +52,7 @@ pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: A pub async fn handle_request( request: Request, remote_addr: SocketAddr, - tracker: Arc, + tracker: Arc, ) -> Result { match request { Request::Connect(connect_request) => handle_connect(remote_addr, &connect_request, tracker).await, @@ -64,7 +64,7 @@ pub async fn handle_request( pub async fn handle_connect( remote_addr: SocketAddr, request: &ConnectRequest, - tracker: Arc, + tracker: Arc, ) -> Result { let connection_cookie = make_connection_cookie(&remote_addr); let connection_id = into_connection_id(&connection_cookie); @@ -90,7 +90,7 @@ pub async fn handle_connect( pub async fn handle_announce( remote_addr: SocketAddr, announce_request: &AnnounceRequest, - tracker: Arc, + tracker: Arc, ) -> Result { match check_connection_cookie(&remote_addr, &from_connection_id(&announce_request.connection_id)) { Ok(_) => {} @@ -179,7 +179,7 @@ pub async fn handle_announce( pub async fn handle_scrape( remote_addr: SocketAddr, request: &ScrapeRequest, - tracker: Arc, + tracker: Arc, ) -> Result { let db = tracker.get_torrents().await; @@ -252,18 +252,18 @@ mod tests { use crate::config::Configuration; use crate::protocol::clock::{Current, Time}; use crate::protocol::common::PeerId; - use crate::tracker::{mode, peer, statistics, TorrentTracker}; + use 
crate::tracker::{self, mode, peer, statistics}; fn default_tracker_config() -> Arc { Arc::new(Configuration::default()) } - fn initialized_public_tracker() -> Arc { + fn initialized_public_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Tracker::Public).into()); initialized_tracker(configuration) } - fn initialized_private_tracker() -> Arc { + fn initialized_private_tracker() -> Arc { let configuration = Arc::new( TrackerConfigurationBuilder::default() .with_mode(mode::Tracker::Private) @@ -272,14 +272,14 @@ mod tests { initialized_tracker(configuration) } - fn initialized_whitelisted_tracker() -> Arc { + fn initialized_whitelisted_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Tracker::Listed).into()); initialized_tracker(configuration) } - fn initialized_tracker(configuration: Arc) -> Arc { + fn initialized_tracker(configuration: Arc) -> Arc { let (stats_event_sender, stats_repository) = statistics::Keeper::new_active_instance(); - Arc::new(TorrentTracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()) + Arc::new(tracker::Tracker::new(&configuration, Some(stats_event_sender), stats_repository).unwrap()) } fn sample_ipv4_remote_addr() -> SocketAddr { @@ -372,7 +372,7 @@ mod tests { use mockall::predicate::eq; use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr}; - use crate::tracker::{statistics, TorrentTracker}; + use crate::tracker::{self, statistics}; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handlers::handle_connect; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; @@ -434,7 +434,7 @@ mod tests { let client_socket_address = sample_ipv4_socket_address(); let torrent_tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + 
tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_connect(client_socket_address, &sample_connect_request(), torrent_tracker) .await @@ -452,7 +452,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let torrent_tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), torrent_tracker) .await @@ -544,7 +544,7 @@ mod tests { use mockall::predicate::eq; use crate::protocol::common::PeerId; - use crate::tracker::{statistics, TorrentTracker}; + use crate::tracker::{self, statistics}; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -641,7 +641,7 @@ mod tests { assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); } - async fn add_a_torrent_peer_using_ipv6(tracker: Arc) { + async fn add_a_torrent_peer_using_ipv6(tracker: Arc) { let info_hash = AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -659,7 +659,7 @@ mod tests { .await; } - async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { + async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) @@ -696,7 +696,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + 
tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_announce( @@ -768,7 +768,7 @@ mod tests { use mockall::predicate::eq; use crate::protocol::common::PeerId; - use crate::tracker::{statistics, TorrentTracker}; + use crate::tracker::{self, statistics}; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -870,7 +870,7 @@ mod tests { assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); } - async fn add_a_torrent_peer_using_ipv4(tracker: Arc) { + async fn add_a_torrent_peer_using_ipv4(tracker: Arc) { let info_hash = AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -887,7 +887,7 @@ mod tests { .await; } - async fn announce_a_new_peer_using_ipv6(tracker: Arc) -> Response { + async fn announce_a_new_peer_using_ipv6(tracker: Arc) -> Response { let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); let client_port = 8080; @@ -927,7 +927,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); let remote_addr = sample_ipv6_remote_addr(); @@ -947,8 +947,8 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use crate::tracker; use crate::tracker::statistics::Keeper; - use crate::tracker::TorrentTracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -959,7 +959,7 @@ mod tests { let 
configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); let tracker = - Arc::new(TorrentTracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()); + Arc::new(tracker::Tracker::new(&configuration, Some(stats_event_sender), stats_repository).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); @@ -1010,7 +1010,7 @@ mod tests { use super::TorrentPeerBuilder; use crate::protocol::common::PeerId; - use crate::tracker::TorrentTracker; + use crate::tracker; use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; @@ -1051,7 +1051,7 @@ mod tests { ); } - async fn add_a_seeder(tracker: Arc, remote_addr: &SocketAddr, info_hash: &InfoHash) { + async fn add_a_seeder(tracker: Arc, remote_addr: &SocketAddr, info_hash: &InfoHash) { let peer_id = PeerId([255u8; 20]); let peer = TorrentPeerBuilder::default() @@ -1075,7 +1075,7 @@ mod tests { } } - async fn add_a_sample_seeder_and_scrape(tracker: Arc) -> Response { + async fn add_a_sample_seeder_and_scrape(tracker: Arc) -> Response { let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); @@ -1227,7 +1227,7 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; - use crate::tracker::{statistics, TorrentTracker}; + use crate::tracker::{self, statistics}; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr}; @@ -1243,7 +1243,7 @@ mod tests { let remote_addr = sample_ipv4_remote_addr(); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + 
tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) @@ -1259,7 +1259,7 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; - use crate::tracker::{statistics, TorrentTracker}; + use crate::tracker::{self, statistics}; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr}; @@ -1275,7 +1275,7 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) diff --git a/src/udp/server.rs b/src/udp/server.rs index 5c215f9ec..705a6c263 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -6,17 +6,17 @@ use aquatic_udp_protocol::Response; use log::{debug, info}; use tokio::net::UdpSocket; -use crate::tracker::TorrentTracker; +use crate::tracker; use crate::udp::handlers::handle_packet; use crate::udp::MAX_PACKET_SIZE; pub struct UdpServer { socket: Arc, - tracker: Arc, + tracker: Arc, } impl UdpServer { - pub async fn new(tracker: Arc, bind_address: &str) -> tokio::io::Result { + pub async fn new(tracker: Arc, bind_address: &str) -> tokio::io::Result { let socket = UdpSocket::bind(bind_address).await?; Ok(UdpServer { diff --git a/tests/api.rs b/tests/api.rs index a4043fe7c..72c3c65c7 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -26,8 +26,7 @@ mod tracker_api { use torrust_tracker::tracker::key::Auth; use torrust_tracker::tracker::peer::TorrentPeer; use torrust_tracker::tracker::statistics::Keeper; - use torrust_tracker::tracker::TorrentTracker; - use torrust_tracker::{ephemeral_instance_keys, logging, 
static_time}; + use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use crate::common::ephemeral_random_port; @@ -237,7 +236,7 @@ mod tracker_api { struct ApiServer { pub started: AtomicBool, pub job: Option>, - pub tracker: Option>, + pub tracker: Option>, pub connection_info: Option, } @@ -279,7 +278,7 @@ mod tracker_api { let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); // Initialize Torrust tracker - let tracker = match TorrentTracker::new(configuration.clone(), Some(stats_event_sender), stats_repository) { + let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) diff --git a/tests/udp.rs b/tests/udp.rs index fabca137a..e93894843 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -21,9 +21,8 @@ mod udp_tracker_server { use torrust_tracker::config::Configuration; use torrust_tracker::jobs::udp_tracker; use torrust_tracker::tracker::statistics::Keeper; - use torrust_tracker::tracker::TorrentTracker; use torrust_tracker::udp::MAX_PACKET_SIZE; - use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; + use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use crate::common::ephemeral_random_port; @@ -61,7 +60,7 @@ mod udp_tracker_server { let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); // Initialize Torrust tracker - let tracker = match TorrentTracker::new(configuration.clone(), Some(stats_event_sender), stats_repository) { + let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) From 4a34f685fc3c88e01269ab68b805f07a25ab4a03 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 25 Nov 2022 02:10:57 +0100 Subject: [PATCH 184/435] clippy: fix src/udp/connection_cookie.rs --- src/tracker/peer.rs | 5 
++-- src/udp/connection_cookie.rs | 54 +++++++++++++++++++----------------- src/udp/handlers.rs | 54 ++++++++++++++++++------------------ 3 files changed, 59 insertions(+), 54 deletions(-) diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index d590b590d..dd49ffaa7 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -131,7 +131,8 @@ mod test { }; use crate::tracker::peer::TorrentPeer; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; + use crate::udp::connection_cookie::{into_connection_id, make}; + // todo: duplicate functions is PR 82. Remove duplication once both PR are merged. fn sample_ipv4_remote_addr() -> SocketAddr { @@ -153,7 +154,7 @@ mod test { let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); let default_request = AnnounceRequest { - connection_id: into_connection_id(&make_connection_cookie(&sample_ipv4_remote_addr())), + connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), transaction_id: TransactionId(0i32), info_hash: info_hash_aquatic, peer_id: AquaticPeerId(*b"-qB00000000000000000"), diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs index 5a1e564dd..8a544fa6a 100644 --- a/src/udp/connection_cookie.rs +++ b/src/udp/connection_cookie.rs @@ -22,17 +22,21 @@ pub fn into_connection_id(connection_cookie: &Cookie) -> ConnectionId { } #[must_use] -pub fn make_connection_cookie(remote_address: &SocketAddr) -> Cookie { +pub fn make(remote_address: &SocketAddr) -> Cookie { let time_extent = cookie_builder::get_last_time_extent(); //println!("remote_address: {remote_address:?}, time_extent: {time_extent:?}, cookie: {cookie:?}"); cookie_builder::build(remote_address, &time_extent) } -pub fn check_connection_cookie( - remote_address: &SocketAddr, - connection_cookie: &Cookie, -) -> Result { +/// # Panics +/// +/// It would panic if the `COOKIE_LIFETIME` constant would be an unreasonably large number. 
+/// +/// # Errors +/// +/// Will return a `ServerError::InvalidConnectionId` if the supplied `connection_cookie` fails to verify. +pub fn check(remote_address: &SocketAddr, connection_cookie: &Cookie) -> Result { // we loop backwards testing each time_extent until we find one that matches. // (or the lifetime of time_extents is exhausted) for offset in 0..=COOKIE_LIFETIME.amount { @@ -85,19 +89,19 @@ mod tests { use super::cookie_builder::{self}; use crate::protocol::clock::time_extent::{self, Extent}; use crate::protocol::clock::{Stopped, StoppedTime}; - use crate::udp::connection_cookie::{check_connection_cookie, make_connection_cookie, Cookie, COOKIE_LIFETIME}; + use crate::udp::connection_cookie::{check, make, Cookie, COOKIE_LIFETIME}; // #![feature(const_socketaddr)] // const REMOTE_ADDRESS_IPV4_ZERO: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); #[test] fn it_should_make_a_connection_cookie() { - let cookie = make_connection_cookie(&SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)); - // Note: This constant may need to be updated in the future as the hash is not guaranteed to to be stable between versions. 
const ID_COOKIE: Cookie = [23, 204, 198, 29, 48, 180, 62, 19]; - assert_eq!(cookie, ID_COOKIE) + let cookie = make(&SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)); + + assert_eq!(cookie, ID_COOKIE); } #[test] @@ -114,7 +118,7 @@ mod tests { //remote_address: 127.0.0.1:8080, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [212, 9, 204, 223, 176, 190, 150, 153] //remote_address: 127.0.0.1:8080, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [212, 9, 204, 223, 176, 190, 150, 153] - assert_eq!(cookie, cookie_2) + assert_eq!(cookie, cookie_2); } #[test] @@ -132,7 +136,7 @@ mod tests { //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] //remote_address: 255.255.255.255:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [217, 87, 239, 178, 182, 126, 66, 166] - assert_ne!(cookie, cookie_2) + assert_ne!(cookie, cookie_2); } #[test] @@ -150,7 +154,7 @@ mod tests { //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] //remote_address: [::]:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [99, 119, 230, 177, 20, 220, 163, 187] - assert_ne!(cookie, cookie_2) + assert_ne!(cookie, cookie_2); } #[test] @@ -168,7 +172,7 @@ mod tests { //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] //remote_address: 0.0.0.0:1, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [38, 8, 0, 102, 92, 170, 220, 11] - assert_ne!(cookie, cookie_2) + assert_ne!(cookie, cookie_2); } #[test] @@ -186,51 +190,51 @@ mod tests { //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 18446744073709551615.999999999s, amount: 18446744073709551615 }, cookie: 
[87, 111, 109, 125, 182, 206, 3, 201] - assert_ne!(cookie, cookie_2) + assert_ne!(cookie, cookie_2); } #[test] fn it_should_make_different_cookies_for_the_next_time_extent() { let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let cookie = make_connection_cookie(&remote_address); + let cookie = make(&remote_address); Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); - let cookie_next = make_connection_cookie(&remote_address); + let cookie_next = make(&remote_address); - assert_ne!(cookie, cookie_next) + assert_ne!(cookie, cookie_next); } #[test] fn it_should_be_valid_for_this_time_extent() { let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let cookie = make_connection_cookie(&remote_address); + let cookie = make(&remote_address); - check_connection_cookie(&remote_address, &cookie).unwrap(); + check(&remote_address, &cookie).unwrap(); } #[test] fn it_should_be_valid_for_the_next_time_extent() { let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let cookie = make_connection_cookie(&remote_address); + let cookie = make(&remote_address); Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); - check_connection_cookie(&remote_address, &cookie).unwrap(); + check(&remote_address, &cookie).unwrap(); } #[test] fn it_should_be_valid_for_the_last_time_extent() { let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let cookie = make_connection_cookie(&remote_address); + let cookie = make(&remote_address); Stopped::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); - check_connection_cookie(&remote_address, &cookie).unwrap(); + check(&remote_address, &cookie).unwrap(); } #[test] @@ -238,10 +242,10 @@ mod tests { fn it_should_be_not_valid_after_their_last_time_extent() { let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let cookie = make_connection_cookie(&remote_address); + let cookie = make(&remote_address); 
Stopped::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap()); - check_connection_cookie(&remote_address, &cookie).unwrap(); + check(&remote_address, &cookie).unwrap(); } } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index bf34326c6..cdf12ed6b 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -6,7 +6,7 @@ use aquatic_udp_protocol::{ NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; -use super::connection_cookie::{check_connection_cookie, from_connection_id, into_connection_id, make_connection_cookie}; +use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; use crate::protocol::common::{InfoHash, MAX_SCRAPE_TORRENTS}; use crate::tracker::{self, peer, statistics, torrent}; use crate::udp::errors::ServerError; @@ -66,7 +66,7 @@ pub async fn handle_connect( request: &ConnectRequest, tracker: Arc, ) -> Result { - let connection_cookie = make_connection_cookie(&remote_addr); + let connection_cookie = make(&remote_addr); let connection_id = into_connection_id(&connection_cookie); let response = Response::from(ConnectResponse { @@ -92,7 +92,7 @@ pub async fn handle_announce( announce_request: &AnnounceRequest, tracker: Arc, ) -> Result { - match check_connection_cookie(&remote_addr, &from_connection_id(&announce_request.connection_id)) { + match check(&remote_addr, &from_connection_id(&announce_request.connection_id)) { Ok(_) => {} Err(e) => { return Err(e); @@ -373,7 +373,7 @@ mod tests { use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr}; use crate::tracker::{self, statistics}; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; + use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_connect; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; @@ -396,7 +396,7 @@ mod tests { assert_eq!( 
response, Response::Connect(ConnectResponse { - connection_id: into_connection_id(&make_connection_cookie(&sample_ipv4_remote_addr())), + connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), transaction_id: request.transaction_id }) ); @@ -415,7 +415,7 @@ mod tests { assert_eq!( response, Response::Connect(ConnectResponse { - connection_id: into_connection_id(&make_connection_cookie(&sample_ipv4_remote_addr())), + connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), transaction_id: request.transaction_id }) ); @@ -469,7 +469,7 @@ mod tests { TransactionId, }; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; + use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::tests::sample_ipv4_remote_addr; struct AnnounceRequestBuilder { @@ -483,7 +483,7 @@ mod tests { let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); let default_request = AnnounceRequest { - connection_id: into_connection_id(&make_connection_cookie(&sample_ipv4_remote_addr())), + connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), transaction_id: TransactionId(0i32), info_hash: info_hash_aquatic, peer_id: AquaticPeerId([255u8; 20]), @@ -545,7 +545,7 @@ mod tests { use crate::protocol::common::PeerId; use crate::tracker::{self, statistics}; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; + use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ @@ -564,7 +564,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) 
.with_ip_address(client_ip) @@ -588,7 +588,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); let response = handle_announce(remote_addr, &request, initialized_public_tracker()) @@ -627,7 +627,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) @@ -662,7 +662,7 @@ mod tests { async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); handle_announce(remote_addr, &request, tracker.clone()).await.unwrap() @@ -714,7 +714,7 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; use crate::protocol::common::PeerId; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; + use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{initialized_public_tracker, TorrentPeerBuilder}; @@ -731,7 +731,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); let request = AnnounceRequestBuilder::default() - 
.with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) @@ -769,7 +769,7 @@ mod tests { use crate::protocol::common::PeerId; use crate::tracker::{self, statistics}; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; + use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ @@ -789,7 +789,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) @@ -816,7 +816,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); let response = handle_announce(remote_addr, &request, initialized_public_tracker()) @@ -855,7 +855,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) @@ -893,7 +893,7 @@ mod tests { let client_port = 8080; let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - 
.with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); handle_announce(remote_addr, &request, tracker.clone()).await.unwrap() @@ -933,7 +933,7 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); let announce_request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); handle_announce(remote_addr, &announce_request, tracker.clone()) @@ -949,7 +949,7 @@ mod tests { use crate::tracker; use crate::tracker::statistics::Keeper; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; + use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::TrackerConfigurationBuilder; @@ -974,7 +974,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) @@ -1011,7 +1011,7 @@ mod tests { use super::TorrentPeerBuilder; use crate::protocol::common::PeerId; use crate::tracker; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; + use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; @@ -1031,7 +1031,7 @@ mod tests { let info_hashes = vec![info_hash]; let request = ScrapeRequest { - connection_id: into_connection_id(&make_connection_cookie(&remote_addr)), + connection_id: 
into_connection_id(&make(&remote_addr)), transaction_id: TransactionId(0i32), info_hashes, }; @@ -1069,7 +1069,7 @@ mod tests { let info_hashes = vec![*info_hash]; ScrapeRequest { - connection_id: into_connection_id(&make_connection_cookie(remote_addr)), + connection_id: into_connection_id(&make(remote_addr)), transaction_id: TransactionId(0i32), info_hashes, } @@ -1214,7 +1214,7 @@ mod tests { let info_hashes = vec![info_hash]; ScrapeRequest { - connection_id: into_connection_id(&make_connection_cookie(remote_addr)), + connection_id: into_connection_id(&make(remote_addr)), transaction_id: TransactionId(0i32), info_hashes, } From 6e2a34226b1dc17f17e9420dd25e47255fd45fe2 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 25 Nov 2022 19:35:53 +0100 Subject: [PATCH 185/435] clippy: fix src/databases/database.rs --- src/databases/database.rs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/databases/database.rs b/src/databases/database.rs index 5186f96b3..5e4a7c1f9 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -61,14 +61,15 @@ pub trait Database: Sync + Send { async fn remove_key_from_keys(&self, key: &str) -> Result; async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { - if let Err(e) = self.get_info_hash_from_whitelist(&info_hash.clone().to_string()).await { - if let Error::QueryReturnedNoRows = e { - return Ok(false); - } else { - return Err(e); - } - } - Ok(true) + self.get_info_hash_from_whitelist(&info_hash.clone().to_string()) + .await + .map_or_else( + |e| match e { + Error::QueryReturnedNoRows => Ok(false), + e => Err(e), + }, + |_| Ok(true), + ) } } From 0a7d9276b4958f56a5e4099c24cee08ed2c8084a Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 25 Nov 2022 19:44:59 +0100 Subject: [PATCH 186/435] clippy: fix src/jobs/tracker_api.rs --- src/jobs/tracker_api.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/jobs/tracker_api.rs 
b/src/jobs/tracker_api.rs index 7787ea3f4..2c00aa453 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -11,6 +11,9 @@ use crate::tracker; #[derive(Debug)] pub struct ApiServerJobStarted(); +/// # Panics +/// +/// It would panic if unable to send the `ApiServerJobStarted` notice. pub async fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { let bind_addr = config .http_api @@ -26,7 +29,7 @@ pub async fn start_job(config: &Configuration, tracker: Arc) - let join_handle = tokio::spawn(async move { let handel = server::start(bind_addr, &tracker); - assert!(tx.send(ApiServerJobStarted()).is_ok(), "the start job dropped"); + tx.send(ApiServerJobStarted()).expect("the start job dropped"); handel.await; }); @@ -34,7 +37,7 @@ pub async fn start_job(config: &Configuration, tracker: Arc) - // Wait until the API server job is running match rx.await { Ok(_msg) => info!("Torrust API server started"), - Err(_) => panic!("the api server dropped"), + Err(e) => panic!("the api server dropped: {e}"), } join_handle From 220f83af3d6d98bd8fd181b14601b249fcc00772 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 25 Nov 2022 20:14:12 +0100 Subject: [PATCH 187/435] clippy: fix src/udp/handlers.rs --- src/udp/errors.rs | 15 ++++++++ src/udp/handlers.rs | 84 +++++++++++++++++++++------------------------ 2 files changed, 54 insertions(+), 45 deletions(-) diff --git a/src/udp/errors.rs b/src/udp/errors.rs index 8d7b04b4f..f90149a99 100644 --- a/src/udp/errors.rs +++ b/src/udp/errors.rs @@ -1,5 +1,7 @@ use thiserror::Error; +use crate::tracker::torrent; + #[derive(Error, Debug)] pub enum ServerError { #[error("internal server error")] @@ -32,3 +34,16 @@ pub enum ServerError { #[error("bad request")] BadRequest, } + +impl From for ServerError { + fn from(e: torrent::Error) -> Self { + match e { + torrent::Error::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, + torrent::Error::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, + 
torrent::Error::PeerKeyNotValid => ServerError::PeerKeyNotValid, + torrent::Error::NoPeersFound => ServerError::NoPeersFound, + torrent::Error::CouldNotSendResponse => ServerError::InternalServerError, + torrent::Error::InvalidInfoHash => ServerError::InvalidInfoHash, + } + } +} diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index cdf12ed6b..dde8d14ae 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -8,28 +8,10 @@ use aquatic_udp_protocol::{ use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; use crate::protocol::common::{InfoHash, MAX_SCRAPE_TORRENTS}; -use crate::tracker::{self, peer, statistics, torrent}; +use crate::tracker::{self, peer, statistics}; use crate::udp::errors::ServerError; use crate::udp::request::AnnounceRequestWrapper; -pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), ServerError> { - match tracker.authenticate_request(info_hash, &None).await { - Ok(_) => Ok(()), - Err(e) => { - let err = match e { - torrent::Error::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, - torrent::Error::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, - torrent::Error::PeerKeyNotValid => ServerError::PeerKeyNotValid, - torrent::Error::NoPeersFound => ServerError::NoPeersFound, - torrent::Error::CouldNotSendResponse => ServerError::InternalServerError, - torrent::Error::InvalidInfoHash => ServerError::InvalidInfoHash, - }; - - Err(err) - } - } -} - pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: Arc) -> Response { match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|_| ServerError::InternalServerError) { Ok(request) => { @@ -41,14 +23,17 @@ pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: A match handle_request(request, remote_addr, tracker).await { Ok(response) => response, - Err(e) => handle_error(e, transaction_id), + Err(e) => handle_error(&e, transaction_id), } } // bad request - 
Err(_) => handle_error(ServerError::BadRequest, TransactionId(0)), + Err(_) => handle_error(&ServerError::BadRequest, TransactionId(0)), } } +/// # Errors +/// +/// If an error happens in the `handle_request` function, it will just return the `ServerError`. pub async fn handle_request( request: Request, remote_addr: SocketAddr, @@ -61,6 +46,9 @@ pub async fn handle_request( } } +/// # Errors +/// +/// This function does not ever return an error. pub async fn handle_connect( remote_addr: SocketAddr, request: &ConnectRequest, @@ -87,21 +75,21 @@ pub async fn handle_connect( Ok(response) } +/// # Errors +/// +/// If an error happens in the `handle_announce` function, it will just return the `ServerError`. pub async fn handle_announce( remote_addr: SocketAddr, announce_request: &AnnounceRequest, tracker: Arc, ) -> Result { - match check(&remote_addr, &from_connection_id(&announce_request.connection_id)) { - Ok(_) => {} - Err(e) => { - return Err(e); - } - } + check(&remote_addr, &from_connection_id(&announce_request.connection_id))?; let wrapped_announce_request = AnnounceRequestWrapper::new(announce_request.clone()); - authenticate(&wrapped_announce_request.info_hash, tracker.clone()).await?; + tracker + .authenticate_request(&wrapped_announce_request.info_hash, &None) + .await?; let peer = peer::TorrentPeer::from_udp_announce_request( &wrapped_announce_request.announce_request, @@ -120,12 +108,13 @@ pub async fn handle_announce( .get_torrent_peers(&wrapped_announce_request.info_hash, &peer.peer_addr) .await; + #[allow(clippy::cast_possible_truncation)] let announce_response = if remote_addr.is_ipv4() { Response::from(AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, - announce_interval: AnnounceInterval(tracker.config.announce_interval as i32), - leechers: NumberOfPeers(torrent_stats.leechers as i32), - seeders: NumberOfPeers(torrent_stats.seeders as i32), + announce_interval: 
AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), + leechers: NumberOfPeers(i64::from(torrent_stats.leechers) as i32), + seeders: NumberOfPeers(i64::from(torrent_stats.seeders) as i32), peers: peers .iter() .filter_map(|peer| { @@ -143,9 +132,9 @@ pub async fn handle_announce( } else { Response::from(AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, - announce_interval: AnnounceInterval(tracker.config.announce_interval as i32), - leechers: NumberOfPeers(torrent_stats.leechers as i32), - seeders: NumberOfPeers(torrent_stats.seeders as i32), + announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), + leechers: NumberOfPeers(i64::from(torrent_stats.leechers) as i32), + seeders: NumberOfPeers(i64::from(torrent_stats.seeders) as i32), peers: peers .iter() .filter_map(|peer| { @@ -175,7 +164,11 @@ pub async fn handle_announce( Ok(announce_response) } -// todo: refactor this, db lock can be a lot shorter +/// # Errors +/// +/// This function does not ever return an error. 
+/// +/// TODO: refactor this, db lock can be a lot shorter pub async fn handle_scrape( remote_addr: SocketAddr, request: &ScrapeRequest, @@ -190,13 +183,14 @@ pub async fn handle_scrape( let scrape_entry = match db.get(&info_hash) { Some(torrent_info) => { - if authenticate(&info_hash, tracker.clone()).await.is_ok() { + if tracker.authenticate_request(&info_hash, &None).await.is_ok() { let (seeders, completed, leechers) = torrent_info.get_stats(); + #[allow(clippy::cast_possible_truncation)] TorrentScrapeStatistics { - seeders: NumberOfPeers(seeders as i32), - completed: NumberOfDownloads(completed as i32), - leechers: NumberOfPeers(leechers as i32), + seeders: NumberOfPeers(i64::from(seeders) as i32), + completed: NumberOfDownloads(i64::from(completed) as i32), + leechers: NumberOfPeers(i64::from(leechers) as i32), } } else { TorrentScrapeStatistics { @@ -234,7 +228,7 @@ pub async fn handle_scrape( })) } -fn handle_error(e: ServerError, transaction_id: TransactionId) -> Response { +fn handle_error(e: &ServerError, transaction_id: TransactionId) -> Response { let message = e.to_string(); Response::from(ErrorResponse { transaction_id, @@ -260,7 +254,7 @@ mod tests { fn initialized_public_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Tracker::Public).into()); - initialized_tracker(configuration) + initialized_tracker(&configuration) } fn initialized_private_tracker() -> Arc { @@ -269,17 +263,17 @@ mod tests { .with_mode(mode::Tracker::Private) .into(), ); - initialized_tracker(configuration) + initialized_tracker(&configuration) } fn initialized_whitelisted_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Tracker::Listed).into()); - initialized_tracker(configuration) + initialized_tracker(&configuration) } - fn initialized_tracker(configuration: Arc) -> Arc { + fn initialized_tracker(configuration: &Arc) -> Arc { let (stats_event_sender, stats_repository) = 
statistics::Keeper::new_active_instance(); - Arc::new(tracker::Tracker::new(&configuration, Some(stats_event_sender), stats_repository).unwrap()) + Arc::new(tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()) } fn sample_ipv4_remote_addr() -> SocketAddr { From aa30bb1c933b6091d9eae1fe790b00a349a52a6d Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 25 Nov 2022 20:15:56 +0100 Subject: [PATCH 188/435] clippy: fix src/udp/request.rs --- src/udp/handlers.rs | 2 +- src/udp/request.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index dde8d14ae..274af1e2c 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -85,7 +85,7 @@ pub async fn handle_announce( ) -> Result { check(&remote_addr, &from_connection_id(&announce_request.connection_id))?; - let wrapped_announce_request = AnnounceRequestWrapper::new(announce_request.clone()); + let wrapped_announce_request = AnnounceRequestWrapper::new(announce_request); tracker .authenticate_request(&wrapped_announce_request.info_hash, &None) diff --git a/src/udp/request.rs b/src/udp/request.rs index 53d646f1a..34139384b 100644 --- a/src/udp/request.rs +++ b/src/udp/request.rs @@ -24,7 +24,7 @@ pub struct AnnounceRequestWrapper { impl AnnounceRequestWrapper { #[must_use] - pub fn new(announce_request: AnnounceRequest) -> Self { + pub fn new(announce_request: &AnnounceRequest) -> Self { AnnounceRequestWrapper { announce_request: announce_request.clone(), info_hash: InfoHash(announce_request.info_hash.0), From 436a0c1e03b09878bbb16102f83fd3350f95e054 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 25 Nov 2022 20:25:56 +0100 Subject: [PATCH 189/435] clippy: fix src/udp/server.rs --- src/jobs/udp_tracker.rs | 4 ++-- src/udp/server.rs | 21 ++++++++++++++------- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index d5fdae4c1..57369f660 100644 --- 
a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -5,14 +5,14 @@ use tokio::task::JoinHandle; use crate::config::UdpTracker; use crate::tracker; -use crate::udp::server::UdpServer; +use crate::udp::server::Udp; #[must_use] pub fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.clone(); tokio::spawn(async move { - match UdpServer::new(tracker, &bind_addr).await { + match Udp::new(tracker, &bind_addr).await { Ok(udp_server) => { info!("Starting UDP server on: {}", bind_addr); udp_server.start().await; diff --git a/src/udp/server.rs b/src/udp/server.rs index 705a6c263..5bd835365 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -10,21 +10,27 @@ use crate::tracker; use crate::udp::handlers::handle_packet; use crate::udp::MAX_PACKET_SIZE; -pub struct UdpServer { +pub struct Udp { socket: Arc, tracker: Arc, } -impl UdpServer { - pub async fn new(tracker: Arc, bind_address: &str) -> tokio::io::Result { +impl Udp { + /// # Errors + /// + /// Will return `Err` if unable to bind to the supplied `bind_address`. + pub async fn new(tracker: Arc, bind_address: &str) -> tokio::io::Result { let socket = UdpSocket::bind(bind_address).await?; - Ok(UdpServer { + Ok(Udp { socket: Arc::new(socket), tracker, }) } + /// # Panics + /// + /// It would panic if unable to resolve the `local_addr` from the supplied `socket`. 
pub async fn start(&self) { loop { let mut data = [0; MAX_PACKET_SIZE]; @@ -43,7 +49,7 @@ impl UdpServer { debug!("{:?}", payload); let response = handle_packet(remote_addr, payload, tracker).await; - UdpServer::send_response(socket, remote_addr, response).await; + Udp::send_response(socket, remote_addr, response).await; } } } @@ -57,11 +63,12 @@ impl UdpServer { match response.write(&mut cursor) { Ok(_) => { + #[allow(clippy::cast_possible_truncation)] let position = cursor.position() as usize; let inner = cursor.get_ref(); debug!("{:?}", &inner[..position]); - UdpServer::send_packet(socket, &remote_addr, &inner[..position]).await; + Udp::send_packet(socket, &remote_addr, &inner[..position]).await; } Err(_) => { debug!("could not write response to bytes."); @@ -71,6 +78,6 @@ impl UdpServer { async fn send_packet(socket: Arc, remote_addr: &SocketAddr, payload: &[u8]) { // doesn't matter if it reaches or not - let _ = socket.send_to(payload, remote_addr).await; + drop(socket.send_to(payload, remote_addr).await); } } From 6564c10de70ebd1fda24443537205bf5186be44c Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 25 Nov 2022 20:30:19 +0100 Subject: [PATCH 190/435] clippy: fix src/protocol/crypto.rs --- src/protocol/crypto.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/protocol/crypto.rs b/src/protocol/crypto.rs index 6e1517ef8..2d3f8f6fa 100644 --- a/src/protocol/crypto.rs +++ b/src/protocol/crypto.rs @@ -48,12 +48,12 @@ pub mod keys { #[test] fn the_default_seed_and_the_zeroed_seed_should_be_the_same_when_testing() { - assert_eq!(DefaultSeed::get_seed(), ZeroedTestSeed::get_seed()) + assert_eq!(DefaultSeed::get_seed(), ZeroedTestSeed::get_seed()); } #[test] fn the_default_seed_and_the_instance_seed_should_be_different_when_testing() { - assert_ne!(DefaultSeed::get_seed(), InstanceSeed::get_seed()) + assert_ne!(DefaultSeed::get_seed(), InstanceSeed::get_seed()); } } @@ -79,12 +79,12 @@ pub mod keys { #[test] fn 
it_should_have_a_zero_test_seed() { - assert_eq!(*ZEROED_TEST_SEED, [0u8; 32]) + assert_eq!(*ZEROED_TEST_SEED, [0u8; 32]); } #[test] fn it_should_default_to_zeroed_seed_when_testing() { - assert_eq!(*DEFAULT_SEED, *ZEROED_TEST_SEED) + assert_eq!(*DEFAULT_SEED, *ZEROED_TEST_SEED); } #[test] From baba21b31f669904c2528b87d91dc07edbcaa60b Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Fri, 25 Nov 2022 20:37:27 +0100 Subject: [PATCH 191/435] clippy: fix tests/udp.rs --- tests/udp.rs | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/tests/udp.rs b/tests/udp.rs index e93894843..8bad37dbe 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -48,7 +48,7 @@ mod udp_tracker_server { } } - pub async fn start(&mut self, configuration: Arc) { + pub fn start(&mut self, configuration: &Arc) { if !self.started.load(Ordering::Relaxed) { // Set the time of Torrust app starting lazy_static::initialize(&static_time::TIME_AT_APP_START); @@ -68,7 +68,7 @@ mod udp_tracker_server { }; // Initialize logging - logging::setup(&configuration); + logging::setup(configuration); let udp_tracker_config = &configuration.udp_trackers[0]; @@ -82,9 +82,9 @@ mod udp_tracker_server { } } - async fn new_running_udp_server(configuration: Arc) -> UdpServer { + fn new_running_udp_server(configuration: &Arc) -> UdpServer { let mut udp_server = UdpServer::new(); - udp_server.start(configuration).await; + udp_server.start(configuration); udp_server } @@ -101,7 +101,7 @@ mod udp_tracker_server { } async fn connect(&self, remote_address: &str) { - self.socket.connect(remote_address).await.unwrap() + self.socket.connect(remote_address).await.unwrap(); } async fn send(&self, bytes: &[u8]) -> usize { @@ -134,12 +134,13 @@ mod udp_tracker_server { let request_data = match request.write(&mut cursor) { Ok(_) => { + #[allow(clippy::cast_possible_truncation)] let position = cursor.position() as usize; let inner_request_buffer = cursor.get_ref(); // Return slice which contains 
written request data &inner_request_buffer[..position] } - Err(_) => panic!("could not write request to bytes."), + Err(e) => panic!("could not write request to bytes: {e}."), }; self.udp_client.send(request_data).await @@ -199,7 +200,7 @@ mod udp_tracker_server { async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { let configuration = tracker_configuration(); - let udp_server = new_running_udp_server(configuration).await; + let udp_server = new_running_udp_server(&configuration); let client = new_connected_udp_client(&udp_server.bind_address.unwrap()).await; @@ -216,7 +217,7 @@ mod udp_tracker_server { async fn should_return_a_connect_response_when_the_client_sends_a_connection_request() { let configuration = tracker_configuration(); - let udp_server = new_running_udp_server(configuration).await; + let udp_server = new_running_udp_server(&configuration); let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; @@ -248,7 +249,7 @@ mod udp_tracker_server { async fn should_return_an_announce_response_when_the_client_sends_an_announce_request() { let configuration = tracker_configuration(); - let udp_server = new_running_udp_server(configuration).await; + let udp_server = new_running_udp_server(&configuration); let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; @@ -282,7 +283,7 @@ mod udp_tracker_server { async fn should_return_a_scrape_response_when_the_client_sends_a_scrape_request() { let configuration = tracker_configuration(); - let udp_server = new_running_udp_server(configuration).await; + let udp_server = new_running_udp_server(&configuration); let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; From 0d162a12880754412a989cda09ce91c03dceb6e5 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 28 Nov 2022 18:51:58 +0100 Subject: [PATCH 192/435] refactor: correct naming of structs and enums --- 
src/api/resources/auth_key_resource.rs | 20 +- src/api/resources/torrent_resource.rs | 7 +- src/api/server.rs | 6 +- src/config.rs | 34 +-- src/databases/database.rs | 2 +- src/databases/mysql.rs | 3 +- src/databases/sqlite.rs | 2 +- src/http/{errors.rs => error.rs} | 8 +- src/http/filters.rs | 37 +-- src/http/handlers.rs | 47 ++-- src/http/mod.rs | 2 +- src/http/request.rs | 7 +- src/http/response.rs | 6 +- src/protocol/common.rs | 318 ------------------------- src/protocol/crypto.rs | 30 +-- src/protocol/info_hash.rs | 190 +++++++++++++++ src/protocol/mod.rs | 1 + src/tracker/mod.rs | 10 +- src/tracker/mode.rs | 2 +- src/tracker/peer.rs | 144 ++++++++++- src/tracker/torrent.rs | 13 +- src/udp/connection_cookie.rs | 10 +- src/udp/{errors.rs => error.rs} | 18 +- src/udp/handlers.rs | 65 +++-- src/udp/mod.rs | 2 +- src/udp/request.rs | 8 +- tests/api.rs | 10 +- 27 files changed, 495 insertions(+), 507 deletions(-) rename src/http/{errors.rs => error.rs} (86%) create mode 100644 src/protocol/info_hash.rs rename src/udp/{errors.rs => error.rs} (57%) diff --git a/src/api/resources/auth_key_resource.rs b/src/api/resources/auth_key_resource.rs index 9b3cc9646..b575984db 100644 --- a/src/api/resources/auth_key_resource.rs +++ b/src/api/resources/auth_key_resource.rs @@ -6,13 +6,13 @@ use crate::protocol::clock::DurationSinceUnixEpoch; use crate::tracker::key::Auth; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] -pub struct AuthKeyResource { +pub struct AuthKey { pub key: String, pub valid_until: Option, } -impl From for Auth { - fn from(auth_key_resource: AuthKeyResource) -> Self { +impl From for Auth { + fn from(auth_key_resource: AuthKey) -> Self { Auth { key: auth_key_resource.key, valid_until: auth_key_resource @@ -22,9 +22,9 @@ impl From for Auth { } } -impl From for AuthKeyResource { +impl From for AuthKey { fn from(auth_key: Auth) -> Self { - AuthKeyResource { + AuthKey { key: auth_key.key, valid_until: auth_key.valid_until.map(|valid_until| 
valid_until.as_secs()), } @@ -35,7 +35,7 @@ impl From for AuthKeyResource { mod tests { use std::time::Duration; - use super::AuthKeyResource; + use super::AuthKey; use crate::protocol::clock::{Current, TimeNow}; use crate::tracker::key::Auth; @@ -43,7 +43,7 @@ mod tests { fn it_should_be_convertible_into_an_auth_key() { let duration_in_secs = 60; - let auth_key_resource = AuthKeyResource { + let auth_key_resource = AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(duration_in_secs), }; @@ -67,8 +67,8 @@ mod tests { }; assert_eq!( - AuthKeyResource::from(auth_key), - AuthKeyResource { + AuthKey::from(auth_key), + AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(duration_in_secs) } @@ -78,7 +78,7 @@ mod tests { #[test] fn it_should_be_convertible_into_json() { assert_eq!( - serde_json::to_string(&AuthKeyResource { + serde_json::to_string(&AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(60) }) diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs index eb9620d23..4063b95f5 100644 --- a/src/api/resources/torrent_resource.rs +++ b/src/api/resources/torrent_resource.rs @@ -1,7 +1,6 @@ use serde::{Deserialize, Serialize}; -use crate::protocol::common::PeerId; -use crate::tracker::peer::TorrentPeer; +use crate::tracker::peer::{self, TorrentPeer}; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct TorrentResource { @@ -42,8 +41,8 @@ pub struct PeerIdResource { pub client: Option, } -impl From for PeerIdResource { - fn from(peer_id: PeerId) -> Self { +impl From for PeerIdResource { + fn from(peer_id: peer::Id) -> Self { PeerIdResource { id: peer_id.get_id(), client: peer_id.get_client_name().map(std::string::ToString::to_string), diff --git a/src/api/server.rs b/src/api/server.rs index fac25e297..61fd8ed3d 100644 --- a/src/api/server.rs +++ b/src/api/server.rs 
@@ -7,10 +7,10 @@ use std::time::Duration; use serde::{Deserialize, Serialize}; use warp::{filters, reply, serve, Filter}; -use super::resources::auth_key_resource::AuthKeyResource; +use super::resources::auth_key_resource::AuthKey; use super::resources::stats_resource::StatsResource; use super::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; -use crate::protocol::common::InfoHash; +use crate::protocol::info_hash::InfoHash; use crate::tracker; #[derive(Deserialize, Debug)] @@ -243,7 +243,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl w }) .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { - Ok(auth_key) => Ok(warp::reply::json(&AuthKeyResource::from(auth_key))), + Ok(auth_key) => Ok(warp::reply::json(&AuthKey::from(auth_key))), Err(..) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to generate key".into(), })), diff --git a/src/config.rs b/src/config.rs index 9f6ca7092..67177aca1 100644 --- a/src/config.rs +++ b/src/config.rs @@ -41,7 +41,7 @@ pub struct HttpApi { #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct Configuration { pub log_level: Option, - pub mode: mode::Tracker, + pub mode: mode::Mode, pub db_driver: Drivers, pub db_path: String, pub announce_interval: u32, @@ -59,7 +59,7 @@ pub struct Configuration { } #[derive(Debug)] -pub enum ConfigurationError { +pub enum Error { Message(String), ConfigError(ConfigError), IOError(std::io::Error), @@ -67,19 +67,19 @@ pub enum ConfigurationError { TrackerModeIncompatible, } -impl std::fmt::Display for ConfigurationError { +impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { - ConfigurationError::Message(e) => e.fmt(f), - ConfigurationError::ConfigError(e) => e.fmt(f), - ConfigurationError::IOError(e) => e.fmt(f), - ConfigurationError::ParseError(e) => e.fmt(f), - 
ConfigurationError::TrackerModeIncompatible => write!(f, "{:?}", self), + Error::Message(e) => e.fmt(f), + Error::ConfigError(e) => e.fmt(f), + Error::IOError(e) => e.fmt(f), + Error::ParseError(e) => e.fmt(f), + Error::TrackerModeIncompatible => write!(f, "{:?}", self), } } } -impl std::error::Error for ConfigurationError {} +impl std::error::Error for Error {} impl Configuration { #[must_use] @@ -97,7 +97,7 @@ impl Configuration { pub fn default() -> Configuration { let mut configuration = Configuration { log_level: Option::from(String::from("info")), - mode: mode::Tracker::Public, + mode: mode::Mode::Public, db_driver: Drivers::Sqlite3, db_path: String::from("data.db"), announce_interval: 120, @@ -137,7 +137,7 @@ impl Configuration { /// # Errors /// /// Will return `Err` if `path` does not exist or has a bad configuration. - pub fn load_from_file(path: &str) -> Result { + pub fn load_from_file(path: &str) -> Result { let config_builder = Config::builder(); #[allow(unused_assignments)] @@ -147,18 +147,18 @@ impl Configuration { config = config_builder .add_source(File::with_name(path)) .build() - .map_err(ConfigurationError::ConfigError)?; + .map_err(Error::ConfigError)?; } else { eprintln!("No config file found."); eprintln!("Creating config file.."); let config = Configuration::default(); config.save_to_file(path)?; - return Err(ConfigurationError::Message( + return Err(Error::Message( "Please edit the config.TOML in the root folder and restart the tracker.".to_string(), )); } - let torrust_config: Configuration = config.try_deserialize().map_err(ConfigurationError::ConfigError)?; + let torrust_config: Configuration = config.try_deserialize().map_err(Error::ConfigError)?; Ok(torrust_config) } @@ -167,7 +167,7 @@ impl Configuration { /// /// Will return `Err` if `filename` does not exist or the user does not have /// permission to read it. 
- pub fn save_to_file(&self, path: &str) -> Result<(), ConfigurationError> { + pub fn save_to_file(&self, path: &str) -> Result<(), Error> { let toml_string = toml::to_string(self).expect("Could not encode TOML value"); fs::write(path, toml_string).expect("Could not write to file!"); Ok(()) @@ -176,7 +176,7 @@ impl Configuration { #[cfg(test)] mod tests { - use crate::config::{Configuration, ConfigurationError}; + use crate::config::{Configuration, Error}; #[cfg(test)] fn default_config_toml() -> String { @@ -294,7 +294,7 @@ mod tests { #[test] fn configuration_error_could_be_displayed() { - let error = ConfigurationError::TrackerModeIncompatible; + let error = Error::TrackerModeIncompatible; assert_eq!(format!("{}", error), "TrackerModeIncompatible"); } diff --git a/src/databases/database.rs b/src/databases/database.rs index 5e4a7c1f9..7055d2a09 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize}; use crate::databases::mysql::Mysql; use crate::databases::sqlite::Sqlite; -use crate::protocol::common::InfoHash; +use crate::protocol::info_hash::InfoHash; use crate::tracker::key::Auth; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 4fd00e31e..0dafc3a60 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -10,7 +10,8 @@ use r2d2_mysql::MysqlConnectionManager; use crate::databases::database; use crate::databases::database::{Database, Error}; -use crate::protocol::common::{InfoHash, AUTH_KEY_LENGTH}; +use crate::protocol::common::AUTH_KEY_LENGTH; +use crate::protocol::info_hash::InfoHash; use crate::tracker::key::Auth; pub struct Mysql { diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 159da9922..39dea8502 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -8,7 +8,7 @@ use r2d2_sqlite::SqliteConnectionManager; use crate::databases::database; use 
crate::databases::database::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; -use crate::protocol::common::InfoHash; +use crate::protocol::info_hash::InfoHash; use crate::tracker::key::Auth; pub struct Sqlite { diff --git a/src/http/errors.rs b/src/http/error.rs similarity index 86% rename from src/http/errors.rs rename to src/http/error.rs index fe0cf26e6..b6c08a8ba 100644 --- a/src/http/errors.rs +++ b/src/http/error.rs @@ -2,12 +2,12 @@ use thiserror::Error; use warp::reject::Reject; #[derive(Error, Debug)] -pub enum ServerError { +pub enum Error { #[error("internal server error")] - InternalServerError, + InternalServer, #[error("info_hash is either missing or invalid")] - InvalidInfoHash, + InvalidInfo, #[error("peer_id is either missing or invalid")] InvalidPeerId, @@ -31,4 +31,4 @@ pub enum ServerError { ExceededInfoHashLimit, } -impl Reject for ServerError {} +impl Reject for Error {} diff --git a/src/http/filters.rs b/src/http/filters.rs index 2c3ab626d..484ae2311 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -5,12 +5,13 @@ use std::sync::Arc; use warp::{reject, Filter, Rejection}; -use super::errors::ServerError; -use super::request::{Announce, AnnounceRequestQuery, Scrape}; +use super::error::Error; +use super::request::{Announce, AnnounceQuery, Scrape}; use super::WebResult; -use crate::protocol::common::{InfoHash, PeerId, MAX_SCRAPE_TORRENTS}; -use crate::tracker; +use crate::protocol::common::MAX_SCRAPE_TORRENTS; +use crate::protocol::info_hash::InfoHash; use crate::tracker::key::Auth; +use crate::tracker::{self, peer}; /// Pass Arc along #[must_use] @@ -28,7 +29,7 @@ pub fn with_info_hash() -> impl Filter,), Error = Rejec /// Check for `PeerId` #[must_use] -pub fn with_peer_id() -> impl Filter + Clone { +pub fn with_peer_id() -> impl Filter + Clone { warp::filters::query::raw().and_then(|q| async move { peer_id(&q) }) } @@ -54,7 +55,7 @@ pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter impl Filter + 
Clone { - warp::filters::query::query::() + warp::filters::query::query::() .and(with_info_hash()) .and(with_peer_id()) .and(with_peer_addr(on_reverse_proxy)) @@ -88,9 +89,9 @@ fn info_hashes(raw_query: &String) -> WebResult> { } if info_hashes.len() > MAX_SCRAPE_TORRENTS as usize { - Err(reject::custom(ServerError::ExceededInfoHashLimit)) + Err(reject::custom(Error::ExceededInfoHashLimit)) } else if info_hashes.is_empty() { - Err(reject::custom(ServerError::InvalidInfoHash)) + Err(reject::custom(Error::InvalidInfo)) } else { Ok(info_hashes) } @@ -98,11 +99,11 @@ fn info_hashes(raw_query: &String) -> WebResult> { /// Parse `PeerId` from raw query string #[allow(clippy::ptr_arg)] -fn peer_id(raw_query: &String) -> WebResult { +fn peer_id(raw_query: &String) -> WebResult { // put all query params in a vec let split_raw_query: Vec<&str> = raw_query.split('&').collect(); - let mut peer_id: Option = None; + let mut peer_id: Option = None; for v in split_raw_query { // look for the peer_id param @@ -115,32 +116,32 @@ fn peer_id(raw_query: &String) -> WebResult { // peer_id must be 20 bytes if peer_id_bytes.len() != 20 { - return Err(reject::custom(ServerError::InvalidPeerId)); + return Err(reject::custom(Error::InvalidPeerId)); } // clone peer_id_bytes into fixed length array let mut byte_arr: [u8; 20] = Default::default(); byte_arr.clone_from_slice(peer_id_bytes.as_slice()); - peer_id = Some(PeerId(byte_arr)); + peer_id = Some(peer::Id(byte_arr)); break; } } match peer_id { Some(id) => Ok(id), - None => Err(reject::custom(ServerError::InvalidPeerId)), + None => Err(reject::custom(Error::InvalidPeerId)), } } /// Get `PeerAddress` from `RemoteAddress` or Forwarded fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option)) -> WebResult { if !on_reverse_proxy && remote_addr.is_none() { - return Err(reject::custom(ServerError::AddressNotFound)); + return Err(reject::custom(Error::AddressNotFound)); } if on_reverse_proxy && 
x_forwarded_for.is_none() { - return Err(reject::custom(ServerError::AddressNotFound)); + return Err(reject::custom(Error::AddressNotFound)); } if on_reverse_proxy { @@ -152,7 +153,7 @@ fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, - peer_id: PeerId, + peer_id: peer::Id, peer_addr: IpAddr, ) -> WebResult { Ok(Announce { diff --git a/src/http/handlers.rs b/src/http/handlers.rs index d4ae76e65..ff5469168 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -7,11 +7,10 @@ use log::debug; use warp::http::Response; use warp::{reject, Rejection, Reply}; -use super::errors::ServerError; -use super::response::{self, Peer, ScrapeResponseEntry}; +use super::error::Error; +use super::response::{self, Peer, ScrapeEntry}; use super::{request, WebResult}; -use crate::http::response::Error; -use crate::protocol::common::InfoHash; +use crate::protocol::info_hash::InfoHash; use crate::tracker::key::Auth; use crate::tracker::{self, peer, statistics, torrent}; @@ -20,18 +19,14 @@ use crate::tracker::{self, peer, statistics, torrent}; /// # Errors /// /// Will return `ServerError` that wraps the `Error` if unable to `authenticate_request`. 
-pub async fn authenticate( - info_hash: &InfoHash, - auth_key: &Option, - tracker: Arc, -) -> Result<(), ServerError> { +pub async fn authenticate(info_hash: &InfoHash, auth_key: &Option, tracker: Arc) -> Result<(), Error> { tracker.authenticate_request(info_hash, auth_key).await.map_err(|e| match e { - torrent::Error::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, - torrent::Error::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, - torrent::Error::PeerKeyNotValid => ServerError::PeerKeyNotValid, - torrent::Error::NoPeersFound => ServerError::NoPeersFound, - torrent::Error::CouldNotSendResponse => ServerError::InternalServerError, - torrent::Error::InvalidInfoHash => ServerError::InvalidInfoHash, + torrent::Error::TorrentNotWhitelisted => Error::TorrentNotWhitelisted, + torrent::Error::PeerNotAuthenticated => Error::PeerNotAuthenticated, + torrent::Error::PeerKeyNotValid => Error::PeerKeyNotValid, + torrent::Error::NoPeersFound => Error::NoPeersFound, + torrent::Error::CouldNotSendResponse => Error::InternalServer, + torrent::Error::InvalidInfoHash => Error::InvalidInfo, }) } @@ -91,7 +86,7 @@ pub async fn handle_scrape( auth_key: Option, tracker: Arc, ) -> WebResult { - let mut files: HashMap = HashMap::new(); + let mut files: HashMap = HashMap::new(); let db = tracker.get_torrents().await; for info_hash in &scrape_request.info_hashes { @@ -99,20 +94,20 @@ pub async fn handle_scrape( Some(torrent_info) => { if authenticate(info_hash, &auth_key, tracker.clone()).await.is_ok() { let (seeders, completed, leechers) = torrent_info.get_stats(); - ScrapeResponseEntry { + ScrapeEntry { complete: seeders, downloaded: completed, incomplete: leechers, } } else { - ScrapeResponseEntry { + ScrapeEntry { complete: 0, downloaded: 0, incomplete: 0, } } } - None => ScrapeResponseEntry { + None => ScrapeEntry { complete: 0, downloaded: 0, incomplete: 0, @@ -165,7 +160,7 @@ fn send_announce_response( if let Some(1) = announce_request.compact { match 
res.write_compact() { Ok(body) => Ok(Response::new(body)), - Err(_) => Err(reject::custom(ServerError::InternalServerError)), + Err(_) => Err(reject::custom(Error::InternalServer)), } } else { Ok(Response::new(res.write().into())) @@ -173,12 +168,12 @@ fn send_announce_response( } /// Send scrape response -fn send_scrape_response(files: HashMap) -> WebResult { +fn send_scrape_response(files: HashMap) -> WebResult { let res = response::Scrape { files }; match res.write() { Ok(body) => Ok(Response::new(body)), - Err(_) => Err(reject::custom(ServerError::InternalServerError)), + Err(_) => Err(reject::custom(Error::InternalServer)), } } @@ -188,15 +183,15 @@ fn send_scrape_response(files: HashMap) -> WebRes /// /// Will not return a error, `Infallible`, but instead convert the `ServerError` into a `Response`. pub fn send_error(r: &Rejection) -> std::result::Result { - let body = if let Some(server_error) = r.find::() { + let body = if let Some(server_error) = r.find::() { debug!("{:?}", server_error); - Error { + response::Error { failure_reason: server_error.to_string(), } .write() } else { - Error { - failure_reason: ServerError::InternalServerError.to_string(), + response::Error { + failure_reason: Error::InternalServer.to_string(), } .write() }; diff --git a/src/http/mod.rs b/src/http/mod.rs index 6e3ce7111..701dba407 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -1,4 +1,4 @@ -pub mod errors; +pub mod error; pub mod filters; pub mod handlers; pub mod request; diff --git a/src/http/request.rs b/src/http/request.rs index b812e1173..bc549b698 100644 --- a/src/http/request.rs +++ b/src/http/request.rs @@ -3,10 +3,11 @@ use std::net::IpAddr; use serde::Deserialize; use crate::http::Bytes; -use crate::protocol::common::{InfoHash, PeerId}; +use crate::protocol::info_hash::InfoHash; +use crate::tracker::peer; #[derive(Deserialize)] -pub struct AnnounceRequestQuery { +pub struct AnnounceQuery { pub downloaded: Option, pub uploaded: Option, pub key: Option, @@ -22,7 
+23,7 @@ pub struct Announce { pub peer_addr: IpAddr, pub downloaded: Bytes, pub uploaded: Bytes, - pub peer_id: PeerId, + pub peer_id: peer::Id, pub port: u16, pub left: Bytes, pub event: Option, diff --git a/src/http/response.rs b/src/http/response.rs index 98ea6fe73..962e72fac 100644 --- a/src/http/response.rs +++ b/src/http/response.rs @@ -5,7 +5,7 @@ use std::net::IpAddr; use serde; use serde::Serialize; -use crate::protocol::common::InfoHash; +use crate::protocol::info_hash::InfoHash; #[derive(Serialize)] pub struct Peer { @@ -78,7 +78,7 @@ impl Announce { } #[derive(Serialize)] -pub struct ScrapeResponseEntry { +pub struct ScrapeEntry { pub complete: u32, pub downloaded: u32, pub incomplete: u32, @@ -86,7 +86,7 @@ pub struct ScrapeResponseEntry { #[derive(Serialize)] pub struct Scrape { - pub files: HashMap, + pub files: HashMap, } impl Scrape { diff --git a/src/protocol/common.rs b/src/protocol/common.rs index d6a98cf03..527ae9ebc 100644 --- a/src/protocol/common.rs +++ b/src/protocol/common.rs @@ -25,321 +25,3 @@ pub enum AnnounceEventDef { #[derive(Serialize, Deserialize)] #[serde(remote = "NumberOfBytes")] pub struct NumberOfBytesDef(pub i64); - -#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct InfoHash(pub [u8; 20]); - -impl std::fmt::Display for InfoHash { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let mut chars = [0u8; 40]; - binascii::bin2hex(&self.0, &mut chars).expect("failed to hexlify"); - write!(f, "{}", std::str::from_utf8(&chars).unwrap()) - } -} - -impl std::str::FromStr for InfoHash { - type Err = binascii::ConvertError; - - fn from_str(s: &str) -> Result { - let mut i = Self([0u8; 20]); - if s.len() != 40 { - return Err(binascii::ConvertError::InvalidInputLength); - } - binascii::hex2bin(s.as_bytes(), &mut i.0)?; - Ok(i) - } -} - -impl Ord for InfoHash { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.0.cmp(&other.0) - } -} - -impl std::cmp::PartialOrd for InfoHash { - fn 
partial_cmp(&self, other: &InfoHash) -> Option { - self.0.partial_cmp(&other.0) - } -} - -impl std::convert::From<&[u8]> for InfoHash { - fn from(data: &[u8]) -> InfoHash { - assert_eq!(data.len(), 20); - let mut ret = InfoHash([0u8; 20]); - ret.0.clone_from_slice(data); - ret - } -} - -impl std::convert::From<[u8; 20]> for InfoHash { - fn from(val: [u8; 20]) -> Self { - InfoHash(val) - } -} - -impl serde::ser::Serialize for InfoHash { - fn serialize(&self, serializer: S) -> Result { - let mut buffer = [0u8; 40]; - let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); - let str_out = std::str::from_utf8(bytes_out).unwrap(); - serializer.serialize_str(str_out) - } -} - -impl<'de> serde::de::Deserialize<'de> for InfoHash { - fn deserialize>(des: D) -> Result { - des.deserialize_str(InfoHashVisitor) - } -} - -#[cfg(test)] -mod tests { - - use std::str::FromStr; - - use serde::{Deserialize, Serialize}; - use serde_json::json; - - use super::InfoHash; - - #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] - struct ContainingInfoHash { - pub info_hash: InfoHash, - } - - #[test] - fn an_info_hash_can_be_created_from_a_valid_40_utf8_char_string_representing_an_hexadecimal_value() { - let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"); - assert!(info_hash.is_ok()); - } - - #[test] - fn an_info_hash_can_not_be_created_from_a_utf8_string_representing_a_not_valid_hexadecimal_value() { - let info_hash = InfoHash::from_str("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"); - assert!(info_hash.is_err()); - } - - #[test] - fn an_info_hash_can_only_be_created_from_a_40_utf8_char_string() { - let info_hash = InfoHash::from_str(&"F".repeat(39)); - assert!(info_hash.is_err()); - - let info_hash = InfoHash::from_str(&"F".repeat(41)); - assert!(info_hash.is_err()); - } - - #[test] - fn an_info_hash_should_by_displayed_like_a_40_utf8_lowercased_char_hex_string() { - let info_hash = 
InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); - - let output = format!("{}", info_hash); - - assert_eq!(output, "ffffffffffffffffffffffffffffffffffffffff"); - } - - #[test] - fn an_info_hash_can_be_created_from_a_valid_20_byte_array_slice() { - let info_hash: InfoHash = [255u8; 20].as_slice().into(); - - assert_eq!( - info_hash, - InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() - ); - } - - #[test] - fn an_info_hash_can_be_created_from_a_valid_20_byte_array() { - let info_hash: InfoHash = [255u8; 20].into(); - - assert_eq!( - info_hash, - InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() - ); - } - - #[test] - fn an_info_hash_can_be_serialized() { - let s = ContainingInfoHash { - info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(), - }; - - let json_serialized_value = serde_json::to_string(&s).unwrap(); - - assert_eq!( - json_serialized_value, - r#"{"info_hash":"ffffffffffffffffffffffffffffffffffffffff"}"# - ); - } - - #[test] - fn an_info_hash_can_be_deserialized() { - let json = json!({ - "info_hash": "ffffffffffffffffffffffffffffffffffffffff", - }); - - let s: ContainingInfoHash = serde_json::from_value(json).unwrap(); - - assert_eq!( - s, - ContainingInfoHash { - info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() - } - ); - } -} - -struct InfoHashVisitor; - -impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { - type Value = InfoHash; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a 40 character long hash") - } - - fn visit_str(self, v: &str) -> Result { - if v.len() != 40 { - return Err(serde::de::Error::invalid_value( - serde::de::Unexpected::Str(v), - &"expected a 40 character long string", - )); - } - - let mut res = InfoHash([0u8; 20]); - - if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { - return Err(serde::de::Error::invalid_value( - 
serde::de::Unexpected::Str(v), - &"expected a hexadecimal string", - )); - }; - Ok(res) - } -} - -#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] -pub struct PeerId(pub [u8; 20]); - -impl std::fmt::Display for PeerId { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let mut buffer = [0u8; 20]; - let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok(); - match bytes_out { - Some(bytes) => write!(f, "{}", std::str::from_utf8(bytes).unwrap()), - None => write!(f, ""), - } - } -} - -impl PeerId { - #[must_use] - pub fn get_id(&self) -> Option { - let buff_size = self.0.len() * 2; - let mut tmp: Vec = vec![0; buff_size]; - binascii::bin2hex(&self.0, &mut tmp).unwrap(); - - std::str::from_utf8(&tmp).ok().map(std::string::ToString::to_string) - } - - #[must_use] - pub fn get_client_name(&self) -> Option<&'static str> { - if self.0[0] == b'M' { - return Some("BitTorrent"); - } - if self.0[0] == b'-' { - let name = match &self.0[1..3] { - b"AG" | b"A~" => "Ares", - b"AR" => "Arctic", - b"AV" => "Avicora", - b"AX" => "BitPump", - b"AZ" => "Azureus", - b"BB" => "BitBuddy", - b"BC" => "BitComet", - b"BF" => "Bitflu", - b"BG" => "BTG (uses Rasterbar libtorrent)", - b"BR" => "BitRocket", - b"BS" => "BTSlave", - b"BX" => "~Bittorrent X", - b"CD" => "Enhanced CTorrent", - b"CT" => "CTorrent", - b"DE" => "DelugeTorrent", - b"DP" => "Propagate Data Client", - b"EB" => "EBit", - b"ES" => "electric sheep", - b"FT" => "FoxTorrent", - b"FW" => "FrostWire", - b"FX" => "Freebox BitTorrent", - b"GS" => "GSTorrent", - b"HL" => "Halite", - b"HN" => "Hydranode", - b"KG" => "KGet", - b"KT" => "KTorrent", - b"LH" => "LH-ABC", - b"LP" => "Lphant", - b"LT" => "libtorrent", - b"lt" => "libTorrent", - b"LW" => "LimeWire", - b"MO" => "MonoTorrent", - b"MP" => "MooPolice", - b"MR" => "Miro", - b"MT" => "MoonlightTorrent", - b"NX" => "Net Transport", - b"PD" => "Pando", - b"qB" => "qBittorrent", - b"QD" => "QQDownload", - b"QT" => "Qt 4 Torrent 
example", - b"RT" => "Retriever", - b"S~" => "Shareaza alpha/beta", - b"SB" => "~Swiftbit", - b"SS" => "SwarmScope", - b"ST" => "SymTorrent", - b"st" => "sharktorrent", - b"SZ" => "Shareaza", - b"TN" => "TorrentDotNET", - b"TR" => "Transmission", - b"TS" => "Torrentstorm", - b"TT" => "TuoTu", - b"UL" => "uLeecher!", - b"UT" => "µTorrent", - b"UW" => "µTorrent Web", - b"VG" => "Vagaa", - b"WD" => "WebTorrent Desktop", - b"WT" => "BitLet", - b"WW" => "WebTorrent", - b"WY" => "FireTorrent", - b"XL" => "Xunlei", - b"XT" => "XanTorrent", - b"XX" => "Xtorrent", - b"ZT" => "ZipTorrent", - _ => return None, - }; - Some(name) - } else { - None - } - } -} - -impl Serialize for PeerId { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - #[derive(Serialize)] - struct PeerIdInfo<'a> { - id: Option, - client: Option<&'a str>, - } - - let buff_size = self.0.len() * 2; - let mut tmp: Vec = vec![0; buff_size]; - binascii::bin2hex(&self.0, &mut tmp).unwrap(); - let id = std::str::from_utf8(&tmp).ok(); - - let obj = PeerIdInfo { - id: self.get_id(), - client: self.get_client_name(), - }; - obj.serialize(serializer) - } -} diff --git a/src/protocol/crypto.rs b/src/protocol/crypto.rs index 2d3f8f6fa..a335e2dba 100644 --- a/src/protocol/crypto.rs +++ b/src/protocol/crypto.rs @@ -1,18 +1,18 @@ pub mod keys { pub mod seeds { - use self::detail::DEFAULT_SEED; + use self::detail::CURRENT_SEED; use crate::ephemeral_instance_keys::{Seed, RANDOM_SEED}; - pub trait SeedKeeper { + pub trait Keeper { type Seed: Sized + Default + AsMut<[u8]>; fn get_seed() -> &'static Self::Seed; } - pub struct InstanceSeed; - pub struct DefaultSeed; + pub struct Instance; + pub struct Current; - impl SeedKeeper for InstanceSeed { + impl Keeper for Instance { type Seed = Seed; fn get_seed() -> &'static Self::Seed { @@ -20,24 +20,24 @@ pub mod keys { } } - impl SeedKeeper for DefaultSeed { + impl Keeper for Current { type Seed = Seed; #[allow(clippy::needless_borrow)] fn get_seed() 
-> &'static Self::Seed { - &DEFAULT_SEED + &CURRENT_SEED } } #[cfg(test)] mod tests { use super::detail::ZEROED_TEST_SEED; - use super::{DefaultSeed, InstanceSeed, SeedKeeper}; + use super::{Current, Instance, Keeper}; use crate::ephemeral_instance_keys::Seed; pub struct ZeroedTestSeed; - impl SeedKeeper for ZeroedTestSeed { + impl Keeper for ZeroedTestSeed { type Seed = Seed; #[allow(clippy::needless_borrow)] @@ -48,12 +48,12 @@ pub mod keys { #[test] fn the_default_seed_and_the_zeroed_seed_should_be_the_same_when_testing() { - assert_eq!(DefaultSeed::get_seed(), ZeroedTestSeed::get_seed()); + assert_eq!(Current::get_seed(), ZeroedTestSeed::get_seed()); } #[test] fn the_default_seed_and_the_instance_seed_should_be_different_when_testing() { - assert_ne!(DefaultSeed::get_seed(), InstanceSeed::get_seed()); + assert_ne!(Current::get_seed(), Instance::get_seed()); } } @@ -64,10 +64,10 @@ pub mod keys { pub const ZEROED_TEST_SEED: &Seed = &[0u8; 32]; #[cfg(test)] - pub use ZEROED_TEST_SEED as DEFAULT_SEED; + pub use ZEROED_TEST_SEED as CURRENT_SEED; #[cfg(not(test))] - pub use crate::ephemeral_instance_keys::RANDOM_SEED as DEFAULT_SEED; + pub use crate::ephemeral_instance_keys::RANDOM_SEED as CURRENT_SEED; #[cfg(test)] mod tests { @@ -75,7 +75,7 @@ pub mod keys { use crate::ephemeral_instance_keys::RANDOM_SEED; use crate::protocol::crypto::keys::seeds::detail::ZEROED_TEST_SEED; - use crate::protocol::crypto::keys::seeds::DEFAULT_SEED; + use crate::protocol::crypto::keys::seeds::CURRENT_SEED; #[test] fn it_should_have_a_zero_test_seed() { @@ -84,7 +84,7 @@ pub mod keys { #[test] fn it_should_default_to_zeroed_seed_when_testing() { - assert_eq!(*DEFAULT_SEED, *ZEROED_TEST_SEED); + assert_eq!(*CURRENT_SEED, *ZEROED_TEST_SEED); } #[test] diff --git a/src/protocol/info_hash.rs b/src/protocol/info_hash.rs new file mode 100644 index 000000000..3b9b2fa35 --- /dev/null +++ b/src/protocol/info_hash.rs @@ -0,0 +1,190 @@ +#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] +pub 
struct InfoHash(pub [u8; 20]); + +impl std::fmt::Display for InfoHash { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let mut chars = [0u8; 40]; + binascii::bin2hex(&self.0, &mut chars).expect("failed to hexlify"); + write!(f, "{}", std::str::from_utf8(&chars).unwrap()) + } +} + +impl std::str::FromStr for InfoHash { + type Err = binascii::ConvertError; + + fn from_str(s: &str) -> Result { + let mut i = Self([0u8; 20]); + if s.len() != 40 { + return Err(binascii::ConvertError::InvalidInputLength); + } + binascii::hex2bin(s.as_bytes(), &mut i.0)?; + Ok(i) + } +} + +impl Ord for InfoHash { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl std::cmp::PartialOrd for InfoHash { + fn partial_cmp(&self, other: &InfoHash) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl std::convert::From<&[u8]> for InfoHash { + fn from(data: &[u8]) -> InfoHash { + assert_eq!(data.len(), 20); + let mut ret = InfoHash([0u8; 20]); + ret.0.clone_from_slice(data); + ret + } +} + +impl std::convert::From<[u8; 20]> for InfoHash { + fn from(val: [u8; 20]) -> Self { + InfoHash(val) + } +} + +impl serde::ser::Serialize for InfoHash { + fn serialize(&self, serializer: S) -> Result { + let mut buffer = [0u8; 40]; + let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); + let str_out = std::str::from_utf8(bytes_out).unwrap(); + serializer.serialize_str(str_out) + } +} + +impl<'de> serde::de::Deserialize<'de> for InfoHash { + fn deserialize>(des: D) -> Result { + des.deserialize_str(InfoHashVisitor) + } +} + +struct InfoHashVisitor; + +impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { + type Value = InfoHash; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(formatter, "a 40 character long hash") + } + + fn visit_str(self, v: &str) -> Result { + if v.len() != 40 { + return Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(v), + &"expected a 40 character 
long string", + )); + } + + let mut res = InfoHash([0u8; 20]); + + if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { + return Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(v), + &"expected a hexadecimal string", + )); + }; + Ok(res) + } +} + +#[cfg(test)] +mod tests { + + use std::str::FromStr; + + use serde::{Deserialize, Serialize}; + use serde_json::json; + + use super::InfoHash; + + #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] + struct ContainingInfoHash { + pub info_hash: InfoHash, + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_40_utf8_char_string_representing_an_hexadecimal_value() { + let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"); + assert!(info_hash.is_ok()); + } + + #[test] + fn an_info_hash_can_not_be_created_from_a_utf8_string_representing_a_not_valid_hexadecimal_value() { + let info_hash = InfoHash::from_str("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"); + assert!(info_hash.is_err()); + } + + #[test] + fn an_info_hash_can_only_be_created_from_a_40_utf8_char_string() { + let info_hash = InfoHash::from_str(&"F".repeat(39)); + assert!(info_hash.is_err()); + + let info_hash = InfoHash::from_str(&"F".repeat(41)); + assert!(info_hash.is_err()); + } + + #[test] + fn an_info_hash_should_by_displayed_like_a_40_utf8_lowercased_char_hex_string() { + let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + + let output = format!("{}", info_hash); + + assert_eq!(output, "ffffffffffffffffffffffffffffffffffffffff"); + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_20_byte_array_slice() { + let info_hash: InfoHash = [255u8; 20].as_slice().into(); + + assert_eq!( + info_hash, + InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + ); + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_20_byte_array() { + let info_hash: InfoHash = [255u8; 20].into(); + + assert_eq!( + info_hash, + 
InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + ); + } + + #[test] + fn an_info_hash_can_be_serialized() { + let s = ContainingInfoHash { + info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(), + }; + + let json_serialized_value = serde_json::to_string(&s).unwrap(); + + assert_eq!( + json_serialized_value, + r#"{"info_hash":"ffffffffffffffffffffffffffffffffffffffff"}"# + ); + } + + #[test] + fn an_info_hash_can_be_deserialized() { + let json = json!({ + "info_hash": "ffffffffffffffffffffffffffffffffffffffff", + }); + + let s: ContainingInfoHash = serde_json::from_value(json).unwrap(); + + assert_eq!( + s, + ContainingInfoHash { + info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + } + ); + } +} diff --git a/src/protocol/mod.rs b/src/protocol/mod.rs index 85e4f90ad..bd4310dcf 100644 --- a/src/protocol/mod.rs +++ b/src/protocol/mod.rs @@ -1,4 +1,5 @@ pub mod clock; pub mod common; pub mod crypto; +pub mod info_hash; pub mod utils; diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index fcd9ebe2d..508280b1a 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -16,12 +16,12 @@ use tokio::sync::{RwLock, RwLockReadGuard}; use crate::config::Configuration; use crate::databases::database; use crate::databases::database::Database; -use crate::protocol::common::InfoHash; +use crate::protocol::info_hash::InfoHash; use crate::tracker::key::Auth; pub struct Tracker { pub config: Arc, - mode: mode::Tracker, + mode: mode::Mode, keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, @@ -54,15 +54,15 @@ impl Tracker { } pub fn is_public(&self) -> bool { - self.mode == mode::Tracker::Public + self.mode == mode::Mode::Public } pub fn is_private(&self) -> bool { - self.mode == mode::Tracker::Private || self.mode == mode::Tracker::PrivateListed + self.mode == mode::Mode::Private || self.mode == mode::Mode::PrivateListed } pub fn is_whitelisted(&self) -> bool { - self.mode == 
mode::Tracker::Listed || self.mode == mode::Tracker::PrivateListed + self.mode == mode::Mode::Listed || self.mode == mode::Mode::PrivateListed } /// # Errors diff --git a/src/tracker/mode.rs b/src/tracker/mode.rs index f1fff169e..a0dba6e67 100644 --- a/src/tracker/mode.rs +++ b/src/tracker/mode.rs @@ -2,7 +2,7 @@ use serde; use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] -pub enum Tracker { +pub enum Mode { // Will track every new info hash and serve every peer. #[serde(rename = "public")] Public, diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index dd49ffaa7..16aada0ed 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -6,12 +6,12 @@ use serde::Serialize; use crate::http::request::Announce; use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time}; -use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef, PeerId}; +use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; use crate::protocol::utils::ser_unix_time_value; #[derive(PartialEq, Eq, Debug, Clone, Serialize, Copy)] pub struct TorrentPeer { - pub peer_id: PeerId, + pub peer_id: Id, pub peer_addr: SocketAddr, #[serde(serialize_with = "ser_unix_time_value")] pub updated: DurationSinceUnixEpoch, @@ -35,7 +35,7 @@ impl TorrentPeer { let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); TorrentPeer { - peer_id: PeerId(announce_request.peer_id.0), + peer_id: Id(announce_request.peer_id.0), peer_addr, updated: Current::now(), uploaded: announce_request.bytes_uploaded, @@ -88,6 +88,133 @@ impl TorrentPeer { } } +#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] +pub struct Id(pub [u8; 20]); + +impl std::fmt::Display for Id { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut buffer = [0u8; 20]; + let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok(); + match bytes_out { + Some(bytes) => 
write!(f, "{}", std::str::from_utf8(bytes).unwrap()), + None => write!(f, ""), + } + } +} + +impl Id { + #[must_use] + pub fn get_id(&self) -> Option { + let buff_size = self.0.len() * 2; + let mut tmp: Vec = vec![0; buff_size]; + binascii::bin2hex(&self.0, &mut tmp).unwrap(); + + std::str::from_utf8(&tmp).ok().map(std::string::ToString::to_string) + } + + #[must_use] + pub fn get_client_name(&self) -> Option<&'static str> { + if self.0[0] == b'M' { + return Some("BitTorrent"); + } + if self.0[0] == b'-' { + let name = match &self.0[1..3] { + b"AG" | b"A~" => "Ares", + b"AR" => "Arctic", + b"AV" => "Avicora", + b"AX" => "BitPump", + b"AZ" => "Azureus", + b"BB" => "BitBuddy", + b"BC" => "BitComet", + b"BF" => "Bitflu", + b"BG" => "BTG (uses Rasterbar libtorrent)", + b"BR" => "BitRocket", + b"BS" => "BTSlave", + b"BX" => "~Bittorrent X", + b"CD" => "Enhanced CTorrent", + b"CT" => "CTorrent", + b"DE" => "DelugeTorrent", + b"DP" => "Propagate Data Client", + b"EB" => "EBit", + b"ES" => "electric sheep", + b"FT" => "FoxTorrent", + b"FW" => "FrostWire", + b"FX" => "Freebox BitTorrent", + b"GS" => "GSTorrent", + b"HL" => "Halite", + b"HN" => "Hydranode", + b"KG" => "KGet", + b"KT" => "KTorrent", + b"LH" => "LH-ABC", + b"LP" => "Lphant", + b"LT" => "libtorrent", + b"lt" => "libTorrent", + b"LW" => "LimeWire", + b"MO" => "MonoTorrent", + b"MP" => "MooPolice", + b"MR" => "Miro", + b"MT" => "MoonlightTorrent", + b"NX" => "Net Transport", + b"PD" => "Pando", + b"qB" => "qBittorrent", + b"QD" => "QQDownload", + b"QT" => "Qt 4 Torrent example", + b"RT" => "Retriever", + b"S~" => "Shareaza alpha/beta", + b"SB" => "~Swiftbit", + b"SS" => "SwarmScope", + b"ST" => "SymTorrent", + b"st" => "sharktorrent", + b"SZ" => "Shareaza", + b"TN" => "TorrentDotNET", + b"TR" => "Transmission", + b"TS" => "Torrentstorm", + b"TT" => "TuoTu", + b"UL" => "uLeecher!", + b"UT" => "µTorrent", + b"UW" => "µTorrent Web", + b"VG" => "Vagaa", + b"WD" => "WebTorrent Desktop", + b"WT" => "BitLet", + b"WW" => 
"WebTorrent", + b"WY" => "FireTorrent", + b"XL" => "Xunlei", + b"XT" => "XanTorrent", + b"XX" => "Xtorrent", + b"ZT" => "ZipTorrent", + _ => return None, + }; + Some(name) + } else { + None + } + } +} + +impl Serialize for Id { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + #[derive(Serialize)] + struct PeerIdInfo<'a> { + id: Option, + client: Option<&'a str>, + } + + let buff_size = self.0.len() * 2; + let mut tmp: Vec = vec![0; buff_size]; + binascii::bin2hex(&self.0, &mut tmp).unwrap(); + let id = std::str::from_utf8(&tmp).ok(); + + let obj = PeerIdInfo { + id: self.get_id(), + client: self.get_client_name(), + }; + obj.serialize(serializer) + } +} + #[cfg(test)] mod test { mod torrent_peer { @@ -97,13 +224,12 @@ mod test { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use crate::protocol::clock::{Current, Time}; - use crate::protocol::common::PeerId; - use crate::tracker::peer::TorrentPeer; + use crate::tracker::peer::{self, TorrentPeer}; #[test] fn it_should_be_serializable() { let torrent_peer = TorrentPeer { - peer_id: PeerId(*b"-qB00000000000000000"), + peer_id: peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: Current::now(), uploaded: NumberOfBytes(0), @@ -284,8 +410,8 @@ mod test { use std::net::{IpAddr, Ipv4Addr}; use crate::http::request::Announce; - use crate::protocol::common::{InfoHash, PeerId}; - use crate::tracker::peer::TorrentPeer; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::peer::{self, TorrentPeer}; fn sample_http_announce_request(peer_addr: IpAddr, port: u16) -> Announce { Announce { @@ -293,7 +419,7 @@ mod test { peer_addr, downloaded: 0u64, uploaded: 0u64, - peer_id: PeerId(*b"-qB00000000000000000"), + peer_id: peer::Id(*b"-qB00000000000000000"), port, left: 0u64, event: None, diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 21bcfc513..3e38d2340 100644 --- a/src/tracker/torrent.rs 
+++ b/src/tracker/torrent.rs @@ -6,12 +6,12 @@ use serde::{Deserialize, Serialize}; use super::peer; use crate::protocol::clock::{Current, TimeNow}; -use crate::protocol::common::{PeerId, MAX_SCRAPE_TORRENTS}; +use crate::protocol::common::MAX_SCRAPE_TORRENTS; #[derive(Serialize, Deserialize, Clone, Debug)] pub struct Entry { #[serde(skip)] - pub peers: std::collections::BTreeMap, + pub peers: std::collections::BTreeMap, pub completed: u32, } @@ -118,7 +118,6 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; - use crate::protocol::common::PeerId; use crate::tracker::peer; use crate::tracker::torrent::Entry; @@ -129,7 +128,7 @@ mod tests { impl TorrentPeerBuilder { pub fn default() -> TorrentPeerBuilder { let default_peer = peer::TorrentPeer { - peer_id: PeerId([0u8; 20]), + peer_id: peer::Id([0u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), updated: Current::now(), uploaded: NumberOfBytes(0), @@ -150,7 +149,7 @@ mod tests { self } - pub fn with_peer_id(mut self, peer_id: PeerId) -> Self { + pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { self.peer.peer_id = peer_id; self } @@ -278,9 +277,9 @@ mod tests { assert_eq!(peers.len(), 0); } - fn peer_id_from_i32(number: i32) -> PeerId { + fn peer_id_from_i32(number: i32) -> peer::Id { let peer_id = number.to_le_bytes(); - PeerId([ + peer::Id([ 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], peer_id[2], peer_id[3], ]) diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs index 8a544fa6a..3daa3e0f6 100644 --- a/src/udp/connection_cookie.rs +++ b/src/udp/connection_cookie.rs @@ -2,7 +2,7 @@ use std::net::SocketAddr; use aquatic_udp_protocol::ConnectionId; -use super::errors::ServerError; +use super::error::Error; use crate::protocol::clock::time_extent::{Extent, TimeExtent}; pub type 
Cookie = [u8; 8]; @@ -36,7 +36,7 @@ pub fn make(remote_address: &SocketAddr) -> Cookie { /// # Errors /// /// Will return a `ServerError::InvalidConnectionId` if the supplied `connection_cookie` fails to verify. -pub fn check(remote_address: &SocketAddr, connection_cookie: &Cookie) -> Result { +pub fn check(remote_address: &SocketAddr, connection_cookie: &Cookie) -> Result { // we loop backwards testing each time_extent until we find one that matches. // (or the lifetime of time_extents is exhausted) for offset in 0..=COOKIE_LIFETIME.amount { @@ -49,7 +49,7 @@ pub fn check(remote_address: &SocketAddr, connection_cookie: &Cookie) -> Result< return Ok(checking_time_extent); } } - Err(ServerError::InvalidConnectionId) + Err(Error::InvalidConnectionId) } mod cookie_builder { @@ -59,7 +59,7 @@ mod cookie_builder { use super::{Cookie, SinceUnixEpochTimeExtent, COOKIE_LIFETIME}; use crate::protocol::clock::time_extent::{DefaultTimeExtentMaker, Extent, Make, TimeExtent}; - use crate::protocol::crypto::keys::seeds::{DefaultSeed, SeedKeeper}; + use crate::protocol::crypto::keys::seeds::{Current, Keeper}; pub(super) fn get_last_time_extent() -> SinceUnixEpochTimeExtent { DefaultTimeExtentMaker::now(&COOKIE_LIFETIME.increment) @@ -70,7 +70,7 @@ mod cookie_builder { } pub(super) fn build(remote_address: &SocketAddr, time_extent: &TimeExtent) -> Cookie { - let seed = DefaultSeed::get_seed(); + let seed = Current::get_seed(); let mut hasher = DefaultHasher::new(); diff --git a/src/udp/errors.rs b/src/udp/error.rs similarity index 57% rename from src/udp/errors.rs rename to src/udp/error.rs index f90149a99..c5fbb3929 100644 --- a/src/udp/errors.rs +++ b/src/udp/error.rs @@ -3,9 +3,9 @@ use thiserror::Error; use crate::tracker::torrent; #[derive(Error, Debug)] -pub enum ServerError { +pub enum Error { #[error("internal server error")] - InternalServerError, + InternalServer, #[error("info_hash is either missing or invalid")] InvalidInfoHash, @@ -35,15 +35,15 @@ pub enum ServerError 
{ BadRequest, } -impl From for ServerError { +impl From for Error { fn from(e: torrent::Error) -> Self { match e { - torrent::Error::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, - torrent::Error::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, - torrent::Error::PeerKeyNotValid => ServerError::PeerKeyNotValid, - torrent::Error::NoPeersFound => ServerError::NoPeersFound, - torrent::Error::CouldNotSendResponse => ServerError::InternalServerError, - torrent::Error::InvalidInfoHash => ServerError::InvalidInfoHash, + torrent::Error::TorrentNotWhitelisted => Error::TorrentNotWhitelisted, + torrent::Error::PeerNotAuthenticated => Error::PeerNotAuthenticated, + torrent::Error::PeerKeyNotValid => Error::PeerKeyNotValid, + torrent::Error::NoPeersFound => Error::NoPeersFound, + torrent::Error::CouldNotSendResponse => Error::InternalServer, + torrent::Error::InvalidInfoHash => Error::InvalidInfoHash, } } } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 274af1e2c..da4bdbf35 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -7,13 +7,14 @@ use aquatic_udp_protocol::{ }; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; -use crate::protocol::common::{InfoHash, MAX_SCRAPE_TORRENTS}; +use crate::protocol::common::MAX_SCRAPE_TORRENTS; +use crate::protocol::info_hash::InfoHash; use crate::tracker::{self, peer, statistics}; -use crate::udp::errors::ServerError; -use crate::udp::request::AnnounceRequestWrapper; +use crate::udp::error::Error; +use crate::udp::request::AnnounceWrapper; pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: Arc) -> Response { - match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|_| ServerError::InternalServerError) { + match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|_| Error::InternalServer) { Ok(request) => { let transaction_id = match &request { Request::Connect(connect_request) => 
connect_request.transaction_id, @@ -27,7 +28,7 @@ pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: A } } // bad request - Err(_) => handle_error(&ServerError::BadRequest, TransactionId(0)), + Err(_) => handle_error(&Error::BadRequest, TransactionId(0)), } } @@ -38,7 +39,7 @@ pub async fn handle_request( request: Request, remote_addr: SocketAddr, tracker: Arc, -) -> Result { +) -> Result { match request { Request::Connect(connect_request) => handle_connect(remote_addr, &connect_request, tracker).await, Request::Announce(announce_request) => handle_announce(remote_addr, &announce_request, tracker).await, @@ -53,7 +54,7 @@ pub async fn handle_connect( remote_addr: SocketAddr, request: &ConnectRequest, tracker: Arc, -) -> Result { +) -> Result { let connection_cookie = make(&remote_addr); let connection_id = into_connection_id(&connection_cookie); @@ -82,10 +83,10 @@ pub async fn handle_announce( remote_addr: SocketAddr, announce_request: &AnnounceRequest, tracker: Arc, -) -> Result { +) -> Result { check(&remote_addr, &from_connection_id(&announce_request.connection_id))?; - let wrapped_announce_request = AnnounceRequestWrapper::new(announce_request); + let wrapped_announce_request = AnnounceWrapper::new(announce_request); tracker .authenticate_request(&wrapped_announce_request.info_hash, &None) @@ -173,7 +174,7 @@ pub async fn handle_scrape( remote_addr: SocketAddr, request: &ScrapeRequest, tracker: Arc, -) -> Result { +) -> Result { let db = tracker.get_torrents().await; let mut torrent_stats: Vec = Vec::new(); @@ -228,7 +229,7 @@ pub async fn handle_scrape( })) } -fn handle_error(e: &ServerError, transaction_id: TransactionId) -> Response { +fn handle_error(e: &Error, transaction_id: TransactionId) -> Response { let message = e.to_string(); Response::from(ErrorResponse { transaction_id, @@ -245,7 +246,6 @@ mod tests { use crate::config::Configuration; use crate::protocol::clock::{Current, Time}; - use crate::protocol::common::PeerId; use 
crate::tracker::{self, mode, peer, statistics}; fn default_tracker_config() -> Arc { @@ -253,21 +253,17 @@ mod tests { } fn initialized_public_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Tracker::Public).into()); + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Mode::Public).into()); initialized_tracker(&configuration) } fn initialized_private_tracker() -> Arc { - let configuration = Arc::new( - TrackerConfigurationBuilder::default() - .with_mode(mode::Tracker::Private) - .into(), - ); + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Mode::Private).into()); initialized_tracker(&configuration) } fn initialized_whitelisted_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Tracker::Listed).into()); + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Mode::Listed).into()); initialized_tracker(&configuration) } @@ -299,7 +295,7 @@ mod tests { impl TorrentPeerBuilder { pub fn default() -> TorrentPeerBuilder { let default_peer = peer::TorrentPeer { - peer_id: PeerId([255u8; 20]), + peer_id: peer::Id([255u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: Current::now(), uploaded: NumberOfBytes(0), @@ -310,7 +306,7 @@ mod tests { TorrentPeerBuilder { peer: default_peer } } - pub fn with_peer_id(mut self, peer_id: PeerId) -> Self { + pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { self.peer.peer_id = peer_id; self } @@ -347,7 +343,7 @@ mod tests { self } - pub fn with_mode(mut self, mode: mode::Tracker) -> Self { + pub fn with_mode(mut self, mode: mode::Mode) -> Self { self.configuration.mode = mode; self } @@ -537,8 +533,7 @@ mod tests { }; use mockall::predicate::eq; - use crate::protocol::common::PeerId; - use crate::tracker::{self, statistics}; + use crate::tracker::{self, peer, statistics}; use 
crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -570,7 +565,7 @@ mod tests { let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; let expected_peer = TorrentPeerBuilder::default() - .with_peer_id(PeerId(peer_id.0)) + .with_peer_id(peer::Id(peer_id.0)) .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip), client_port)) .into(); @@ -644,7 +639,7 @@ mod tests { let peer_id = AquaticPeerId([255u8; 20]); let peer_using_ipv6 = TorrentPeerBuilder::default() - .with_peer_id(PeerId(peer_id.0)) + .with_peer_id(peer::Id(peer_id.0)) .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); @@ -707,7 +702,7 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::protocol::common::PeerId; + use crate::tracker::peer; use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -740,7 +735,7 @@ mod tests { tracker.config.external_ip.clone().unwrap().parse::().unwrap(); let expected_peer = TorrentPeerBuilder::default() - .with_peer_id(PeerId(peer_id.0)) + .with_peer_id(peer::Id(peer_id.0)) .with_peer_addr(SocketAddr::new(IpAddr::V4(external_ip_in_tracker_configuration), client_port)) .into(); @@ -761,8 +756,7 @@ mod tests { }; use mockall::predicate::eq; - use crate::protocol::common::PeerId; - use crate::tracker::{self, statistics}; + use crate::tracker::{self, peer, statistics}; use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -795,7 +789,7 @@ mod tests { let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; let expected_peer = TorrentPeerBuilder::default() - 
.with_peer_id(PeerId(peer_id.0)) + .with_peer_id(peer::Id(peer_id.0)) .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); @@ -872,7 +866,7 @@ mod tests { let peer_id = AquaticPeerId([255u8; 20]); let peer_using_ipv4 = TorrentPeerBuilder::default() - .with_peer_id(PeerId(peer_id.0)) + .with_peer_id(peer::Id(peer_id.0)) .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); @@ -1003,8 +997,7 @@ mod tests { }; use super::TorrentPeerBuilder; - use crate::protocol::common::PeerId; - use crate::tracker; + use crate::tracker::{self, peer}; use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; @@ -1046,10 +1039,10 @@ mod tests { } async fn add_a_seeder(tracker: Arc, remote_addr: &SocketAddr, info_hash: &InfoHash) { - let peer_id = PeerId([255u8; 20]); + let peer_id = peer::Id([255u8; 20]); let peer = TorrentPeerBuilder::default() - .with_peer_id(PeerId(peer_id.0)) + .with_peer_id(peer::Id(peer_id.0)) .with_peer_addr(*remote_addr) .with_bytes_left(0) .into(); diff --git a/src/udp/mod.rs b/src/udp/mod.rs index 2a8d42d9f..8b8c8c4f8 100644 --- a/src/udp/mod.rs +++ b/src/udp/mod.rs @@ -1,5 +1,5 @@ pub mod connection_cookie; -pub mod errors; +pub mod error; pub mod handlers; pub mod request; pub mod server; diff --git a/src/udp/request.rs b/src/udp/request.rs index 34139384b..c4326b291 100644 --- a/src/udp/request.rs +++ b/src/udp/request.rs @@ -1,6 +1,6 @@ use aquatic_udp_protocol::AnnounceRequest; -use crate::protocol::common::InfoHash; +use crate::protocol::info_hash::InfoHash; // struct AnnounceRequest { // pub connection_id: i64, @@ -17,15 +17,15 @@ use crate::protocol::common::InfoHash; // pub port: Port // } -pub struct AnnounceRequestWrapper { +pub struct AnnounceWrapper { pub announce_request: AnnounceRequest, pub info_hash: InfoHash, } -impl AnnounceRequestWrapper { +impl 
AnnounceWrapper { #[must_use] pub fn new(announce_request: &AnnounceRequest) -> Self { - AnnounceRequestWrapper { + AnnounceWrapper { announce_request: announce_request.clone(), info_hash: InfoHash(announce_request.info_hash.0), } diff --git a/tests/api.rs b/tests/api.rs index 72c3c65c7..4f119e6d0 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -16,15 +16,15 @@ mod tracker_api { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use reqwest::Response; use tokio::task::JoinHandle; - use torrust_tracker::api::resources::auth_key_resource::AuthKeyResource; + use torrust_tracker::api::resources::auth_key_resource::AuthKey; use torrust_tracker::api::resources::stats_resource::StatsResource; use torrust_tracker::api::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; use torrust_tracker::config::Configuration; use torrust_tracker::jobs::tracker_api; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; - use torrust_tracker::protocol::common::{InfoHash, PeerId}; + use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::key::Auth; - use torrust_tracker::tracker::peer::TorrentPeer; + use torrust_tracker::tracker::peer::{self, TorrentPeer}; use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; @@ -189,7 +189,7 @@ mod tracker_api { fn sample_torrent_peer() -> (TorrentPeer, TorrentPeerResource) { let torrent_peer = TorrentPeer { - peer_id: PeerId(*b"-qB00000000000000000"), + peer_id: peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes(0), @@ -310,7 +310,7 @@ mod tracker_api { Self { connection_info } } - pub async fn generate_auth_key(&self, seconds_valid: i32) -> AuthKeyResource { + pub async fn generate_auth_key(&self, seconds_valid: i32) -> AuthKey { let url = format!( 
"http://{}/api/key/{}?token={}", &self.connection_info.bind_address, &seconds_valid, &self.connection_info.api_token From ec21df90b3f6bb949f150ad13dcafadba6a7d18d Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 28 Nov 2022 19:38:50 +0100 Subject: [PATCH 193/435] rename: Key::Auth to auth::Key --- .../{auth_key_resource.rs => auth_key.rs} | 18 +++++----- src/api/resources/mod.rs | 3 +- src/api/server.rs | 2 +- src/databases/database.rs | 8 ++--- src/databases/mysql.rs | 16 ++++----- src/databases/sqlite.rs | 14 ++++---- src/http/filters.rs | 9 +++-- src/http/handlers.rs | 15 ++++---- src/tracker/{key.rs => auth.rs} | 34 +++++++++---------- src/tracker/mod.rs | 17 +++++----- tests/api.rs | 6 ++-- 11 files changed, 72 insertions(+), 70 deletions(-) rename src/api/resources/{auth_key_resource.rs => auth_key.rs} (88%) rename src/tracker/{key.rs => auth.rs} (83%) diff --git a/src/api/resources/auth_key_resource.rs b/src/api/resources/auth_key.rs similarity index 88% rename from src/api/resources/auth_key_resource.rs rename to src/api/resources/auth_key.rs index b575984db..d5c08f496 100644 --- a/src/api/resources/auth_key_resource.rs +++ b/src/api/resources/auth_key.rs @@ -3,7 +3,7 @@ use std::convert::From; use serde::{Deserialize, Serialize}; use crate::protocol::clock::DurationSinceUnixEpoch; -use crate::tracker::key::Auth; +use crate::tracker::auth; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct AuthKey { @@ -11,9 +11,9 @@ pub struct AuthKey { pub valid_until: Option, } -impl From for Auth { +impl From for auth::Key { fn from(auth_key_resource: AuthKey) -> Self { - Auth { + auth::Key { key: auth_key_resource.key, valid_until: auth_key_resource .valid_until @@ -22,8 +22,8 @@ impl From for Auth { } } -impl From for AuthKey { - fn from(auth_key: Auth) -> Self { +impl From for AuthKey { + fn from(auth_key: auth::Key) -> Self { AuthKey { key: auth_key.key, valid_until: auth_key.valid_until.map(|valid_until| valid_until.as_secs()), @@ -37,7 
+37,7 @@ mod tests { use super::AuthKey; use crate::protocol::clock::{Current, TimeNow}; - use crate::tracker::key::Auth; + use crate::tracker::auth; #[test] fn it_should_be_convertible_into_an_auth_key() { @@ -49,8 +49,8 @@ mod tests { }; assert_eq!( - Auth::from(auth_key_resource), - Auth { + auth::Key::from(auth_key_resource), + auth::Key { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()) } @@ -61,7 +61,7 @@ mod tests { fn it_should_be_convertible_from_an_auth_key() { let duration_in_secs = 60; - let auth_key = Auth { + let auth_key = auth::Key { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()), }; diff --git a/src/api/resources/mod.rs b/src/api/resources/mod.rs index 2b3e4b886..f708fc2e4 100644 --- a/src/api/resources/mod.rs +++ b/src/api/resources/mod.rs @@ -6,6 +6,7 @@ //! - [ ] `TorrentResource`, `TorrentListItemResource`, `TorrentPeerResource`, `PeerIdResource` //! - [ ] `StatsResource` //! - [ ] ... 
-pub mod auth_key_resource; + +pub mod auth_key; pub mod stats_resource; pub mod torrent_resource; diff --git a/src/api/server.rs b/src/api/server.rs index 61fd8ed3d..af2d66458 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -7,7 +7,7 @@ use std::time::Duration; use serde::{Deserialize, Serialize}; use warp::{filters, reply, serve, Filter}; -use super::resources::auth_key_resource::AuthKey; +use super::resources::auth_key::AuthKey; use super::resources::stats_resource::StatsResource; use super::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; use crate::protocol::info_hash::InfoHash; diff --git a/src/databases/database.rs b/src/databases/database.rs index 7055d2a09..a4dae57ee 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::databases::mysql::Mysql; use crate::databases::sqlite::Sqlite; use crate::protocol::info_hash::InfoHash; -use crate::tracker::key::Auth; +use crate::tracker::auth; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub enum Drivers { @@ -42,7 +42,7 @@ pub trait Database: Sync + Send { async fn load_persistent_torrents(&self) -> Result, Error>; - async fn load_keys(&self) -> Result, Error>; + async fn load_keys(&self) -> Result, Error>; async fn load_whitelist(&self) -> Result, Error>; @@ -54,9 +54,9 @@ pub trait Database: Sync + Send { async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; - async fn get_key_from_keys(&self, key: &str) -> Result; + async fn get_key_from_keys(&self, key: &str) -> Result; - async fn add_key_to_keys(&self, auth_key: &Auth) -> Result; + async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result; async fn remove_key_from_keys(&self, key: &str) -> Result; diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 0dafc3a60..0d79315c6 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -12,7 +12,7 @@ use 
crate::databases::database; use crate::databases::database::{Database, Error}; use crate::protocol::common::AUTH_KEY_LENGTH; use crate::protocol::info_hash::InfoHash; -use crate::tracker::key::Auth; +use crate::tracker::auth; pub struct Mysql { pool: Pool, @@ -61,7 +61,7 @@ impl Database for Mysql { PRIMARY KEY (`id`), UNIQUE (`key`) );", - i8::try_from(AUTH_KEY_LENGTH).expect("Auth Key Length Should fit within a i8!") + i8::try_from(AUTH_KEY_LENGTH).expect("auth::Auth Key Length Should fit within a i8!") ); let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; @@ -91,13 +91,13 @@ impl Database for Mysql { Ok(torrents) } - async fn load_keys(&self) -> Result, Error> { + async fn load_keys(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; - let keys: Vec = conn + let keys: Vec = conn .query_map( "SELECT `key`, valid_until FROM `keys`", - |(key, valid_until): (String, i64)| Auth { + |(key, valid_until): (String, i64)| auth::Key { key, valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }, @@ -183,14 +183,14 @@ impl Database for Mysql { } } - async fn get_key_from_keys(&self, key: &str) -> Result { + async fn get_key_from_keys(&self, key: &str) -> Result { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn .exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }) .map_err(|_| database::Error::QueryReturnedNoRows)? 
{ - Some((key, valid_until)) => Ok(Auth { + Some((key, valid_until)) => Ok(auth::Key { key, valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }), @@ -198,7 +198,7 @@ impl Database for Mysql { } } - async fn add_key_to_keys(&self, auth_key: &Auth) -> Result { + async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let key = auth_key.key.to_string(); diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 39dea8502..c42e9382d 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -9,7 +9,7 @@ use crate::databases::database; use crate::databases::database::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; -use crate::tracker::key::Auth; +use crate::tracker::auth; pub struct Sqlite { pool: Pool, @@ -78,7 +78,7 @@ impl Database for Sqlite { Ok(torrents) } - async fn load_keys(&self) -> Result, Error> { + async fn load_keys(&self) -> Result, Error> { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?; @@ -87,13 +87,13 @@ impl Database for Sqlite { let key = row.get(0)?; let valid_until: i64 = row.get(1)?; - Ok(Auth { + Ok(auth::Key { key, valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }) })?; - let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); + let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); Ok(keys) } @@ -186,7 +186,7 @@ impl Database for Sqlite { } } - async fn get_key_from_keys(&self, key: &str) -> Result { + async fn get_key_from_keys(&self, key: &str) -> Result { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; @@ -196,7 +196,7 @@ impl Database for Sqlite { let key: String = row.get(0).unwrap(); let 
valid_until: i64 = row.get(1).unwrap(); - Ok(Auth { + Ok(auth::Key { key, valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }) @@ -205,7 +205,7 @@ impl Database for Sqlite { } } - async fn add_key_to_keys(&self, auth_key: &Auth) -> Result { + async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; match conn.execute( diff --git a/src/http/filters.rs b/src/http/filters.rs index 484ae2311..e9432e191 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -10,8 +10,7 @@ use super::request::{Announce, AnnounceQuery, Scrape}; use super::WebResult; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; -use crate::tracker::key::Auth; -use crate::tracker::{self, peer}; +use crate::tracker::{self, auth, peer}; /// Pass Arc along #[must_use] @@ -35,10 +34,10 @@ pub fn with_peer_id() -> impl Filter + /// Pass Arc along #[must_use] -pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { +pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { warp::path::param::() - .map(|key: String| Auth::from_string(&key)) - .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) + .map(|key: String| auth::Key::from_string(&key)) + .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) } /// Check for `PeerAddress` diff --git a/src/http/handlers.rs b/src/http/handlers.rs index ff5469168..8d8816885 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -11,15 +11,18 @@ use super::error::Error; use super::response::{self, Peer, ScrapeEntry}; use super::{request, WebResult}; use crate::protocol::info_hash::InfoHash; -use crate::tracker::key::Auth; -use crate::tracker::{self, peer, statistics, torrent}; +use crate::tracker::{self, auth, peer, statistics, torrent}; -/// Authenticate `InfoHash` using optional `AuthKey` +/// Authenticate `InfoHash` using optional `auth::Key` /// /// # Errors 
/// /// Will return `ServerError` that wraps the `Error` if unable to `authenticate_request`. -pub async fn authenticate(info_hash: &InfoHash, auth_key: &Option, tracker: Arc) -> Result<(), Error> { +pub async fn authenticate( + info_hash: &InfoHash, + auth_key: &Option, + tracker: Arc, +) -> Result<(), Error> { tracker.authenticate_request(info_hash, auth_key).await.map_err(|e| match e { torrent::Error::TorrentNotWhitelisted => Error::TorrentNotWhitelisted, torrent::Error::PeerNotAuthenticated => Error::PeerNotAuthenticated, @@ -37,7 +40,7 @@ pub async fn authenticate(info_hash: &InfoHash, auth_key: &Option, tracker /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. pub async fn handle_announce( announce_request: request::Announce, - auth_key: Option, + auth_key: Option, tracker: Arc, ) -> WebResult { authenticate(&announce_request.info_hash, &auth_key, tracker.clone()) @@ -83,7 +86,7 @@ pub async fn handle_announce( /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. pub async fn handle_scrape( scrape_request: request::Scrape, - auth_key: Option, + auth_key: Option, tracker: Arc, ) -> WebResult { let mut files: HashMap = HashMap::new(); diff --git a/src/tracker/key.rs b/src/tracker/auth.rs similarity index 83% rename from src/tracker/key.rs rename to src/tracker/auth.rs index 673780ad0..7ac6d6939 100644 --- a/src/tracker/key.rs +++ b/src/tracker/auth.rs @@ -13,7 +13,7 @@ use crate::protocol::common::AUTH_KEY_LENGTH; /// # Panics /// /// It would panic if the `lifetime: Duration` + Duration is more than `Duration::MAX`. 
-pub fn generate(lifetime: Duration) -> Auth { +pub fn generate(lifetime: Duration) -> Key { let key: String = thread_rng() .sample_iter(&Alphanumeric) .take(AUTH_KEY_LENGTH) @@ -22,7 +22,7 @@ pub fn generate(lifetime: Duration) -> Auth { debug!("Generated key: {}, valid for: {:?} seconds", key, lifetime); - Auth { + Key { key, valid_until: Some(Current::add(&lifetime).unwrap()), } @@ -33,7 +33,7 @@ pub fn generate(lifetime: Duration) -> Auth { /// Will return `Error::KeyExpired` if `auth_key.valid_until` is past the `current_time`. /// /// Will return `Error::KeyInvalid` if `auth_key.valid_until` is past the `None`. -pub fn verify(auth_key: &Auth) -> Result<(), Error> { +pub fn verify(auth_key: &Key) -> Result<(), Error> { let current_time: DurationSinceUnixEpoch = Current::now(); match auth_key.valid_until { @@ -49,25 +49,25 @@ pub fn verify(auth_key: &Auth) -> Result<(), Error> { } #[derive(Serialize, Debug, Eq, PartialEq, Clone)] -pub struct Auth { +pub struct Key { pub key: String, pub valid_until: Option, } -impl Auth { +impl Key { #[must_use] - pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { + pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { if let Ok(key) = String::from_utf8(Vec::from(key_buffer)) { - Some(Auth { key, valid_until: None }) + Some(Key { key, valid_until: None }) } else { None } } #[must_use] - pub fn from_string(key: &str) -> Option { + pub fn from_string(key: &str) -> Option { if key.len() == AUTH_KEY_LENGTH { - Some(Auth { + Some(Key { key: key.to_string(), valid_until: None, }) @@ -100,11 +100,11 @@ mod tests { use std::time::Duration; use crate::protocol::clock::{Current, StoppedTime}; - use crate::tracker::key; + use crate::tracker::auth; #[test] fn auth_key_from_buffer() { - let auth_key = key::Auth::from_buffer([ + let auth_key = auth::Key::from_buffer([ 89, 90, 83, 108, 52, 108, 77, 90, 117, 112, 82, 117, 79, 112, 83, 82, 67, 51, 107, 114, 73, 75, 82, 53, 66, 80, 66, 49, 52, 110, 114, 74, ]); @@ 
-116,7 +116,7 @@ mod tests { #[test] fn auth_key_from_string() { let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key = key::Auth::from_string(key_string); + let auth_key = auth::Key::from_string(key_string); assert!(auth_key.is_some()); assert_eq!(auth_key.unwrap().key, key_string); @@ -124,9 +124,9 @@ mod tests { #[test] fn generate_valid_auth_key() { - let auth_key = key::generate(Duration::new(9999, 0)); + let auth_key = auth::generate(Duration::new(9999, 0)); - assert!(key::verify(&auth_key).is_ok()); + assert!(auth::verify(&auth_key).is_ok()); } #[test] @@ -135,16 +135,16 @@ mod tests { Current::local_set_to_system_time_now(); // Make key that is valid for 19 seconds. - let auth_key = key::generate(Duration::from_secs(19)); + let auth_key = auth::generate(Duration::from_secs(19)); // Mock the time has passed 10 sec. Current::local_add(&Duration::from_secs(10)).unwrap(); - assert!(key::verify(&auth_key).is_ok()); + assert!(auth::verify(&auth_key).is_ok()); // Mock the time has passed another 10 sec. Current::local_add(&Duration::from_secs(10)).unwrap(); - assert!(key::verify(&auth_key).is_err()); + assert!(auth::verify(&auth_key).is_err()); } } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 508280b1a..806efee54 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -1,4 +1,4 @@ -pub mod key; +pub mod auth; pub mod mode; pub mod peer; pub mod statistics; @@ -17,12 +17,11 @@ use crate::config::Configuration; use crate::databases::database; use crate::databases::database::Database; use crate::protocol::info_hash::InfoHash; -use crate::tracker::key::Auth; pub struct Tracker { pub config: Arc, mode: mode::Mode, - keys: RwLock>, + keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, stats_event_sender: Option>, @@ -68,8 +67,8 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. 
- pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { - let auth_key = key::generate(lifetime); + pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { + let auth_key = auth::generate(lifetime); self.database.add_key_to_keys(&auth_key).await?; self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); Ok(auth_key) @@ -87,10 +86,10 @@ impl Tracker { /// # Errors /// /// Will return a `key::Error` if unable to get any `auth_key`. - pub async fn verify_auth_key(&self, auth_key: &Auth) -> Result<(), key::Error> { + pub async fn verify_auth_key(&self, auth_key: &auth::Key) -> Result<(), auth::Error> { match self.keys.read().await.get(&auth_key.key) { - None => Err(key::Error::KeyInvalid), - Some(key) => key::verify(key), + None => Err(auth::Error::KeyInvalid), + Some(key) => auth::verify(key), } } @@ -174,7 +173,7 @@ impl Tracker { /// Will return a `torrent::Error::PeerNotAuthenticated` if the `key` is `None`. /// /// Will return a `torrent::Error::TorrentNotWhitelisted` if the the Tracker is in listed mode and the `info_hash` is not whitelisted. 
- pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), torrent::Error> { + pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), torrent::Error> { // no authentication needed in public mode if self.is_public() { return Ok(()); diff --git a/tests/api.rs b/tests/api.rs index 4f119e6d0..22a222698 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -16,14 +16,14 @@ mod tracker_api { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use reqwest::Response; use tokio::task::JoinHandle; - use torrust_tracker::api::resources::auth_key_resource::AuthKey; + use torrust_tracker::api::resources::auth_key::AuthKey; use torrust_tracker::api::resources::stats_resource::StatsResource; use torrust_tracker::api::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; use torrust_tracker::config::Configuration; use torrust_tracker::jobs::tracker_api; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::key::Auth; + use torrust_tracker::tracker::auth; use torrust_tracker::tracker::peer::{self, TorrentPeer}; use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; @@ -44,7 +44,7 @@ mod tracker_api { assert!(api_server .tracker .unwrap() - .verify_auth_key(&Auth::from(auth_key)) + .verify_auth_key(&auth::Key::from(auth_key)) .await .is_ok()); } From 36452717a0b21e850adcc7d00282500359cc4a26 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 28 Nov 2022 19:57:03 +0100 Subject: [PATCH 194/435] refactor: rename inside databases --- src/config.rs | 6 +-- src/databases/database.rs | 94 --------------------------------------- src/databases/driver.rs | 7 +++ src/databases/error.rs | 21 +++++++++ src/databases/mod.rs | 72 +++++++++++++++++++++++++++++- src/databases/mysql.rs | 67 ++++++++++++++-------------- 
src/databases/sqlite.rs | 71 +++++++++++++++-------------- src/tracker/mod.rs | 21 +++++---- 8 files changed, 180 insertions(+), 179 deletions(-) delete mode 100644 src/databases/database.rs create mode 100644 src/databases/driver.rs create mode 100644 src/databases/error.rs diff --git a/src/config.rs b/src/config.rs index 67177aca1..a7e7e9df6 100644 --- a/src/config.rs +++ b/src/config.rs @@ -9,7 +9,7 @@ use serde::{Deserialize, Serialize}; use serde_with::{serde_as, NoneAsEmptyString}; use {std, toml}; -use crate::databases::database::Drivers; +use crate::databases::driver::Driver; use crate::tracker::mode; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] @@ -42,7 +42,7 @@ pub struct HttpApi { pub struct Configuration { pub log_level: Option, pub mode: mode::Mode, - pub db_driver: Drivers, + pub db_driver: Driver, pub db_path: String, pub announce_interval: u32, pub min_announce_interval: u32, @@ -98,7 +98,7 @@ impl Configuration { let mut configuration = Configuration { log_level: Option::from(String::from("info")), mode: mode::Mode::Public, - db_driver: Drivers::Sqlite3, + db_driver: Driver::Sqlite3, db_path: String::from("data.db"), announce_interval: 120, min_announce_interval: 120, diff --git a/src/databases/database.rs b/src/databases/database.rs deleted file mode 100644 index a4dae57ee..000000000 --- a/src/databases/database.rs +++ /dev/null @@ -1,94 +0,0 @@ -use async_trait::async_trait; -use derive_more::{Display, Error}; -use serde::{Deserialize, Serialize}; - -use crate::databases::mysql::Mysql; -use crate::databases::sqlite::Sqlite; -use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth; - -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] -pub enum Drivers { - Sqlite3, - MySQL, -} - -/// # Errors -/// -/// Will return `r2d2::Error` if `db_path` is not able to create a database. 
-pub fn connect(db_driver: &Drivers, db_path: &str) -> Result, r2d2::Error> { - let database: Box = match db_driver { - Drivers::Sqlite3 => { - let db = Sqlite::new(db_path)?; - Box::new(db) - } - Drivers::MySQL => { - let db = Mysql::new(db_path)?; - Box::new(db) - } - }; - - database.create_database_tables().expect("Could not create database tables."); - - Ok(database) -} - -#[async_trait] -pub trait Database: Sync + Send { - /// # Errors - /// - /// Will return `Error` if unable to create own tables. - fn create_database_tables(&self) -> Result<(), Error>; - - async fn load_persistent_torrents(&self) -> Result, Error>; - - async fn load_keys(&self) -> Result, Error>; - - async fn load_whitelist(&self) -> Result, Error>; - - async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error>; - - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result; - - async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; - - async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; - - async fn get_key_from_keys(&self, key: &str) -> Result; - - async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result; - - async fn remove_key_from_keys(&self, key: &str) -> Result; - - async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { - self.get_info_hash_from_whitelist(&info_hash.clone().to_string()) - .await - .map_or_else( - |e| match e { - Error::QueryReturnedNoRows => Ok(false), - e => Err(e), - }, - |_| Ok(true), - ) - } -} - -#[derive(Debug, Display, PartialEq, Eq, Error)] -#[allow(dead_code)] -pub enum Error { - #[display(fmt = "Query returned no rows.")] - QueryReturnedNoRows, - #[display(fmt = "Invalid query.")] - InvalidQuery, - #[display(fmt = "Database error.")] - DatabaseError, -} - -impl From for Error { - fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { - match e { - r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => Error::QueryReturnedNoRows, - _ => 
Error::InvalidQuery, - } - } -} diff --git a/src/databases/driver.rs b/src/databases/driver.rs new file mode 100644 index 000000000..7eaa9064e --- /dev/null +++ b/src/databases/driver.rs @@ -0,0 +1,7 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] +pub enum Driver { + Sqlite3, + MySQL, +} diff --git a/src/databases/error.rs b/src/databases/error.rs new file mode 100644 index 000000000..467db407f --- /dev/null +++ b/src/databases/error.rs @@ -0,0 +1,21 @@ +use derive_more::{Display, Error}; + +#[derive(Debug, Display, PartialEq, Eq, Error)] +#[allow(dead_code)] +pub enum Error { + #[display(fmt = "Query returned no rows.")] + QueryReturnedNoRows, + #[display(fmt = "Invalid query.")] + InvalidQuery, + #[display(fmt = "Database error.")] + DatabaseError, +} + +impl From for Error { + fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { + match e { + r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => Error::QueryReturnedNoRows, + _ => Error::InvalidQuery, + } + } +} diff --git a/src/databases/mod.rs b/src/databases/mod.rs index 169d99f4d..c1d265b56 100644 --- a/src/databases/mod.rs +++ b/src/databases/mod.rs @@ -1,3 +1,73 @@ -pub mod database; +pub mod driver; +pub mod error; pub mod mysql; pub mod sqlite; + +use async_trait::async_trait; + +use self::driver::Driver; +use self::error::Error; +use crate::databases::mysql::Mysql; +use crate::databases::sqlite::Sqlite; +use crate::protocol::info_hash::InfoHash; +use crate::tracker::auth; + +/// # Errors +/// +/// Will return `r2d2::Error` if `db_path` is not able to create a database. 
+pub fn connect(db_driver: &Driver, db_path: &str) -> Result, r2d2::Error> { + let database: Box = match db_driver { + Driver::Sqlite3 => { + let db = Sqlite::new(db_path)?; + Box::new(db) + } + Driver::MySQL => { + let db = Mysql::new(db_path)?; + Box::new(db) + } + }; + + database.create_database_tables().expect("Could not create database tables."); + + Ok(database) +} + +#[async_trait] +pub trait Database: Sync + Send { + /// # Errors + /// + /// Will return `Error` if unable to create own tables. + fn create_database_tables(&self) -> Result<(), Error>; + + async fn load_persistent_torrents(&self) -> Result, Error>; + + async fn load_keys(&self) -> Result, Error>; + + async fn load_whitelist(&self) -> Result, Error>; + + async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error>; + + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result; + + async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; + + async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; + + async fn get_key_from_keys(&self, key: &str) -> Result; + + async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result; + + async fn remove_key_from_keys(&self, key: &str) -> Result; + + async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { + self.get_info_hash_from_whitelist(&info_hash.clone().to_string()) + .await + .map_or_else( + |e| match e { + Error::QueryReturnedNoRows => Ok(false), + e => Err(e), + }, + |_| Ok(true), + ) + } +} diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 0d79315c6..8322b2273 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -8,8 +8,7 @@ use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MysqlConnectionManager; -use crate::databases::database; -use crate::databases::database::{Database, Error}; +use crate::databases::{Database, Error}; use 
crate::protocol::common::AUTH_KEY_LENGTH; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth; @@ -36,7 +35,7 @@ impl Mysql { #[async_trait] impl Database for Mysql { - fn create_database_tables(&self) -> Result<(), database::Error> { + fn create_database_tables(&self) -> Result<(), Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( id integer PRIMARY KEY AUTO_INCREMENT, @@ -64,7 +63,7 @@ impl Database for Mysql { i8::try_from(AUTH_KEY_LENGTH).expect("auth::Auth Key Length Should fit within a i8!") ); - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; conn.query_drop(&create_torrents_table) .expect("Could not create torrents table."); @@ -75,8 +74,8 @@ impl Database for Mysql { Ok(()) } - async fn load_persistent_torrents(&self) -> Result, database::Error> { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn load_persistent_torrents(&self) -> Result, Error> { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let torrents: Vec<(InfoHash, u32)> = conn .query_map( @@ -86,13 +85,13 @@ impl Database for Mysql { (info_hash, completed) }, ) - .map_err(|_| database::Error::QueryReturnedNoRows)?; + .map_err(|_| Error::QueryReturnedNoRows)?; Ok(torrents) } async fn load_keys(&self) -> Result, Error> { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let keys: Vec = conn .query_map( @@ -102,25 +101,25 @@ impl Database for Mysql { valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }, ) - .map_err(|_| database::Error::QueryReturnedNoRows)?; + .map_err(|_| Error::QueryReturnedNoRows)?; Ok(keys) } async fn load_whitelist(&self) -> Result, Error> { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + let mut conn = self.pool.get().map_err(|_| 
Error::DatabaseError)?; let info_hashes: Vec = conn .query_map("SELECT info_hash FROM whitelist", |info_hash: String| { InfoHash::from_str(&info_hash).unwrap() }) - .map_err(|_| database::Error::QueryReturnedNoRows)?; + .map_err(|_| Error::QueryReturnedNoRows)?; Ok(info_hashes) } - async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), database::Error> { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let info_hash_str = info_hash.to_string(); @@ -132,28 +131,28 @@ impl Database for Mysql { } Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn .exec_first::( "SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", params! { info_hash }, ) - .map_err(|_| database::Error::DatabaseError)? + .map_err(|_| Error::DatabaseError)? 
{ Some(info_hash) => Ok(InfoHash::from_str(&info_hash).unwrap()), - None => Err(database::Error::QueryReturnedNoRows), + None => Err(Error::QueryReturnedNoRows), } } - async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let info_hash_str = info_hash.to_string(); @@ -164,13 +163,13 @@ impl Database for Mysql { Ok(_) => Ok(1), Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let info_hash = info_hash.to_string(); @@ -178,28 +177,28 @@ impl Database for Mysql { Ok(_) => Ok(1), Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn get_key_from_keys(&self, key: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn get_key_from_keys(&self, key: &str) -> Result { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn .exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }) - .map_err(|_| database::Error::QueryReturnedNoRows)? + .map_err(|_| Error::QueryReturnedNoRows)? 
{ Some((key, valid_until)) => Ok(auth::Key { key, valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }), - None => Err(database::Error::InvalidQuery), + None => Err(Error::InvalidQuery), } } - async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let key = auth_key.key.to_string(); let valid_until = auth_key.valid_until.unwrap_or(Duration::ZERO).as_secs().to_string(); @@ -211,19 +210,19 @@ impl Database for Mysql { Ok(_) => Ok(1), Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn remove_key_from_keys(&self, key: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn remove_key_from_keys(&self, key: &str) -> Result { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! 
{ key }) { Ok(_) => Ok(1), Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index c42e9382d..c5401aacf 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -5,8 +5,7 @@ use log::debug; use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; -use crate::databases::database; -use crate::databases::database::{Database, Error}; +use crate::databases::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth; @@ -28,7 +27,7 @@ impl Sqlite { #[async_trait] impl Database for Sqlite { - fn create_database_tables(&self) -> Result<(), database::Error> { + fn create_database_tables(&self) -> Result<(), Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( id INTEGER PRIMARY KEY AUTOINCREMENT, @@ -52,17 +51,17 @@ impl Database for Sqlite { );" .to_string(); - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; conn.execute(&create_whitelist_table, []) .and_then(|_| conn.execute(&create_keys_table, [])) .and_then(|_| conn.execute(&create_torrents_table, [])) - .map_err(|_| database::Error::InvalidQuery) + .map_err(|_| Error::InvalidQuery) .map(|_| ()) } - async fn load_persistent_torrents(&self) -> Result, database::Error> { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn load_persistent_torrents(&self) -> Result, Error> { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; @@ -79,7 +78,7 @@ impl Database for Sqlite { } async fn load_keys(&self) -> Result, Error> { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let mut stmt = 
conn.prepare("SELECT key, valid_until FROM keys")?; @@ -99,7 +98,7 @@ impl Database for Sqlite { } async fn load_whitelist(&self) -> Result, Error> { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT info_hash FROM whitelist")?; @@ -114,8 +113,8 @@ impl Database for Sqlite { Ok(info_hashes) } - async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), database::Error> { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn.execute( "INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2", @@ -125,17 +124,17 @@ impl Database for Sqlite { if updated > 0 { return Ok(()); } - Err(database::Error::QueryReturnedNoRows) + Err(Error::QueryReturnedNoRows) } Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; let mut rows = stmt.query([info_hash])?; @@ -143,51 +142,51 @@ impl Database for Sqlite { match rows.next() { Ok(row) => match row { Some(row) => Ok(InfoHash::from_str(&row.get_unwrap::<_, String>(0)).unwrap()), - None => Err(database::Error::QueryReturnedNoRows), + None => Err(Error::QueryReturnedNoRows), }, Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn 
add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", [info_hash.to_string()]) { Ok(updated) => { if updated > 0 { return Ok(updated); } - Err(database::Error::QueryReturnedNoRows) + Err(Error::QueryReturnedNoRows) } Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", [info_hash.to_string()]) { Ok(updated) => { if updated > 0 { return Ok(updated); } - Err(database::Error::QueryReturnedNoRows) + Err(Error::QueryReturnedNoRows) } Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn get_key_from_keys(&self, key: &str) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn get_key_from_keys(&self, key: &str) -> Result { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; let mut rows = stmt.query([key.to_string()])?; @@ -201,12 +200,12 @@ impl Database for Sqlite { valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }) } else { - Err(database::Error::QueryReturnedNoRows) + Err(Error::QueryReturnedNoRows) } } - async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { - let conn = self.pool.get().map_err(|_| 
database::Error::DatabaseError)?; + async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn.execute( "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", @@ -216,28 +215,28 @@ impl Database for Sqlite { if updated > 0 { return Ok(updated); } - Err(database::Error::QueryReturnedNoRows) + Err(Error::QueryReturnedNoRows) } Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn remove_key_from_keys(&self, key: &str) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn remove_key_from_keys(&self, key: &str) -> Result { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn.execute("DELETE FROM keys WHERE key = ?", [key]) { Ok(updated) => { if updated > 0 { return Ok(updated); } - Err(database::Error::QueryReturnedNoRows) + Err(Error::QueryReturnedNoRows) } Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 806efee54..bd2da93f0 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -14,8 +14,7 @@ use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; use crate::config::Configuration; -use crate::databases::database; -use crate::databases::database::Database; +use crate::databases::{self, Database}; use crate::protocol::info_hash::InfoHash; pub struct Tracker { @@ -38,7 +37,7 @@ impl Tracker { stats_event_sender: Option>, stats_repository: statistics::Repo, ) -> Result { - let database = database::connect(&config.db_driver, &config.db_path)?; + let database = databases::connect(&config.db_driver, &config.db_path)?; Ok(Tracker { config: config.clone(), @@ -67,7 +66,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. 
- pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { + pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { let auth_key = auth::generate(lifetime); self.database.add_key_to_keys(&auth_key).await?; self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); @@ -77,7 +76,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to remove the `key` to the database. - pub async fn remove_auth_key(&self, key: &str) -> Result<(), database::Error> { + pub async fn remove_auth_key(&self, key: &str) -> Result<(), databases::error::Error> { self.database.remove_key_from_keys(key).await?; self.keys.write().await.remove(key); Ok(()) @@ -96,7 +95,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to `load_keys` from the database. - pub async fn load_keys(&self) -> Result<(), database::Error> { + pub async fn load_keys(&self) -> Result<(), databases::error::Error> { let keys_from_database = self.database.load_keys().await?; let mut keys = self.keys.write().await; @@ -114,14 +113,14 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. 
- pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { self.add_torrent_to_database_whitelist(info_hash).await?; self.add_torrent_to_memory_whitelist(info_hash).await; Ok(()) } /// It adds a torrent to the whitelist if it has not been whitelisted previously - async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { if self.database.is_info_hash_whitelisted(info_hash).await.unwrap() { return Ok(()); } @@ -140,7 +139,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. - pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { self.database.remove_info_hash_from_whitelist(*info_hash).await?; self.whitelist.write().await.remove(info_hash); Ok(()) @@ -153,7 +152,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. - pub async fn load_whitelist(&self) -> Result<(), database::Error> { + pub async fn load_whitelist(&self) -> Result<(), databases::error::Error> { let whitelisted_torrents_from_database = self.database.load_whitelist().await?; let mut whitelist = self.whitelist.write().await; @@ -206,7 +205,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. 
- pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> { + pub async fn load_persistent_torrents(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; let mut torrents = self.torrents.write().await; From 32eb44b318ddc57b574bae940ee384984bf7a24e Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 28 Nov 2022 20:08:37 +0100 Subject: [PATCH 195/435] refactor: rename inside http --- src/http/filters.rs | 21 ++++++++++----------- src/http/handlers.rs | 17 ++++++++--------- 2 files changed, 18 insertions(+), 20 deletions(-) diff --git a/src/http/filters.rs b/src/http/filters.rs index e9432e191..0fe369eba 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -6,8 +6,7 @@ use std::sync::Arc; use warp::{reject, Filter, Rejection}; use super::error::Error; -use super::request::{Announce, AnnounceQuery, Scrape}; -use super::WebResult; +use super::{request, WebResult}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; use crate::tracker::{self, auth, peer}; @@ -51,10 +50,10 @@ pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter impl Filter + Clone { - warp::filters::query::query::() +pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter + Clone { + warp::filters::query::query::() .and(with_info_hash()) .and(with_peer_id()) .and(with_peer_addr(on_reverse_proxy)) @@ -63,7 +62,7 @@ pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter impl Filter + Clone { +pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::any() .and(with_info_hash()) .and(with_peer_addr(on_reverse_proxy)) @@ -162,12 +161,12 @@ fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, peer_id: peer::Id, peer_addr: IpAddr, -) -> WebResult { - Ok(Announce { +) -> WebResult { + Ok(request::Announce { info_hash: info_hashes[0], peer_addr, downloaded: 
announce_request_query.downloaded.unwrap_or(0), @@ -182,6 +181,6 @@ fn announce_request( /// Parse `ScrapeRequest` from `InfoHash` #[allow(clippy::unnecessary_wraps)] -fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { - Ok(Scrape { info_hashes, peer_addr }) +fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { + Ok(request::Scrape { info_hashes, peer_addr }) } diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 8d8816885..0e230e785 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -8,8 +8,7 @@ use warp::http::Response; use warp::{reject, Rejection, Reply}; use super::error::Error; -use super::response::{self, Peer, ScrapeEntry}; -use super::{request, WebResult}; +use super::{request, response, WebResult}; use crate::protocol::info_hash::InfoHash; use crate::tracker::{self, auth, peer, statistics, torrent}; @@ -89,7 +88,7 @@ pub async fn handle_scrape( auth_key: Option, tracker: Arc, ) -> WebResult { - let mut files: HashMap = HashMap::new(); + let mut files: HashMap = HashMap::new(); let db = tracker.get_torrents().await; for info_hash in &scrape_request.info_hashes { @@ -97,20 +96,20 @@ pub async fn handle_scrape( Some(torrent_info) => { if authenticate(info_hash, &auth_key, tracker.clone()).await.is_ok() { let (seeders, completed, leechers) = torrent_info.get_stats(); - ScrapeEntry { + response::ScrapeEntry { complete: seeders, downloaded: completed, incomplete: leechers, } } else { - ScrapeEntry { + response::ScrapeEntry { complete: 0, downloaded: 0, incomplete: 0, } } } - None => ScrapeEntry { + None => response::ScrapeEntry { complete: 0, downloaded: 0, incomplete: 0, @@ -142,9 +141,9 @@ fn send_announce_response( interval: u32, interval_min: u32, ) -> WebResult { - let http_peers: Vec = peers + let http_peers: Vec = peers .iter() - .map(|peer| Peer { + .map(|peer| response::Peer { peer_id: peer.peer_id.to_string(), ip: peer.peer_addr.ip(), port: peer.peer_addr.port(), @@ -171,7 +170,7 @@ fn 
send_announce_response( } /// Send scrape response -fn send_scrape_response(files: HashMap) -> WebResult { +fn send_scrape_response(files: HashMap) -> WebResult { let res = response::Scrape { files }; match res.write() { From 49a6acbcb922d24016a4d33a91bf4e89f0c09cf8 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 23 Nov 2022 17:21:10 +0100 Subject: [PATCH 196/435] ci: clippy warning as errors --- .github/workflows/test_build_release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index 4acf14277..3924eea4b 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -45,7 +45,7 @@ jobs: uses: actions-rs/cargo@v1 with: command: clippy - args: --all-targets + args: --all-targets -- -D clippy::pedantic - uses: taiki-e/install-action@cargo-llvm-cov - uses: taiki-e/install-action@nextest - name: Run Tests From 01e71bfe5c0a0c7cd0e54af8096cd6adf8d67efe Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 30 Nov 2022 15:31:52 +0100 Subject: [PATCH 197/435] clippy: fix src/tracker/peer.rs --- src/api/resources/torrent_resource.rs | 6 +-- src/http/handlers.rs | 5 +-- src/tracker/mod.rs | 6 +-- src/tracker/peer.rs | 54 +++++++++++++-------------- src/tracker/torrent.rs | 16 ++++---- src/udp/handlers.rs | 8 ++-- tests/api.rs | 6 +-- 7 files changed, 49 insertions(+), 52 deletions(-) diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs index 4063b95f5..bc1a9acf5 100644 --- a/src/api/resources/torrent_resource.rs +++ b/src/api/resources/torrent_resource.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -use crate::tracker::peer::{self, TorrentPeer}; +use crate::tracker::peer::{self, Peer}; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct TorrentResource { @@ -50,9 +50,9 @@ impl From for PeerIdResource { } } -impl From for TorrentPeerResource { +impl From 
for TorrentPeerResource { #[allow(deprecated)] - fn from(peer: TorrentPeer) -> Self { + fn from(peer: Peer) -> Self { TorrentPeerResource { peer_id: PeerIdResource::from(peer.peer_id), peer_addr: peer.peer_addr.to_string(), diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 0e230e785..2fc878354 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -48,8 +48,7 @@ pub async fn handle_announce( debug!("{:?}", announce_request); - let peer = - peer::TorrentPeer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); + let peer = peer::Peer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); let torrent_stats = tracker .update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer) .await; @@ -137,7 +136,7 @@ pub async fn handle_scrape( fn send_announce_response( announce_request: &request::Announce, torrent_stats: &torrent::Stats, - peers: &Vec, + peers: &Vec, interval: u32, interval_min: u32, ) -> WebResult { diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index bd2da93f0..4b2dabebb 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -227,7 +227,7 @@ impl Tracker { } /// Get all torrent peers for a given torrent filtering out the peer with the client address - pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { + pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { @@ -237,7 +237,7 @@ impl Tracker { } /// Get all torrent peers for a given torrent - pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { + pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { @@ -246,7 +246,7 @@ impl Tracker { } } - pub async fn 
update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::TorrentPeer) -> torrent::Stats { + pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::Stats { let mut torrents = self.torrents.write().await; let torrent_entry = match torrents.entry(*info_hash) { diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 16aada0ed..2da257d3e 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -10,7 +10,7 @@ use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; use crate::protocol::utils::ser_unix_time_value; #[derive(PartialEq, Eq, Debug, Clone, Serialize, Copy)] -pub struct TorrentPeer { +pub struct Peer { pub peer_id: Id, pub peer_addr: SocketAddr, #[serde(serialize_with = "ser_unix_time_value")] @@ -25,16 +25,16 @@ pub struct TorrentPeer { pub event: AnnounceEvent, } -impl TorrentPeer { +impl Peer { #[must_use] pub fn from_udp_announce_request( announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option, ) -> Self { - let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); + let peer_addr = Peer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); - TorrentPeer { + Peer { peer_id: Id(announce_request.peer_id.0), peer_addr, updated: Current::now(), @@ -47,7 +47,7 @@ impl TorrentPeer { #[must_use] pub fn from_http_announce_request(announce_request: &Announce, remote_ip: IpAddr, host_opt_ip: Option) -> Self { - let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); + let peer_addr = Peer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); let event: AnnounceEvent = if let Some(event) = &announce_request.event { match event.as_ref() { @@ -61,8 +61,8 @@ impl TorrentPeer { }; #[allow(clippy::cast_possible_truncation)] - 
TorrentPeer { - peer_id: announce_request.peer_id.clone(), + Peer { + peer_id: announce_request.peer_id, peer_addr, updated: Current::now(), uploaded: NumberOfBytes(i128::from(announce_request.uploaded) as i64), @@ -104,6 +104,9 @@ impl std::fmt::Display for Id { impl Id { #[must_use] + /// # Panics + /// + /// It will panic if the `binascii::bin2hex` from a too-small output buffer. pub fn get_id(&self) -> Option { let buff_size = self.0.len() * 2; let mut tmp: Vec = vec![0; buff_size]; @@ -202,11 +205,6 @@ impl Serialize for Id { client: Option<&'a str>, } - let buff_size = self.0.len() * 2; - let mut tmp: Vec = vec![0; buff_size]; - binascii::bin2hex(&self.0, &mut tmp).unwrap(); - let id = std::str::from_utf8(&tmp).ok(); - let obj = PeerIdInfo { id: self.get_id(), client: self.get_client_name(), @@ -224,11 +222,11 @@ mod test { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use crate::protocol::clock::{Current, Time}; - use crate::tracker::peer::{self, TorrentPeer}; + use crate::tracker::peer::{self, Peer}; #[test] fn it_should_be_serializable() { - let torrent_peer = TorrentPeer { + let torrent_peer = Peer { peer_id: peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: Current::now(), @@ -256,7 +254,7 @@ mod test { AnnounceEvent, AnnounceRequest, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, TransactionId, }; - use crate::tracker::peer::TorrentPeer; + use crate::tracker::peer::Peer; use crate::udp::connection_cookie::{into_connection_id, make}; // todo: duplicate functions is PR 82. Remove duplication once both PR are merged. 
@@ -308,7 +306,7 @@ mod test { let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); let announce_request = AnnounceRequestBuilder::default().into(); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, None); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, None); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); } @@ -318,7 +316,7 @@ mod test { let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); let announce_request = AnnounceRequestBuilder::default().into(); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, None); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, None); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); } @@ -329,14 +327,14 @@ mod test { use std::str::FromStr; use crate::tracker::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; - use crate::tracker::peer::TorrentPeer; + use crate::tracker::peer::Peer; #[test] fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); let announce_request = AnnounceRequestBuilder::default().into(); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, None); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, None); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); } @@ -347,7 +345,7 @@ mod test { let announce_request = AnnounceRequestBuilder::default().into(); let host_opt_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); 
assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); } @@ -358,7 +356,7 @@ mod test { let announce_request = AnnounceRequestBuilder::default().into(); let host_opt_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); } @@ -370,14 +368,14 @@ mod test { use std::str::FromStr; use crate::tracker::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; - use crate::tracker::peer::TorrentPeer; + use crate::tracker::peer::Peer; #[test] fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); let announce_request = AnnounceRequestBuilder::default().into(); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, None); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, None); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); } @@ -388,7 +386,7 @@ mod test { let announce_request = AnnounceRequestBuilder::default().into(); let host_opt_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); } @@ -399,7 +397,7 @@ mod test { let announce_request = AnnounceRequestBuilder::default().into(); let host_opt_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); - 
let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); } @@ -411,7 +409,7 @@ mod test { use crate::http::request::Announce; use crate::protocol::info_hash::InfoHash; - use crate::tracker::peer::{self, TorrentPeer}; + use crate::tracker::peer::{self, Peer}; fn sample_http_announce_request(peer_addr: IpAddr, port: u16) -> Announce { Announce { @@ -434,7 +432,7 @@ mod test { let ip_in_announce_request = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); let announce_request = sample_http_announce_request(ip_in_announce_request, 8080); - let torrent_peer = TorrentPeer::from_http_announce_request(&announce_request, remote_ip, None); + let torrent_peer = Peer::from_http_announce_request(&announce_request, remote_ip, None); assert_eq!(torrent_peer.peer_addr.ip(), remote_ip); assert_ne!(torrent_peer.peer_addr.ip(), ip_in_announce_request); @@ -449,7 +447,7 @@ mod test { let announce_request = sample_http_announce_request(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), port_in_announce_request); - let torrent_peer = TorrentPeer::from_http_announce_request(&announce_request, remote_ip, None); + let torrent_peer = Peer::from_http_announce_request(&announce_request, remote_ip, None); assert_eq!(torrent_peer.peer_addr.port(), announce_request.port); assert_ne!(torrent_peer.peer_addr.port(), remote_port); diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 3e38d2340..8058ab891 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -11,7 +11,7 @@ use crate::protocol::common::MAX_SCRAPE_TORRENTS; #[derive(Serialize, Deserialize, Clone, Debug)] pub struct Entry { #[serde(skip)] - pub peers: std::collections::BTreeMap, + pub peers: std::collections::BTreeMap, pub completed: u32, } @@ -25,7 +25,7 @@ impl Entry { } // Update peer 
and return completed (times torrent has been downloaded) - pub fn update_peer(&mut self, peer: &peer::TorrentPeer) -> bool { + pub fn update_peer(&mut self, peer: &peer::Peer) -> bool { let mut did_torrent_stats_change: bool = false; match peer.event { @@ -49,7 +49,7 @@ impl Entry { } #[must_use] - pub fn get_peers(&self, client_addr: Option<&SocketAddr>) -> Vec<&peer::TorrentPeer> { + pub fn get_peers(&self, client_addr: Option<&SocketAddr>) -> Vec<&peer::Peer> { self.peers .values() .filter(|peer| match client_addr { @@ -122,12 +122,12 @@ mod tests { use crate::tracker::torrent::Entry; struct TorrentPeerBuilder { - peer: peer::TorrentPeer, + peer: peer::Peer, } impl TorrentPeerBuilder { pub fn default() -> TorrentPeerBuilder { - let default_peer = peer::TorrentPeer { + let default_peer = peer::Peer { peer_id: peer::Id([0u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), updated: Current::now(), @@ -164,14 +164,14 @@ mod tests { self } - pub fn into(self) -> peer::TorrentPeer { + pub fn into(self) -> peer::Peer { self.peer } } /// A torrent seeder is a peer with 0 bytes left to download which /// has not announced it has stopped - fn a_torrent_seeder() -> peer::TorrentPeer { + fn a_torrent_seeder() -> peer::Peer { TorrentPeerBuilder::default() .with_number_of_bytes_left(0) .with_event_completed() @@ -180,7 +180,7 @@ mod tests { /// A torrent leecher is a peer that is not a seeder. 
/// Leecher: left > 0 OR event = Stopped - fn a_torrent_leecher() -> peer::TorrentPeer { + fn a_torrent_leecher() -> peer::Peer { TorrentPeerBuilder::default() .with_number_of_bytes_left(1) .with_event_completed() diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index da4bdbf35..625f42d40 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -92,7 +92,7 @@ pub async fn handle_announce( .authenticate_request(&wrapped_announce_request.info_hash, &None) .await?; - let peer = peer::TorrentPeer::from_udp_announce_request( + let peer = peer::Peer::from_udp_announce_request( &wrapped_announce_request.announce_request, remote_addr.ip(), tracker.config.get_ext_ip(), @@ -289,12 +289,12 @@ mod tests { } struct TorrentPeerBuilder { - peer: peer::TorrentPeer, + peer: peer::Peer, } impl TorrentPeerBuilder { pub fn default() -> TorrentPeerBuilder { - let default_peer = peer::TorrentPeer { + let default_peer = peer::Peer { peer_id: peer::Id([255u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: Current::now(), @@ -321,7 +321,7 @@ mod tests { self } - pub fn into(self) -> peer::TorrentPeer { + pub fn into(self) -> peer::Peer { self.peer } } diff --git a/tests/api.rs b/tests/api.rs index 22a222698..824c198e2 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -24,7 +24,7 @@ mod tracker_api { use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth; - use torrust_tracker::tracker::peer::{self, TorrentPeer}; + use torrust_tracker::tracker::peer::{self, Peer}; use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; @@ -187,8 +187,8 @@ mod tracker_api { ); } - fn sample_torrent_peer() -> (TorrentPeer, TorrentPeerResource) { - let torrent_peer = TorrentPeer { + fn sample_torrent_peer() -> (Peer, TorrentPeerResource) { + let torrent_peer = Peer { peer_id: 
peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), From 0f075e4daee476db840a4dbf98e3639a84ecd1bc Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 30 Nov 2022 17:57:13 +0100 Subject: [PATCH 198/435] refactor: src/api/resource(s) --- src/api/mod.rs | 2 +- src/api/{resources => resource}/auth_key.rs | 0 src/api/{resources => resource}/mod.rs | 7 +- src/api/resource/peer.rs | 47 +++++++++++++ .../stats_resource.rs => resource/stats.rs} | 2 +- src/api/resource/torrent.rs | 21 ++++++ src/api/resources/torrent_resource.rs | 67 ------------------- src/api/server.rs | 15 +++-- src/http/handlers.rs | 2 +- src/tracker/mod.rs | 4 +- src/tracker/torrent.rs | 2 +- tests/api.rs | 34 +++++----- 12 files changed, 103 insertions(+), 100 deletions(-) rename src/api/{resources => resource}/auth_key.rs (100%) rename src/api/{resources => resource}/mod.rs (55%) create mode 100644 src/api/resource/peer.rs rename src/api/{resources/stats_resource.rs => resource/stats.rs} (95%) create mode 100644 src/api/resource/torrent.rs delete mode 100644 src/api/resources/torrent_resource.rs diff --git a/src/api/mod.rs b/src/api/mod.rs index 46ad24218..16abb8e27 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1,2 +1,2 @@ -pub mod resources; +pub mod resource; pub mod server; diff --git a/src/api/resources/auth_key.rs b/src/api/resource/auth_key.rs similarity index 100% rename from src/api/resources/auth_key.rs rename to src/api/resource/auth_key.rs diff --git a/src/api/resources/mod.rs b/src/api/resource/mod.rs similarity index 55% rename from src/api/resources/mod.rs rename to src/api/resource/mod.rs index f708fc2e4..e86c550ca 100644 --- a/src/api/resources/mod.rs +++ b/src/api/resource/mod.rs @@ -3,10 +3,11 @@ //! WIP. Not all endpoints have their resource structs. //! //! - [x] `AuthKeys` -//! 
- [ ] `TorrentResource`, `TorrentListItemResource`, `TorrentPeerResource`, `PeerIdResource` +//! - [ ] `Torrent`, `ListItem`, `Peer`, `PeerId` //! - [ ] `StatsResource` //! - [ ] ... pub mod auth_key; -pub mod stats_resource; -pub mod torrent_resource; +pub mod peer; +pub mod stats; +pub mod torrent; diff --git a/src/api/resource/peer.rs b/src/api/resource/peer.rs new file mode 100644 index 000000000..ff84be197 --- /dev/null +++ b/src/api/resource/peer.rs @@ -0,0 +1,47 @@ +use serde::{Deserialize, Serialize}; + +use crate::tracker; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Peer { + pub peer_id: Id, + pub peer_addr: String, + #[deprecated(since = "2.0.0", note = "please use `updated_milliseconds_ago` instead")] + pub updated: u128, + pub updated_milliseconds_ago: u128, + pub uploaded: i64, + pub downloaded: i64, + pub left: i64, + pub event: String, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Id { + pub id: Option, + pub client: Option, +} + +impl From for Id { + fn from(peer_id: tracker::peer::Id) -> Self { + Id { + id: peer_id.get_id(), + client: peer_id.get_client_name().map(std::string::ToString::to_string), + } + } +} + +impl From for Peer { + #[allow(deprecated)] + fn from(peer: tracker::peer::Peer) -> Self { + Peer { + peer_id: Id::from(peer.peer_id), + peer_addr: peer.peer_addr.to_string(), + updated: peer.updated.as_millis(), + updated_milliseconds_ago: peer.updated.as_millis(), + uploaded: peer.uploaded.0, + downloaded: peer.downloaded.0, + left: peer.left.0, + event: format!("{:?}", peer.event), + } + } +} diff --git a/src/api/resources/stats_resource.rs b/src/api/resource/stats.rs similarity index 95% rename from src/api/resources/stats_resource.rs rename to src/api/resource/stats.rs index e6f184897..e87f08f63 100644 --- a/src/api/resources/stats_resource.rs +++ b/src/api/resource/stats.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] 
-pub struct StatsResource { +pub struct Stats { pub torrents: u32, pub seeders: u32, pub completed: u32, diff --git a/src/api/resource/torrent.rs b/src/api/resource/torrent.rs new file mode 100644 index 000000000..924b61b8c --- /dev/null +++ b/src/api/resource/torrent.rs @@ -0,0 +1,21 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Torrent { + pub info_hash: String, + pub seeders: u32, + pub completed: u32, + pub leechers: u32, + #[serde(skip_serializing_if = "Option::is_none")] + pub peers: Option>, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct ListItem { + pub info_hash: String, + pub seeders: u32, + pub completed: u32, + pub leechers: u32, + // todo: this is always None. Remove field from endpoint? + pub peers: Option>, +} diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs deleted file mode 100644 index bc1a9acf5..000000000 --- a/src/api/resources/torrent_resource.rs +++ /dev/null @@ -1,67 +0,0 @@ -use serde::{Deserialize, Serialize}; - -use crate::tracker::peer::{self, Peer}; - -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] -pub struct TorrentResource { - pub info_hash: String, - pub seeders: u32, - pub completed: u32, - pub leechers: u32, - #[serde(skip_serializing_if = "Option::is_none")] - pub peers: Option>, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] -pub struct TorrentListItemResource { - pub info_hash: String, - pub seeders: u32, - pub completed: u32, - pub leechers: u32, - // todo: this is always None. Remove field from endpoint? 
- pub peers: Option>, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] -pub struct TorrentPeerResource { - pub peer_id: PeerIdResource, - pub peer_addr: String, - #[deprecated(since = "2.0.0", note = "please use `updated_milliseconds_ago` instead")] - pub updated: u128, - pub updated_milliseconds_ago: u128, - pub uploaded: i64, - pub downloaded: i64, - pub left: i64, - pub event: String, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] -pub struct PeerIdResource { - pub id: Option, - pub client: Option, -} - -impl From for PeerIdResource { - fn from(peer_id: peer::Id) -> Self { - PeerIdResource { - id: peer_id.get_id(), - client: peer_id.get_client_name().map(std::string::ToString::to_string), - } - } -} - -impl From for TorrentPeerResource { - #[allow(deprecated)] - fn from(peer: Peer) -> Self { - TorrentPeerResource { - peer_id: PeerIdResource::from(peer.peer_id), - peer_addr: peer.peer_addr.to_string(), - updated: peer.updated.as_millis(), - updated_milliseconds_ago: peer.updated.as_millis(), - uploaded: peer.uploaded.0, - downloaded: peer.downloaded.0, - left: peer.left.0, - event: format!("{:?}", peer.event), - } - } -} diff --git a/src/api/server.rs b/src/api/server.rs index af2d66458..5967a8be4 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -7,9 +7,10 @@ use std::time::Duration; use serde::{Deserialize, Serialize}; use warp::{filters, reply, serve, Filter}; -use super::resources::auth_key::AuthKey; -use super::resources::stats_resource::StatsResource; -use super::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; +use super::resource::auth_key::AuthKey; +use super::resource::peer; +use super::resource::stats::Stats; +use super::resource::torrent::{ListItem, Torrent}; use crate::protocol::info_hash::InfoHash; use crate::tracker; @@ -81,7 +82,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl w .iter() .map(|(info_hash, torrent_entry)| { let (seeders, completed, 
leechers) = torrent_entry.get_stats(); - TorrentListItemResource { + ListItem { info_hash: info_hash.to_string(), seeders, completed, @@ -104,7 +105,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl w .and(filters::path::end()) .map(move || api_stats.clone()) .and_then(|tracker: Arc| async move { - let mut results = StatsResource { + let mut results = Stats { torrents: 0, seeders: 0, completed: 0, @@ -179,9 +180,9 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl w let peers = torrent_entry.get_peers(None); - let peer_resources = peers.iter().map(|peer| TorrentPeerResource::from(**peer)).collect(); + let peer_resources = peers.iter().map(|peer| peer::Peer::from(**peer)).collect(); - Ok(reply::json(&TorrentResource { + Ok(reply::json(&Torrent { info_hash: info_hash.to_string(), seeders, completed, diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 2fc878354..1170b7188 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -135,7 +135,7 @@ pub async fn handle_scrape( #[allow(clippy::ptr_arg)] fn send_announce_response( announce_request: &request::Announce, - torrent_stats: &torrent::Stats, + torrent_stats: &torrent::SwamStats, peers: &Vec, interval: u32, interval_min: u32, diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 4b2dabebb..4469d682b 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -246,7 +246,7 @@ impl Tracker { } } - pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::Stats { + pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::SwamStats { let mut torrents = self.torrents.write().await; let torrent_entry = match torrents.entry(*info_hash) { @@ -266,7 +266,7 @@ impl Tracker { let (seeders, completed, leechers) = torrent_entry.get_stats(); - torrent::Stats { + torrent::SwamStats { completed, seeders, leechers, diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs 
index 8058ab891..e292dff54 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -93,7 +93,7 @@ impl Default for Entry { } #[derive(Debug)] -pub struct Stats { +pub struct SwamStats { pub completed: u32, pub seeders: u32, pub leechers: u32, diff --git a/tests/api.rs b/tests/api.rs index 824c198e2..706cd0b8d 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -16,16 +16,16 @@ mod tracker_api { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use reqwest::Response; use tokio::task::JoinHandle; - use torrust_tracker::api::resources::auth_key::AuthKey; - use torrust_tracker::api::resources::stats_resource::StatsResource; - use torrust_tracker::api::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; + use torrust_tracker::api::resource; + use torrust_tracker::api::resource::auth_key::AuthKey; + use torrust_tracker::api::resource::stats::Stats; + use torrust_tracker::api::resource::torrent::{self, Torrent}; use torrust_tracker::config::Configuration; use torrust_tracker::jobs::tracker_api; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth; - use torrust_tracker::tracker::peer::{self, Peer}; use torrust_tracker::tracker::statistics::Keeper; + use torrust_tracker::tracker::{auth, peer}; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use crate::common::ephemeral_random_port; @@ -104,7 +104,7 @@ mod tracker_api { assert_eq!( torrent_resource, - TorrentResource { + Torrent { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, completed: 0, @@ -135,7 +135,7 @@ mod tracker_api { assert_eq!( torrent_resources, - vec![TorrentListItemResource { + vec![torrent::ListItem { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, completed: 0, @@ -166,7 +166,7 @@ mod tracker_api { assert_eq!( stats_resource, - StatsResource { + Stats { 
torrents: 1, seeders: 1, completed: 0, @@ -187,8 +187,8 @@ mod tracker_api { ); } - fn sample_torrent_peer() -> (Peer, TorrentPeerResource) { - let torrent_peer = Peer { + fn sample_torrent_peer() -> (peer::Peer, resource::peer::Peer) { + let torrent_peer = peer::Peer { peer_id: peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), @@ -197,7 +197,7 @@ mod tracker_api { left: NumberOfBytes(0), event: AnnounceEvent::Started, }; - let torrent_peer_resource = TorrentPeerResource::from(torrent_peer); + let torrent_peer_resource = resource::peer::Peer::from(torrent_peer); (torrent_peer, torrent_peer_resource) } @@ -326,7 +326,7 @@ mod tracker_api { reqwest::Client::new().post(url.clone()).send().await.unwrap() } - pub async fn get_torrent(&self, info_hash: &str) -> TorrentResource { + pub async fn get_torrent(&self, info_hash: &str) -> Torrent { let url = format!( "http://{}/api/torrent/{}?token={}", &self.connection_info.bind_address, &info_hash, &self.connection_info.api_token @@ -338,12 +338,12 @@ mod tracker_api { .send() .await .unwrap() - .json::() + .json::() .await .unwrap() } - pub async fn get_torrents(&self) -> Vec { + pub async fn get_torrents(&self) -> Vec { let url = format!( "http://{}/api/torrents?token={}", &self.connection_info.bind_address, &self.connection_info.api_token @@ -355,12 +355,12 @@ mod tracker_api { .send() .await .unwrap() - .json::>() + .json::>() .await .unwrap() } - pub async fn get_tracker_statistics(&self) -> StatsResource { + pub async fn get_tracker_statistics(&self) -> Stats { let url = format!( "http://{}/api/stats?token={}", &self.connection_info.bind_address, &self.connection_info.api_token @@ -372,7 +372,7 @@ mod tracker_api { .send() .await .unwrap() - .json::() + .json::() .await .unwrap() } From dca755000d3f6b45435e9771c0cbcaeea4fa3680 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 12 Dec 2022 11:56:10 
+0000 Subject: [PATCH 199/435] fix: [#125] using default sqlite db for tests I was using the default DB configuration for some tests. Every test has to use its own database and the database should be located in a different location than the production default location. --- src/udp/handlers.rs | 53 +++++++++++++++++++++++++++++++++------------ tests/udp.rs | 12 +++++++++- 2 files changed, 50 insertions(+), 15 deletions(-) diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 625f42d40..d167b3e6d 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -239,17 +239,42 @@ fn handle_error(e: &Error, transaction_id: TransactionId) -> Response { #[cfg(test)] mod tests { + use std::env; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use rand::{thread_rng, Rng}; use crate::config::Configuration; use crate::protocol::clock::{Current, Time}; use crate::tracker::{self, mode, peer, statistics}; - fn default_tracker_config() -> Arc { - Arc::new(Configuration::default()) + fn tracker_configuration() -> Arc { + Arc::new(default_testing_tracker_configuration()) + } + + fn default_testing_tracker_configuration() -> Configuration { + let mut config = Configuration::default(); + config.log_level = Some("off".to_owned()); + + // Ephemeral socket address + let port = ephemeral_random_port(); + config.http_api.bind_address = format!("127.0.0.1:{}", &port); + + // Ephemeral database + let temp_directory = env::temp_dir(); + let temp_file = temp_directory.join(format!("data_{}.db", &port)); + config.db_path = temp_file.to_str().unwrap().to_owned(); + + config + } + + fn ephemeral_random_port() -> u16 { + // todo: this may produce random test failures because two tests can try to bind the same port.
+ // We could create a pool of available ports (with read/write lock) + let mut rng = thread_rng(); + rng.gen_range(49152..65535) } fn initialized_public_tracker() -> Arc { @@ -332,7 +357,7 @@ mod tests { impl TrackerConfigurationBuilder { pub fn default() -> TrackerConfigurationBuilder { - let default_configuration = Configuration::default(); + let default_configuration = default_testing_tracker_configuration(); TrackerConfigurationBuilder { configuration: default_configuration, } @@ -361,7 +386,7 @@ mod tests { use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; use mockall::predicate::eq; - use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr}; + use super::{sample_ipv4_socket_address, sample_ipv6_remote_addr, tracker_configuration}; use crate::tracker::{self, statistics}; use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_connect; @@ -424,7 +449,7 @@ mod tests { let client_socket_address = sample_ipv4_socket_address(); let torrent_tracker = Arc::new( - tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_connect(client_socket_address, &sample_connect_request(), torrent_tracker) .await @@ -442,7 +467,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let torrent_tracker = Arc::new( - tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), torrent_tracker) .await @@ -538,7 +563,7 @@ mod tests { use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use 
crate::udp::handlers::tests::{ - default_tracker_config, initialized_public_tracker, sample_ipv4_socket_address, TorrentPeerBuilder, + initialized_public_tracker, sample_ipv4_socket_address, tracker_configuration, TorrentPeerBuilder, }; #[tokio::test] @@ -685,7 +710,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_announce( @@ -761,7 +786,7 @@ mod tests { use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ - default_tracker_config, initialized_public_tracker, sample_ipv6_remote_addr, TorrentPeerBuilder, + initialized_public_tracker, sample_ipv6_remote_addr, tracker_configuration, TorrentPeerBuilder, }; #[tokio::test] @@ -915,7 +940,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); let remote_addr = sample_ipv6_remote_addr(); @@ -1216,7 +1241,7 @@ mod tests { use super::sample_scrape_request; use crate::tracker::{self, statistics}; use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr}; + use crate::udp::handlers::tests::{sample_ipv4_remote_addr, tracker_configuration}; #[tokio::test] async fn should_send_the_upd4_scrape_event() { @@ -1230,7 +1255,7 @@ mod tests { let remote_addr = sample_ipv4_remote_addr(); let tracker = Arc::new( - tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + 
tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) @@ -1248,7 +1273,7 @@ mod tests { use super::sample_scrape_request; use crate::tracker::{self, statistics}; use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr}; + use crate::udp::handlers::tests::{sample_ipv6_remote_addr, tracker_configuration}; #[tokio::test] async fn should_send_the_upd6_scrape_event() { @@ -1262,7 +1287,7 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); let tracker = Arc::new( - tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) diff --git a/tests/udp.rs b/tests/udp.rs index 8bad37dbe..5f7a66856 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -7,6 +7,7 @@ mod common; mod udp_tracker_server { use core::panic; + use std::env; use std::io::Cursor; use std::net::Ipv4Addr; use std::sync::atomic::{AtomicBool, Ordering}; @@ -29,7 +30,16 @@ mod udp_tracker_server { fn tracker_configuration() -> Arc { let mut config = Configuration::default(); config.log_level = Some("off".to_owned()); - config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", ephemeral_random_port()); + + // Ephemeral socket address + let port = ephemeral_random_port(); + config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", &port); + + // Ephemeral database + let temp_directory = env::temp_dir(); + let temp_file = temp_directory.join(format!("data_{}.db", &port)); + config.db_path = temp_file.to_str().unwrap().to_owned(); + Arc::new(config) } From 5af28a291c6f1d9b6197649d8ec6394d1582f3fe Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 
2022 09:39:51 +0000 Subject: [PATCH 200/435] fix: the pedantic clippy warnings --- src/config.rs | 4 ++-- src/protocol/info_hash.rs | 2 +- src/tracker/auth.rs | 2 +- src/udp/handlers.rs | 6 ++++-- tests/api.rs | 6 ++++-- tests/udp.rs | 8 +++++--- 6 files changed, 17 insertions(+), 11 deletions(-) diff --git a/src/config.rs b/src/config.rs index a7e7e9df6..ba99e0f45 100644 --- a/src/config.rs +++ b/src/config.rs @@ -74,7 +74,7 @@ impl std::fmt::Display for Error { Error::ConfigError(e) => e.fmt(f), Error::IOError(e) => e.fmt(f), Error::ParseError(e) => e.fmt(f), - Error::TrackerModeIncompatible => write!(f, "{:?}", self), + Error::TrackerModeIncompatible => write!(f, "{self:?}"), } } } @@ -296,6 +296,6 @@ mod tests { fn configuration_error_could_be_displayed() { let error = Error::TrackerModeIncompatible; - assert_eq!(format!("{}", error), "TrackerModeIncompatible"); + assert_eq!(format!("{error}"), "TrackerModeIncompatible"); } } diff --git a/src/protocol/info_hash.rs b/src/protocol/info_hash.rs index 3b9b2fa35..9a0900063 100644 --- a/src/protocol/info_hash.rs +++ b/src/protocol/info_hash.rs @@ -133,7 +133,7 @@ mod tests { fn an_info_hash_should_by_displayed_like_a_40_utf8_lowercased_char_hex_string() { let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); - let output = format!("{}", info_hash); + let output = format!("{info_hash}"); assert_eq!(output, "ffffffffffffffffffffffffffffffffffffffff"); } diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 7ac6d6939..02450dc82 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -90,7 +90,7 @@ pub enum Error { impl From for Error { fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { - eprintln!("{}", e); + eprintln!("{e}"); Error::KeyVerificationError } } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index d167b3e6d..001fb2380 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -255,8 +255,10 @@ mod tests { } fn default_testing_tracker_configuration() 
-> Configuration { - let mut config = Configuration::default(); - config.log_level = Some("off".to_owned()); + let mut config = Configuration { + log_level: Some("off".to_owned()), + ..Default::default() + }; // Ephemeral socket address let port = ephemeral_random_port(); diff --git a/tests/api.rs b/tests/api.rs index 706cd0b8d..84ddac573 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -203,8 +203,10 @@ mod tracker_api { } fn tracker_configuration() -> Arc { - let mut config = Configuration::default(); - config.log_level = Some("off".to_owned()); + let mut config = Configuration { + log_level: Some("off".to_owned()), + ..Default::default() + }; // Ephemeral socket address let port = ephemeral_random_port(); diff --git a/tests/udp.rs b/tests/udp.rs index 5f7a66856..55384db05 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -28,8 +28,10 @@ mod udp_tracker_server { use crate::common::ephemeral_random_port; fn tracker_configuration() -> Arc { - let mut config = Configuration::default(); - config.log_level = Some("off".to_owned()); + let mut config = Configuration { + log_level: Some("off".to_owned()), + ..Default::default() + }; // Ephemeral socket address let port = ephemeral_random_port(); @@ -181,7 +183,7 @@ mod udp_tracker_server { /// Generates the source address for the UDP client fn source_address(port: u16) -> String { - format!("127.0.0.1:{}", port) + format!("127.0.0.1:{port}") } fn is_error_response(response: &Response, error_message: &str) -> bool { From b23d64b9c3d58a6f4f7dab8a60775fc234aaadbd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 08:49:56 +0000 Subject: [PATCH 201/435] feat: add ssl support for the API New config options have been added to support HTTPS connections to the API: ``` [http_api] ssl_enabled = false ssl_cert_path = "./storage/ssl_certificates/localhost.crt" ssl_key_path = "./storage/ssl_certificates/localhost.key" ``` --- src/api/mod.rs | 18 +++ src/api/routes.rs | 307 ++++++++++++++++++++++++++++++
src/api/server.rs | 333 +++------------------------------------ src/config.rs | 41 +++-- src/jobs/http_tracker.rs | 4 +- src/jobs/tracker_api.rs | 26 +-- src/jobs/udp_tracker.rs | 4 +- src/setup.rs | 2 +- tests/api.rs | 2 +- 9 files changed, 393 insertions(+), 344 deletions(-) create mode 100644 src/api/routes.rs diff --git a/src/api/mod.rs b/src/api/mod.rs index 16abb8e27..d254c91ac 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1,2 +1,20 @@ pub mod resource; +pub mod routes; pub mod server; + +use serde::{Deserialize, Serialize}; + +#[derive(Deserialize, Debug)] +pub struct TorrentInfoQuery { + offset: Option, + limit: Option, +} + +#[derive(Serialize, Debug)] +#[serde(tag = "status", rename_all = "snake_case")] +enum ActionStatus<'a> { + Ok, + Err { reason: std::borrow::Cow<'a, str> }, +} + +impl warp::reject::Reject for ActionStatus<'static> {} diff --git a/src/api/routes.rs b/src/api/routes.rs new file mode 100644 index 000000000..76b449e9b --- /dev/null +++ b/src/api/routes.rs @@ -0,0 +1,307 @@ +use std::cmp::min; +use std::collections::{HashMap, HashSet}; +use std::sync::Arc; +use std::time::Duration; + +use serde::Deserialize; +use warp::{filters, reply, Filter}; + +use super::resource::auth_key::AuthKey; +use super::resource::peer; +use super::resource::stats::Stats; +use super::resource::torrent::{ListItem, Torrent}; +use super::{ActionStatus, TorrentInfoQuery}; +use crate::protocol::info_hash::InfoHash; +use crate::tracker; + +fn authenticate(tokens: HashMap) -> impl Filter + Clone { + #[derive(Deserialize)] + struct AuthToken { + token: Option, + } + + let tokens: HashSet = tokens.into_values().collect(); + + let tokens = Arc::new(tokens); + warp::filters::any::any() + .map(move || tokens.clone()) + .and(filters::query::query::()) + .and_then(|tokens: Arc>, token: AuthToken| async move { + match token.token { + Some(token) => { + if !tokens.contains(&token) { + return Err(warp::reject::custom(ActionStatus::Err { + reason: "token not valid".into(), 
+ })); + } + + Ok(()) + } + None => Err(warp::reject::custom(ActionStatus::Err { + reason: "unauthorized".into(), + })), + } + }) + .untuple_one() +} + +#[allow(clippy::too_many_lines)] +#[must_use] +pub fn routes(tracker: &Arc) -> impl Filter + Clone { + // GET /api/torrents?offset=:u32&limit=:u32 + // View torrent list + let api_torrents = tracker.clone(); + let view_torrent_list = filters::method::get() + .and(filters::path::path("torrents")) + .and(filters::path::end()) + .and(filters::query::query()) + .map(move |limits| { + let tracker = api_torrents.clone(); + (limits, tracker) + }) + .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| async move { + let offset = limits.offset.unwrap_or(0); + let limit = min(limits.limit.unwrap_or(1000), 4000); + + let db = tracker.get_torrents().await; + let results: Vec<_> = db + .iter() + .map(|(info_hash, torrent_entry)| { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + ListItem { + info_hash: info_hash.to_string(), + seeders, + completed, + leechers, + peers: None, + } + }) + .skip(offset as usize) + .take(limit as usize) + .collect(); + + Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) + }); + + // GET /api/stats + // View tracker status + let api_stats = tracker.clone(); + let view_stats_list = filters::method::get() + .and(filters::path::path("stats")) + .and(filters::path::end()) + .map(move || api_stats.clone()) + .and_then(|tracker: Arc| async move { + let mut results = Stats { + torrents: 0, + seeders: 0, + completed: 0, + leechers: 0, + tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + udp6_announces_handled: 0, + udp6_scrapes_handled: 0, + }; + + let db = tracker.get_torrents().await; + + db.values().for_each(|torrent_entry| { + let 
(seeders, completed, leechers) = torrent_entry.get_stats(); + results.seeders += seeders; + results.completed += completed; + results.leechers += leechers; + results.torrents += 1; + }); + + let stats = tracker.get_stats().await; + + #[allow(clippy::cast_possible_truncation)] + { + results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; + results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; + results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; + results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; + results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; + results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; + results.udp4_connections_handled = stats.udp4_connections_handled as u32; + results.udp4_announces_handled = stats.udp4_announces_handled as u32; + results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; + results.udp6_connections_handled = stats.udp6_connections_handled as u32; + results.udp6_announces_handled = stats.udp6_announces_handled as u32; + results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; + } + + Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) + }); + + // GET /api/torrent/:info_hash + // View torrent info + let t2 = tracker.clone(); + let view_torrent_info = filters::method::get() + .and(filters::path::path("torrent")) + .and(filters::path::param()) + .and(filters::path::end()) + .map(move |info_hash: InfoHash| { + let tracker = t2.clone(); + (info_hash, tracker) + }) + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + let db = tracker.get_torrents().await; + let torrent_entry_option = db.get(&info_hash); + + let torrent_entry = match torrent_entry_option { + Some(torrent_entry) => torrent_entry, + None => { + return Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")); + } + }; + let (seeders, completed, leechers) = torrent_entry.get_stats(); + + let peers = 
torrent_entry.get_peers(None); + + let peer_resources = peers.iter().map(|peer| peer::Peer::from(**peer)).collect(); + + Ok(reply::json(&Torrent { + info_hash: info_hash.to_string(), + seeders, + completed, + leechers, + peers: Some(peer_resources), + })) + }); + + // DELETE /api/whitelist/:info_hash + // Delete info hash from whitelist + let t3 = tracker.clone(); + let delete_torrent = filters::method::delete() + .and(filters::path::path("whitelist")) + .and(filters::path::param()) + .and(filters::path::end()) + .map(move |info_hash: InfoHash| { + let tracker = t3.clone(); + (info_hash, tracker) + }) + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + match tracker.remove_torrent_from_whitelist(&info_hash).await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to remove torrent from whitelist".into(), + })), + } + }); + + // POST /api/whitelist/:info_hash + // Add info hash to whitelist + let t4 = tracker.clone(); + let add_torrent = filters::method::post() + .and(filters::path::path("whitelist")) + .and(filters::path::param()) + .and(filters::path::end()) + .map(move |info_hash: InfoHash| { + let tracker = t4.clone(); + (info_hash, tracker) + }) + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + match tracker.add_torrent_to_whitelist(&info_hash).await { + Ok(..) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(..) 
=> Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to whitelist torrent".into(), + })), + } + }); + + // POST /api/key/:seconds_valid + // Generate new key + let t5 = tracker.clone(); + let create_key = filters::method::post() + .and(filters::path::path("key")) + .and(filters::path::param()) + .and(filters::path::end()) + .map(move |seconds_valid: u64| { + let tracker = t5.clone(); + (seconds_valid, tracker) + }) + .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { + match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { + Ok(auth_key) => Ok(warp::reply::json(&AuthKey::from(auth_key))), + Err(..) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to generate key".into(), + })), + } + }); + + // DELETE /api/key/:key + // Delete key + let t6 = tracker.clone(); + let delete_key = filters::method::delete() + .and(filters::path::path("key")) + .and(filters::path::param()) + .and(filters::path::end()) + .map(move |key: String| { + let tracker = t6.clone(); + (key, tracker) + }) + .and_then(|(key, tracker): (String, Arc)| async move { + match tracker.remove_auth_key(&key).await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to delete key".into(), + })), + } + }); + + // GET /api/whitelist/reload + // Reload whitelist + let t7 = tracker.clone(); + let reload_whitelist = filters::method::get() + .and(filters::path::path("whitelist")) + .and(filters::path::path("reload")) + .and(filters::path::end()) + .map(move || t7.clone()) + .and_then(|tracker: Arc| async move { + match tracker.load_whitelist().await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to reload whitelist".into(), + })), + } + }); + + // GET /api/keys/reload + // Reload whitelist + let t8 = tracker.clone(); + let reload_keys = filters::method::get() + .and(filters::path::path("keys")) + 
.and(filters::path::path("reload")) + .and(filters::path::end()) + .map(move || t8.clone()) + .and_then(|tracker: Arc| async move { + match tracker.load_keys().await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to reload keys".into(), + })), + } + }); + + let api_routes = filters::path::path("api").and( + view_torrent_list + .or(delete_torrent) + .or(view_torrent_info) + .or(view_stats_list) + .or(add_torrent) + .or(create_key) + .or(delete_key) + .or(reload_whitelist) + .or(reload_keys), + ); + + api_routes.and(authenticate(tracker.config.http_api.access_tokens.clone())) +} diff --git a/src/api/server.rs b/src/api/server.rs index 5967a8be4..5d6a3cdfd 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -1,327 +1,32 @@ -use std::cmp::min; -use std::collections::{HashMap, HashSet}; use std::net::SocketAddr; use std::sync::Arc; -use std::time::Duration; -use serde::{Deserialize, Serialize}; -use warp::{filters, reply, serve, Filter}; +use warp::serve; -use super::resource::auth_key::AuthKey; -use super::resource::peer; -use super::resource::stats::Stats; -use super::resource::torrent::{ListItem, Torrent}; -use crate::protocol::info_hash::InfoHash; +use super::routes::routes; use crate::tracker; -#[derive(Deserialize, Debug)] -struct TorrentInfoQuery { - offset: Option, - limit: Option, -} - -#[derive(Serialize, Debug)] -#[serde(tag = "status", rename_all = "snake_case")] -enum ActionStatus<'a> { - Ok, - Err { reason: std::borrow::Cow<'a, str> }, -} - -impl warp::reject::Reject for ActionStatus<'static> {} - -fn authenticate(tokens: HashMap) -> impl Filter + Clone { - #[derive(Deserialize)] - struct AuthToken { - token: Option, - } - - let tokens: HashSet = tokens.into_iter().map(|(_, v)| v).collect(); - - let tokens = Arc::new(tokens); - warp::filters::any::any() - .map(move || tokens.clone()) - .and(filters::query::query::()) - .and_then(|tokens: Arc>, token: AuthToken| async 
move { - match token.token { - Some(token) => { - if !tokens.contains(&token) { - return Err(warp::reject::custom(ActionStatus::Err { - reason: "token not valid".into(), - })); - } - - Ok(()) - } - None => Err(warp::reject::custom(ActionStatus::Err { - reason: "unauthorized".into(), - })), - } - }) - .untuple_one() -} - -#[allow(clippy::too_many_lines)] pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl warp::Future { - // GET /api/torrents?offset=:u32&limit=:u32 - // View torrent list - let api_torrents = tracker.clone(); - let view_torrent_list = filters::method::get() - .and(filters::path::path("torrents")) - .and(filters::path::end()) - .and(filters::query::query()) - .map(move |limits| { - let tracker = api_torrents.clone(); - (limits, tracker) - }) - .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| async move { - let offset = limits.offset.unwrap_or(0); - let limit = min(limits.limit.unwrap_or(1000), 4000); - - let db = tracker.get_torrents().await; - let results: Vec<_> = db - .iter() - .map(|(info_hash, torrent_entry)| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - ListItem { - info_hash: info_hash.to_string(), - seeders, - completed, - leechers, - peers: None, - } - }) - .skip(offset as usize) - .take(limit as usize) - .collect(); - - Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) - }); - - // GET /api/stats - // View tracker status - let api_stats = tracker.clone(); - let view_stats_list = filters::method::get() - .and(filters::path::path("stats")) - .and(filters::path::end()) - .map(move || api_stats.clone()) - .and_then(|tracker: Arc| async move { - let mut results = Stats { - torrents: 0, - seeders: 0, - completed: 0, - leechers: 0, - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 0, - 
udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0, - }; - - let db = tracker.get_torrents().await; - - db.values().for_each(|torrent_entry| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - results.seeders += seeders; - results.completed += completed; - results.leechers += leechers; - results.torrents += 1; - }); - - let stats = tracker.get_stats().await; - - #[allow(clippy::cast_possible_truncation)] - { - results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; - results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; - results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; - results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; - results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; - results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; - results.udp4_connections_handled = stats.udp4_connections_handled as u32; - results.udp4_announces_handled = stats.udp4_announces_handled as u32; - results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; - results.udp6_connections_handled = stats.udp6_connections_handled as u32; - results.udp6_announces_handled = stats.udp6_announces_handled as u32; - results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; - } - - Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) - }); - - // GET /api/torrent/:info_hash - // View torrent info - let t2 = tracker.clone(); - let view_torrent_info = filters::method::get() - .and(filters::path::path("torrent")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { - let tracker = t2.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { - let db = tracker.get_torrents().await; - let torrent_entry_option = db.get(&info_hash); - - let torrent_entry = match torrent_entry_option { - Some(torrent_entry) => torrent_entry, - None => { - return 
Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")); - } - }; - let (seeders, completed, leechers) = torrent_entry.get_stats(); - - let peers = torrent_entry.get_peers(None); - - let peer_resources = peers.iter().map(|peer| peer::Peer::from(**peer)).collect(); - - Ok(reply::json(&Torrent { - info_hash: info_hash.to_string(), - seeders, - completed, - leechers, - peers: Some(peer_resources), - })) - }); - - // DELETE /api/whitelist/:info_hash - // Delete info hash from whitelist - let t3 = tracker.clone(); - let delete_torrent = filters::method::delete() - .and(filters::path::path("whitelist")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { - let tracker = t3.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { - match tracker.remove_torrent_from_whitelist(&info_hash).await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to remove torrent from whitelist".into(), - })), - } - }); - - // POST /api/whitelist/:info_hash - // Add info hash to whitelist - let t4 = tracker.clone(); - let add_torrent = filters::method::post() - .and(filters::path::path("whitelist")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { - let tracker = t4.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { - match tracker.add_torrent_to_whitelist(&info_hash).await { - Ok(..) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(..) 
=> Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to whitelist torrent".into(), - })), - } - }); - - // POST /api/key/:seconds_valid - // Generate new key - let t5 = tracker.clone(); - let create_key = filters::method::post() - .and(filters::path::path("key")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |seconds_valid: u64| { - let tracker = t5.clone(); - (seconds_valid, tracker) - }) - .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { - match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { - Ok(auth_key) => Ok(warp::reply::json(&AuthKey::from(auth_key))), - Err(..) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to generate key".into(), - })), - } - }); - - // DELETE /api/key/:key - // Delete key - let t6 = tracker.clone(); - let delete_key = filters::method::delete() - .and(filters::path::path("key")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |key: String| { - let tracker = t6.clone(); - (key, tracker) - }) - .and_then(|(key, tracker): (String, Arc)| async move { - match tracker.remove_auth_key(&key).await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to delete key".into(), - })), - } - }); + let (_addr, api_server) = serve(routes(tracker)).bind_with_graceful_shutdown(socket_addr, async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + }); - // GET /api/whitelist/reload - // Reload whitelist - let t7 = tracker.clone(); - let reload_whitelist = filters::method::get() - .and(filters::path::path("whitelist")) - .and(filters::path::path("reload")) - .and(filters::path::end()) - .map(move || t7.clone()) - .and_then(|tracker: Arc| async move { - match tracker.load_whitelist().await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to reload 
whitelist".into(), - })), - } - }); + api_server +} - // GET /api/keys/reload - // Reload whitelist - let t8 = tracker.clone(); - let reload_keys = filters::method::get() - .and(filters::path::path("keys")) - .and(filters::path::path("reload")) - .and(filters::path::end()) - .map(move || t8.clone()) - .and_then(|tracker: Arc| async move { - match tracker.load_keys().await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to reload keys".into(), - })), - } +pub fn start_tls( + socket_addr: SocketAddr, + ssl_cert_path: String, + ssl_key_path: String, + tracker: &Arc, +) -> impl warp::Future { + let (_addr, api_server) = serve(routes(tracker)) + .tls() + .cert_path(ssl_cert_path) + .key_path(ssl_key_path) + .bind_with_graceful_shutdown(socket_addr, async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); }); - let api_routes = filters::path::path("api").and( - view_torrent_list - .or(delete_torrent) - .or(view_torrent_info) - .or(view_stats_list) - .or(add_torrent) - .or(create_key) - .or(delete_key) - .or(reload_whitelist) - .or(reload_keys), - ); - - let server = api_routes.and(authenticate(tracker.config.http_api.access_tokens.clone())); - - let (_addr, api_server) = serve(server).bind_with_graceful_shutdown(socket_addr, async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - }); - api_server } diff --git a/src/config.rs b/src/config.rs index ba99e0f45..66def17cd 100644 --- a/src/config.rs +++ b/src/config.rs @@ -30,10 +30,16 @@ pub struct HttpTracker { pub ssl_key_path: Option, } +#[serde_as] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct HttpApi { pub enabled: bool, pub bind_address: String, + pub ssl_enabled: bool, + #[serde_as(as = "NoneAsEmptyString")] + pub ssl_cert_path: Option, + #[serde_as(as = "NoneAsEmptyString")] + pub ssl_key_path: Option, pub access_tokens: HashMap, } @@ -81,20 
+87,8 @@ impl std::fmt::Display for Error { impl std::error::Error for Error {} -impl Configuration { - #[must_use] - pub fn get_ext_ip(&self) -> Option { - match &self.external_ip { - None => None, - Some(external_ip) => match IpAddr::from_str(external_ip) { - Ok(external_ip) => Some(external_ip), - Err(_) => None, - }, - } - } - - #[must_use] - pub fn default() -> Configuration { +impl Default for Configuration { + fn default() -> Self { let mut configuration = Configuration { log_level: Option::from(String::from("info")), mode: mode::Mode::Public, @@ -114,6 +108,9 @@ impl Configuration { http_api: HttpApi { enabled: true, bind_address: String::from("127.0.0.1:1212"), + ssl_enabled: false, + ssl_cert_path: None, + ssl_key_path: None, access_tokens: [(String::from("admin"), String::from("MyAccessToken"))] .iter() .cloned() @@ -133,6 +130,19 @@ impl Configuration { }); configuration } +} + +impl Configuration { + #[must_use] + pub fn get_ext_ip(&self) -> Option { + match &self.external_ip { + None => None, + Some(external_ip) => match IpAddr::from_str(external_ip) { + Ok(external_ip) => Some(external_ip), + Err(_) => None, + }, + } + } /// # Errors /// @@ -208,6 +218,9 @@ mod tests { [http_api] enabled = true bind_address = "127.0.0.1:1212" + ssl_enabled = false + ssl_cert_path = "" + ssl_key_path = "" [http_api.access_tokens] admin = "MyAccessToken" diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index b8f031f5a..c62bc5cc9 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -22,10 +22,10 @@ pub fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHa let http_tracker = Http::new(tracker); if !ssl_enabled { - info!("Starting HTTP server on: {}", bind_addr); + info!("Starting HTTP server on: http://{}", bind_addr); http_tracker.start(bind_addr).await; } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { - info!("Starting HTTPS server on: {} (TLS)", bind_addr); + info!("Starting HTTPS server on: 
https://{} (TLS)", bind_addr); http_tracker .start_tls(bind_addr, ssl_cert_path.unwrap(), ssl_key_path.unwrap()) .await; diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index 2c00aa453..211174f35 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -5,7 +5,7 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use crate::api::server; -use crate::config::Configuration; +use crate::config::HttpApi; use crate::tracker; #[derive(Debug)] @@ -14,24 +14,30 @@ pub struct ApiServerJobStarted(); /// # Panics /// /// It would panic if unable to send the `ApiServerJobStarted` notice. -pub async fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { +pub async fn start_job(config: &HttpApi, tracker: Arc) -> JoinHandle<()> { let bind_addr = config - .http_api .bind_address .parse::() .expect("Tracker API bind_address invalid."); - - info!("Starting Torrust API server on: {}", bind_addr); + let ssl_enabled = config.ssl_enabled; + let ssl_cert_path = config.ssl_cert_path.clone(); + let ssl_key_path = config.ssl_key_path.clone(); let (tx, rx) = oneshot::channel::(); // Run the API server let join_handle = tokio::spawn(async move { - let handel = server::start(bind_addr, &tracker); - - tx.send(ApiServerJobStarted()).expect("the start job dropped"); - - handel.await; + if !ssl_enabled { + info!("Starting Torrust API server on: http://{}", bind_addr); + let handle = server::start(bind_addr, &tracker); + tx.send(ApiServerJobStarted()).expect("the start job dropped"); + handle.await; + } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { + info!("Starting Torrust API server on: https://{}", bind_addr); + let handle = server::start_tls(bind_addr, ssl_cert_path.unwrap(), ssl_key_path.unwrap(), &tracker); + tx.send(ApiServerJobStarted()).expect("the start job dropped"); + handle.await; + } }); // Wait until the API server job is running diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index 
57369f660..d0907c976 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -14,11 +14,11 @@ pub fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHan tokio::spawn(async move { match Udp::new(tracker, &bind_addr).await { Ok(udp_server) => { - info!("Starting UDP server on: {}", bind_addr); + info!("Starting UDP server on: udp://{}", bind_addr); udp_server.start().await; } Err(e) => { - warn!("Could not start UDP tracker on: {}", bind_addr); + warn!("Could not start UDP tracker on: udp://{}", bind_addr); error!("{}", e); } } diff --git a/src/setup.rs b/src/setup.rs index a7b7c5a82..c045310bb 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -49,7 +49,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Ve // Start HTTP API server if config.http_api.enabled { - jobs.push(tracker_api::start_job(config, tracker.clone()).await); + jobs.push(tracker_api::start_job(&config.http_api, tracker.clone()).await); } // Remove torrents without peers, every interval diff --git a/tests/api.rs b/tests/api.rs index 84ddac573..dfb8d81b3 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -292,7 +292,7 @@ mod tracker_api { logging::setup(&configuration); // Start the HTTP API job - self.job = Some(tracker_api::start_job(&configuration, tracker).await); + self.job = Some(tracker_api::start_job(&configuration.http_api, tracker).await); self.started.store(true, Ordering::Relaxed); } From 19abf0f31df8fcc0c9332f60ae6ab74c181df776 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 11:06:10 +0000 Subject: [PATCH 202/435] fix: error when udp response can't be written Instead of using a "debug" log level. 
--- src/udp/server.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/udp/server.rs b/src/udp/server.rs index 5bd835365..a868cbd10 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -3,7 +3,7 @@ use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::Response; -use log::{debug, info}; +use log::{debug, error, info}; use tokio::net::UdpSocket; use crate::tracker; @@ -71,7 +71,7 @@ impl Udp { Udp::send_packet(socket, &remote_addr, &inner[..position]).await; } Err(_) => { - debug!("could not write response to bytes."); + error!("could not write response to bytes."); } } } From b1ec9dfc86643be3463a3b4d7b7cd1ed2bf2a4b4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 11:15:50 +0000 Subject: [PATCH 203/435] feat: change udp tracker console output Using "debug" for sensitive data like IP address and info for generic info we can log even on production. --- src/udp/server.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/udp/server.rs b/src/udp/server.rs index a868cbd10..e85c81e9d 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -45,10 +45,12 @@ impl Udp { Ok((valid_bytes, remote_addr)) = socket.recv_from(&mut data) => { let payload = data[..valid_bytes].to_vec(); - debug!("Received {} bytes from {}", payload.len(), remote_addr); - debug!("{:?}", payload); + info!("Received {} bytes", payload.len()); + debug!("From: {}", &remote_addr); + debug!("Payload: {:?}", payload); let response = handle_packet(remote_addr, payload, tracker).await; + Udp::send_response(socket, remote_addr, response).await; } } @@ -56,8 +58,6 @@ impl Udp { } async fn send_response(socket: Arc, remote_addr: SocketAddr, response: Response) { - debug!("sending response to: {:?}", &remote_addr); - let buffer = vec![0u8; MAX_PACKET_SIZE]; let mut cursor = Cursor::new(buffer); @@ -67,8 +67,13 @@ impl Udp { let position = cursor.position() as usize; let inner = cursor.get_ref(); - debug!("{:?}", 
&inner[..position]); + info!("Sending {} bytes ...", &inner[..position].len()); + debug!("To: {:?}", &remote_addr); + debug!("Payload: {:?}", &inner[..position]); + Udp::send_packet(socket, &remote_addr, &inner[..position]).await; + + info!("{} bytes sent", &inner[..position].len()); } Err(_) => { error!("could not write response to bytes."); From ca0e8afce4a4b5430631020648894215865fe838 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 11:20:13 +0000 Subject: [PATCH 204/435] feat: change default http tracker port to 7070 Azure Container Instances do not allow you to open the same port as UDP and TCP. --- README.md | 2 +- src/config.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index beb2591ea..4e464dd68 100644 --- a/README.md +++ b/README.md @@ -70,7 +70,7 @@ bind_address = "0.0.0.0:6969" [[http_trackers]] enabled = true -bind_address = "0.0.0.0:6969" +bind_address = "0.0.0.0:7070" ssl_enabled = false ssl_cert_path = "" ssl_key_path = "" diff --git a/src/config.rs b/src/config.rs index 66def17cd..d56c2d34d 100644 --- a/src/config.rs +++ b/src/config.rs @@ -123,7 +123,7 @@ impl Default for Configuration { }); configuration.http_trackers.push(HttpTracker { enabled: false, - bind_address: String::from("0.0.0.0:6969"), + bind_address: String::from("0.0.0.0:7070"), ssl_enabled: false, ssl_cert_path: None, ssl_key_path: None, @@ -210,7 +210,7 @@ mod tests { [[http_trackers]] enabled = false - bind_address = "0.0.0.0:6969" + bind_address = "0.0.0.0:7070" ssl_enabled = false ssl_cert_path = "" ssl_key_path = "" From 269e5f5bb085d08f00edb98305313f7b86471719 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 12:00:26 +0000 Subject: [PATCH 205/435] feat: move default db_path to storage folder Azure Container Intances do not allow you to mount a single file. I've created a storage folder where we can put all the things we want to persist. 
--- .gitignore | 2 ++ src/config.rs | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index ba9ceeb53..b80e2038c 100644 --- a/.gitignore +++ b/.gitignore @@ -6,4 +6,6 @@ /config.toml /data.db /.vscode/launch.json +/storage/ + diff --git a/src/config.rs b/src/config.rs index d56c2d34d..fdfcb3d09 100644 --- a/src/config.rs +++ b/src/config.rs @@ -93,7 +93,7 @@ impl Default for Configuration { log_level: Option::from(String::from("info")), mode: mode::Mode::Public, db_driver: Driver::Sqlite3, - db_path: String::from("data.db"), + db_path: String::from("./storage/database/data.db"), announce_interval: 120, min_announce_interval: 120, max_peer_timeout: 900, @@ -193,7 +193,7 @@ mod tests { let config = r#"log_level = "info" mode = "public" db_driver = "Sqlite3" - db_path = "data.db" + db_path = "./storage/database/data.db" announce_interval = 120 min_announce_interval = 120 max_peer_timeout = 900 From 3098ed2c59c420167797b3a6f697c697d440c0f2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 13:26:09 +0000 Subject: [PATCH 206/435] feat: remove strip from Cargo.toml The option "strip = true" in the Cargo.toml file prevetns docker to use the cache for the cargo dependencies. ``` [profile.release] ... 
strip = true ``` More info: https://github.com/LukeMathWalker/cargo-chef/issues/172 --- Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 80e9009f1..6e835bcb5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,7 +16,6 @@ lto = "thin" debug = 1 opt-level = 3 lto = "fat" -strip = true [dependencies] tokio = { version = "1", features = [ From f8700aacaeaf1fb9d0201377aa35414012e298e4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 13:30:59 +0000 Subject: [PATCH 207/435] feat: allow to inject configuration from env var You can use an env var to pass the configuration instead of using the configuration file in the root folder `config.toml` ``` TORRUST_TRACKER_CONFIG=$(cat config.toml) TORRUST_TRACKER_CONFIG=`cat config.toml` cargo run ``` This allow the applciation to be executed in dockerized environments whithout needing to mount a file or volume for the configuration. --- src/config.rs | 26 +++++++++++++++++++++++--- src/main.rs | 15 +++++++++------ 2 files changed, 32 insertions(+), 9 deletions(-) diff --git a/src/config.rs b/src/config.rs index fdfcb3d09..48e28b358 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,10 +1,10 @@ use std::collections::HashMap; -use std::fs; use std::net::IpAddr; use std::path::Path; use std::str::FromStr; +use std::{env, fs}; -use config::{Config, ConfigError, File}; +use config::{Config, ConfigError, File, FileFormat}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, NoneAsEmptyString}; use {std, toml}; @@ -164,7 +164,7 @@ impl Configuration { let config = Configuration::default(); config.save_to_file(path)?; return Err(Error::Message( - "Please edit the config.TOML in the root folder and restart the tracker.".to_string(), + "Please edit the config.TOML and restart the tracker.".to_string(), )); } @@ -173,6 +173,26 @@ impl Configuration { Ok(torrust_config) } + /// # Errors + /// + /// Will return `Err` if the environment variable does not exist or has a bad 
configuration. + pub fn load_from_env_var(config_env_var_name: &str) -> Result { + match env::var(config_env_var_name) { + Ok(config_toml) => { + let config_builder = Config::builder() + .add_source(File::from_str(&config_toml, FileFormat::Toml)) + .build() + .map_err(Error::ConfigError)?; + let config = config_builder.try_deserialize().map_err(Error::ConfigError)?; + Ok(config) + } + Err(_) => Err(Error::Message(format!( + "No environment variable for configuration found: {}", + &config_env_var_name + ))), + } + } + /// # Errors /// /// Will return `Err` if `filename` does not exist or the user does not have diff --git a/src/main.rs b/src/main.rs index a7316cef2..199e8f5c5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,3 +1,4 @@ +use std::env; use std::sync::Arc; use log::info; @@ -7,7 +8,8 @@ use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time, trac #[tokio::main] async fn main() { - const CONFIG_PATH: &str = "config.toml"; + const CONFIG_PATH: &str = "./config.toml"; + const CONFIG_ENV_VAR_NAME: &str = "TORRUST_TRACKER_CONFIG"; // Set the time of Torrust app starting lazy_static::initialize(&static_time::TIME_AT_APP_START); @@ -16,11 +18,12 @@ async fn main() { lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); // Initialize Torrust config - let config = match Configuration::load_from_file(CONFIG_PATH) { - Ok(config) => Arc::new(config), - Err(error) => { - panic!("{}", error) - } + let config = if env::var(CONFIG_ENV_VAR_NAME).is_ok() { + println!("Loading configuration from env var {CONFIG_ENV_VAR_NAME}"); + Arc::new(Configuration::load_from_env_var(CONFIG_ENV_VAR_NAME).unwrap()) + } else { + println!("Loading configuration from config file {CONFIG_PATH}"); + Arc::new(Configuration::load_from_file(CONFIG_PATH).unwrap()) }; // Initialize statistics From 46e1a37ec08d6ebaee294348e1fa64245e7d5046 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 13:34:57 +0000 Subject: [PATCH 208/435] feat: docker support --- 
.dockerignore | 16 ++ .env.local | 1 + .github/workflows/publish_docker_image.yml | 73 ++++++ .github/workflows/test_docker.yml | 26 +++ .gitignore | 1 + Dockerfile | 80 +++++++ bin/install.sh | 13 ++ cSpell.json | 9 + compose.yaml | 48 ++++ config.toml.local | 34 +++ docker/README.md | 250 +++++++++++++++++++++ docker/bin/build.sh | 13 ++ docker/bin/install.sh | 4 + docker/bin/run.sh | 13 ++ 14 files changed, 581 insertions(+) create mode 100644 .dockerignore create mode 100644 .env.local create mode 100644 .github/workflows/publish_docker_image.yml create mode 100644 .github/workflows/test_docker.yml create mode 100644 Dockerfile create mode 100755 bin/install.sh create mode 100644 compose.yaml create mode 100644 config.toml.local create mode 100644 docker/README.md create mode 100755 docker/bin/build.sh create mode 100755 docker/bin/install.sh create mode 100755 docker/bin/run.sh diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..3d8a25cce --- /dev/null +++ b/.dockerignore @@ -0,0 +1,16 @@ +.git +.git-blame-ignore +.github +.gitignore +.vscode +bin/ +config.toml +config.toml.local +cSpell.json +data.db +docker/ +NOTICE +README.md +rustfmt.toml +storage/ +target/ diff --git a/.env.local b/.env.local new file mode 100644 index 000000000..fefed56c4 --- /dev/null +++ b/.env.local @@ -0,0 +1 @@ +TORRUST_TRACKER_USER_UID=1000 \ No newline at end of file diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml new file mode 100644 index 000000000..b8e3791ed --- /dev/null +++ b/.github/workflows/publish_docker_image.yml @@ -0,0 +1,73 @@ +name: Publish docker image + +on: + push: + branches: + - 'develop' + # todo: only during development of issue 11 + - 'docker' + - 'docker-reorganized-pr' + tags: + - "v*" + +env: + # Azure file share volume mount requires the Linux container run as root + # 
https://learn.microsoft.com/en-us/azure/container-instances/container-instances-volume-azure-files#limitations + TORRUST_TRACKER_RUN_AS_USER: root + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + components: llvm-tools-preview + - uses: Swatinem/rust-cache@v1 + - name: Run Tests + run: cargo test + + dockerhub: + needs: test + runs-on: ubuntu-latest + environment: dockerhub-torrust + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Docker meta + id: meta + uses: docker/metadata-action@v4 + with: + images: | + # For example: torrust/tracker + "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" + tags: | + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_HUB_USERNAME }} + password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Build and push + uses: docker/build-push-action@v3 + with: + context: . + file: ./Dockerfile + build-args: | + RUN_AS_USER=${{ env.TORRUST_TRACKER_RUN_AS_USER }} + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.github/workflows/test_docker.yml b/.github/workflows/test_docker.yml new file mode 100644 index 000000000..2cfa4de5c --- /dev/null +++ b/.github/workflows/test_docker.yml @@ -0,0 +1,26 @@ +name: Test docker build + +on: + push: + pull_request: + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Build docker image + uses: docker/build-push-action@v3 + with: + context: . 
+ file: ./Dockerfile + push: false + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Build docker-compose images + run: docker compose build diff --git a/.gitignore b/.gitignore index b80e2038c..d574298da 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +.env /target **/*.rs.bk /database.json.bz2 diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..96d21fa84 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,80 @@ +FROM clux/muslrust:stable AS chef +WORKDIR /app +RUN cargo install cargo-chef + + +FROM chef AS planner +WORKDIR /app +COPY . . +RUN cargo chef prepare --recipe-path recipe.json + + +FROM chef as development +WORKDIR /app +ARG UID=1000 +ARG RUN_AS_USER=appuser +ARG TRACKER_UDP_PORT=6969 +ARG TRACKER_HTTP_PORT=7070 +ARG TRACKER_API_PORT=1212 +# Add the app user for development +ENV USER=appuser +ENV UID=$UID +RUN adduser --uid "${UID}" "${USER}" +# Build dependencies +COPY --from=planner /app/recipe.json recipe.json +RUN cargo chef cook --recipe-path recipe.json +# Build the application +COPY . . +RUN cargo build --bin torrust-tracker +USER $RUN_AS_USER:$RUN_AS_USER +EXPOSE $TRACKER_UDP_PORT/udp +EXPOSE $TRACKER_HTTP_PORT/tcp +EXPOSE $TRACKER_API_PORT/tcp +CMD ["cargo", "run"] + + +FROM chef AS builder +WORKDIR /app +ARG UID=1000 +# Add the app user for production +ENV USER=appuser +ENV UID=$UID +RUN adduser \ + --disabled-password \ + --gecos "" \ + --home "/nonexistent" \ + --shell "/sbin/nologin" \ + --no-create-home \ + --uid "${UID}" \ + "${USER}" +# Build dependencies +COPY --from=planner /app/recipe.json recipe.json +RUN cargo chef cook --release --target x86_64-unknown-linux-musl --recipe-path recipe.json +# Build the application +COPY . . 
+RUN cargo build --release --target x86_64-unknown-linux-musl --bin torrust-tracker +# Strip the binary +# More info: https://github.com/LukeMathWalker/cargo-chef/issues/149 +RUN strip /app/target/x86_64-unknown-linux-musl/release/torrust-tracker + + +FROM alpine:latest +WORKDIR /app +ARG RUN_AS_USER=appuser +ARG TRACKER_UDP_PORT=6969 +ARG TRACKER_HTTP_PORT=7070 +ARG TRACKER_API_PORT=1212 +RUN apk --no-cache add ca-certificates +ENV TZ=Etc/UTC +ENV RUN_AS_USER=$RUN_AS_USER +COPY --from=builder /etc/passwd /etc/passwd +COPY --from=builder /etc/group /etc/group +COPY --from=builder --chown=$RUN_AS_USER \ + /app/target/x86_64-unknown-linux-musl/release/torrust-tracker \ + /app/torrust-tracker +RUN chown -R $RUN_AS_USER:$RUN_AS_USER /app +USER $RUN_AS_USER:$RUN_AS_USER +EXPOSE $TRACKER_UDP_PORT/udp +EXPOSE $TRACKER_HTTP_PORT/tcp +EXPOSE $TRACKER_API_PORT/tcp +ENTRYPOINT ["/app/torrust-tracker"] \ No newline at end of file diff --git a/bin/install.sh b/bin/install.sh new file mode 100755 index 000000000..d4314ce93 --- /dev/null +++ b/bin/install.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +# Generate the default settings file if it does not exist +if ! [ -f "./config.toml" ]; then + cp ./config.toml.local ./config.toml +fi + +# Generate the sqlite database if it does not exist +if ! 
[ -f "./storage/database/data.db" ]; then + # todo: it should get the path from config.toml and only do it when we use sqlite + touch ./storage/database/data.db + echo ";" | sqlite3 ./storage/database/data.db +fi diff --git a/cSpell.json b/cSpell.json index cc3359d58..5bc67a0c8 100644 --- a/cSpell.json +++ b/cSpell.json @@ -9,31 +9,38 @@ "Bitflu", "bools", "bufs", + "Buildx", "byteorder", "canonicalize", "canonicalized", "chrono", "clippy", "completei", + "dockerhub", "downloadedi", "filesd", "Freebox", "hasher", "hexlify", + "hlocalhost", "Hydranode", "incompletei", + "infoschema", "intervali", "leecher", "leechers", "libtorrent", "Lphant", "mockall", + "myacicontext", "nanos", "nextest", "nocapture", "oneshot", "ostr", "Pando", + "proot", + "Quickstart", "Rasterbar", "repr", "reqwest", @@ -50,9 +57,11 @@ "thiserror", "Torrentstorm", "torrust", + "torrustracker", "typenum", "Unamed", "untuple", + "uroot", "Vagaa", "Xtorrent", "Xunlei" diff --git a/compose.yaml b/compose.yaml new file mode 100644 index 000000000..d11f9c8ae --- /dev/null +++ b/compose.yaml @@ -0,0 +1,48 @@ +name: torrust +services: + + tracker: + build: + context: . 
+ target: development + user: ${TORRUST_TRACKER_USER_UID:-1000}:${TORRUST_TRACKER_USER_UID:-1000} + tty: true + networks: + - server_side + ports: + - 6969:6969/udp + - 7070:7070 + - 1212:1212 + volumes: + - ./:/app + - ~/.cargo:/home/appuser/.cargo + depends_on: + - mysql + + mysql: + image: mysql:8.0 + command: '--default-authentication-plugin=mysql_native_password' + restart: always + healthcheck: + test: ['CMD-SHELL', 'mysqladmin ping -h 127.0.0.1 --password="$$(cat /run/secrets/db-password)" --silent'] + interval: 3s + retries: 5 + start_period: 30s + environment: + - MYSQL_ROOT_HOST=% + - MYSQL_ROOT_PASSWORD=root_secret_password + - MYSQL_DATABASE=torrust_tracker + - MYSQL_USER=db_user + - MYSQL_PASSWORD=db_user_secret_password + networks: + - server_side + ports: + - 3306:3306 + volumes: + - mysql_data:/var/lib/mysql + +networks: + server_side: {} + +volumes: + mysql_data: {} \ No newline at end of file diff --git a/config.toml.local b/config.toml.local new file mode 100644 index 000000000..baf272d5a --- /dev/null +++ b/config.toml.local @@ -0,0 +1,34 @@ +log_level = "info" +mode = "public" +db_driver = "Sqlite3" +db_path = "./storage/database/data.db" +announce_interval = 120 +min_announce_interval = 120 +max_peer_timeout = 900 +on_reverse_proxy = false +external_ip = "0.0.0.0" +tracker_usage_statistics = true +persistent_torrent_completed_stat = false +inactive_peer_cleanup_interval = 600 +remove_peerless_torrents = true + +[[udp_trackers]] +enabled = false +bind_address = "0.0.0.0:6969" + +[[http_trackers]] +enabled = false +bind_address = "0.0.0.0:7070" +ssl_enabled = false +ssl_cert_path = "" +ssl_key_path = "" + +[http_api] +enabled = true +bind_address = "127.0.0.1:1212" +ssl_enabled = false +ssl_cert_path = "" +ssl_key_path = "" + +[http_api.access_tokens] +admin = "MyAccessToken" diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 000000000..8646b952e --- /dev/null +++ b/docker/README.md @@ -0,0 +1,250 @@ +# Docker + +## 
Requirements + +- Docker version 20.10.21 +- You need to create the `storage` directory with this structure and files: + +```s +$ tree storage/ +storage/ +ā”œā”€ā”€ database +│   └── data.db +└── ssl_certificates + ā”œā”€ā”€ localhost.crt + └── localhost.key +``` + +> NOTE: you only need the `ssl_certificates` directory and certificates in case you have enabled SSL for the one HTTP tracker or the API. + +## Dev environment + +### With docker + +Build and run locally: + +```s +docker context use default +export TORRUST_TRACKER_USER_UID=1000 +./docker/bin/build.sh $TORRUST_TRACKER_USER_UID +./bin/install.sh +./docker/bin/run.sh $TORRUST_TRACKER_USER_UID +``` + +Run using the pre-built public docker image: + +```s +export TORRUST_TRACKER_USER_UID=1000 +docker run -it \ + --user="$TORRUST_TRACKER_USER_UID" \ + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \ + --volume "$(pwd)/storage":"/app/storage" \ + torrust/torrust-tracker +``` + +> NOTES: +> +> - You have to create the SQLite DB (`data.db`) and configuration (`config.toml`) before running the tracker. See `bin/install.sh`. +> - You have to replace the user UID (`1000`) with yours. +> - Remember to switch to your default docker context `docker context use default`. + +### With docker-compose + +The docker-compose configuration includes the MySQL service configuration. If you want to use MySQL instead of SQLite you have to change your `config.toml` configuration: + +```toml +db_driver = "MySQL" +db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" +``` + +If you want to inject an environment variable into docker-compose you can use the file `.env`. There is a template `.env.local`. 
+ +Build and run it locally: + +```s +docker compose up --build +``` + +After running the "up" command you will have two running containers: + +```s +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +06feacb91a9e torrust-tracker "cargo run" 18 minutes ago Up 4 seconds 0.0.0.0:1212->1212/tcp, :::1212->1212/tcp, 0.0.0.0:7070->7070/tcp, :::7070->7070/tcp, 0.0.0.0:6969->6969/udp, :::6969->6969/udp torrust-tracker-1 +34d29e792ee2 mysql:8.0 "docker-entrypoint.s…" 18 minutes ago Up 5 seconds (healthy) 0.0.0.0:3306->3306/tcp, :::3306->3306/tcp, 33060/tcp torrust-mysql-1 +``` + +And you should be able to use the application, for example making a request to the API: + + + +You can stop the containers with: + +```s +docker compose down +``` + +Additionally, you can delete all resources (containers, volumes, networks) with: + +```s +docker compose down -v +``` + +### Access Mysql with docker + +These are some useful commands for MySQL. + +Open a shell in the MySQL container using docker or docker-compose. + +```s +docker exec -it torrust-mysql-1 /bin/bash +docker compose exec mysql /bin/bash +``` + +Connect to MySQL from inside the MySQL container or from the host: + +```s +mysql -h127.0.0.1 -uroot -proot_secret_password +``` + +The when MySQL container is started the first time, it creates the database, user, and permissions needed. +If you see the error "Host is not allowed to connect to this MySQL server" you can check that users have the right permissions in the database. Make sure the user `root` and `db_user` can connect from any host (`%`). 
+ +```s +mysql> SELECT host, user FROM mysql.user; ++-----------+------------------+ +| host | user | ++-----------+------------------+ +| % | db_user | +| % | root | +| localhost | mysql.infoschema | +| localhost | mysql.session | +| localhost | mysql.sys | +| localhost | root | ++-----------+------------------+ +6 rows in set (0.00 sec) +``` + +If the database, user or permissions are not created the reason could be the MySQL container volume can be corrupted. Delete it and start again the containers. + +### SSL Certificates + +You can use a certificate for localhost. You can create your [localhost certificate](https://letsencrypt.org/docs/certificates-for-localhost/#making-and-trusting-your-own-certificates) and use it in the `storage` folder and the configuration file (`config.toml`). For example: + +The storage folder must contain your certificates: + +```s +$ tree storage/ +storage/ +ā”œā”€ā”€ database +│   └── data.db +└── ssl_certificates + ā”œā”€ā”€ localhost.crt + └── localhost.key +``` + +You have not enabled it in your `config.toml` file: + +```toml +... +[[http_trackers]] +enabled = true +bind_address = "0.0.0.0:7070" +ssl_enabled = true +ssl_cert_path = "./storage/ssl_certificates/localhost.crt" +ssl_key_path = "./storage/ssl_certificates/localhost.key" + +[http_api] +enabled = true +bind_address = "0.0.0.0:1212" +ssl_enabled = true +ssl_cert_path = "./storage/ssl_certificates/localhost.crt" +ssl_key_path = "./storage/ssl_certificates/localhost.key" +... +``` + +> NOTE: you can enable it independently for each HTTP tracker or the API. + +If you enable the SSL certificate for the API, for example, you can load the API with this URL: + + + +## Prod environment + +In this section, you will learn how to deploy the tracker to a single docker container in Azure Container Instances. + +> NOTE: Azure Container Instances is a solution when you want to run an isolated container. 
If you need full container orchestration, including service discovery across multiple containers, automatic scaling, and coordinated application upgrades, we recommend [Kubernetes](https://kubernetes.io/). + +Deploy to Azure Container Instance following [docker documentation](https://docs.docker.com/cloud/aci-integration/). + +You have to create the ACI context and the storage: + +```s +docker context create aci myacicontext +docker context use myacicontext +docker volume create test-volume --storage-account torrustracker +``` + +You need to create all the files needed by the application in the storage dir `storage/database`. + +And finally, you can run the container: + +```s +docker run \ + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \ + --volume torrustracker/test-volume:/app/storage \ + registry.hub.docker.com/torrust/torrust-tracker:latest +``` + +Detach from container logs when the container starts. By default, the command line stays attached and follows container logs. + +```s +docker run \ + --detach + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \latest + --volume torrustracker/test-volume:/app/storage \ + registry.hub.docker.com/torrust/torrust-tracker:latest +``` + +You should see something like this: + +```s +[+] Running 2/2 + ā æ Group intelligent-hawking Created 5.0s + ā æ intelligent-hawking Created 41.7s +2022-12-08T18:39:19.697869300+00:00 [torrust_tracker::logging][INFO] logging initialized. 
+2022-12-08T18:39:19.712651100+00:00 [torrust_tracker::jobs::udp_tracker][INFO] Starting UDP server on: 0.0.0.0:6969 +2022-12-08T18:39:19.712792700+00:00 [torrust_tracker::jobs::tracker_api][INFO] Starting Torrust API server on: 0.0.0.0:1212 +2022-12-08T18:39:19.725124+00:00 [torrust_tracker::jobs::tracker_api][INFO] Torrust API server started +``` + +You can see the container with: + +```s +$ docker ps +CONTAINER ID IMAGE COMMAND STATUS PORTS +intelligent-hawking registry.hub.docker.com/torrust/torrust-tracker:latest Running 4.236.213.57:6969->6969/udp, 4.236.213.57:1212->1212/tcp +``` + +After a while, you can use the tracker API `http://4.236.213.57:1212/api/stats?token=MyAccessToken` and the UDP tracker with your BitTorrent client using this tracker announce URL `udp://4.236.213.57:6969`. + +> NOTES: +> +> - [There is no support for mounting a single file](https://docs.docker.com/cloud/aci-container-features/#persistent-volumes), or mounting a subfolder from an `Azure File Share`. +> - [ACI does not allow port mapping](https://docs.docker.com/cloud/aci-integration/#exposing-ports). +> - [Azure file share volume mount requires the Linux container run as root](https://learn.microsoft.com/en-us/azure/container-instances/container-instances-volume-azure-files#limitations). +> - It can take some minutes until the public IP for the ACI container is available. +> - You can use the Azure web UI to download files from the storage. For example, the SQLite database. +> - [It seems you can only expose web interfaces on port 80 on Azure Container Instances](https://stackoverflow.com/a/56768087/3012842). Not official documentation! + +## Links + +- [Deploying Docker containers on Azure](https://docs.docker.com/cloud/aci-integration/). +- [Docker run options for ACI containers](https://docs.docker.com/cloud/aci-container-features/). 
+- [Quickstart: Deploy a container instance in Azure using the Docker CLI](https://learn.microsoft.com/en-us/azure/container-instances/quickstart-docker-cli). diff --git a/docker/bin/build.sh b/docker/bin/build.sh new file mode 100755 index 000000000..d77d1ad34 --- /dev/null +++ b/docker/bin/build.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +TORRUST_TRACKER_USER_UID=${TORRUST_TRACKER_USER_UID:-1000} +TORRUST_TRACKER_RUN_AS_USER=${TORRUST_TRACKER_RUN_AS_USER:-appuser} + +echo "Building docker image ..." +echo "TORRUST_TRACKER_USER_UID: $TORRUST_TRACKER_USER_UID" +echo "TORRUST_TRACKER_RUN_AS_USER: $TORRUST_TRACKER_RUN_AS_USER" + +docker build \ + --build-arg UID="$TORRUST_TRACKER_USER_UID" \ + --build-arg RUN_AS_USER="$TORRUST_TRACKER_RUN_AS_USER" \ + -t torrust-tracker . diff --git a/docker/bin/install.sh b/docker/bin/install.sh new file mode 100755 index 000000000..a58969378 --- /dev/null +++ b/docker/bin/install.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +./docker/bin/build.sh +./bin/install.sh diff --git a/docker/bin/run.sh b/docker/bin/run.sh new file mode 100755 index 000000000..86465baeb --- /dev/null +++ b/docker/bin/run.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +TORRUST_TRACKER_USER_UID=${TORRUST_TRACKER_USER_UID:-1000} +TORRUST_TRACKER_CONFIG=$(cat config.toml) + +docker run -it \ + --user="$TORRUST_TRACKER_USER_UID" \ + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \ + --env TORRUST_TRACKER_CONFIG="$TORRUST_TRACKER_CONFIG" \ + --volume "$(pwd)/storage":"/app/storage" \ + torrust-tracker From 032f6a63af2c7ad95f1426e7fdba409569170b89 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 15:58:06 +0000 Subject: [PATCH 209/435] fix: docker repo name in README --- docker/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/README.md b/docker/README.md index 8646b952e..e5b4dfe74 100644 --- a/docker/README.md +++ b/docker/README.md @@ -41,7 +41,7 @@ docker run -it \ --publish 7070:7070/tcp \ --publish 
1212:1212/tcp \ --volume "$(pwd)/storage":"/app/storage" \ - torrust/torrust-tracker + torrust/tracker ``` > NOTES: @@ -197,7 +197,7 @@ docker run \ --publish 7070:7070/tcp \ --publish 1212:1212/tcp \ --volume torrustracker/test-volume:/app/storage \ - registry.hub.docker.com/torrust/torrust-tracker:latest + registry.hub.docker.com/torrust/tracker:latest ``` Detach from container logs when the container starts. By default, the command line stays attached and follows container logs. @@ -209,7 +209,7 @@ docker run \ --publish 7070:7070/tcp \ --publish 1212:1212/tcp \latest --volume torrustracker/test-volume:/app/storage \ - registry.hub.docker.com/torrust/torrust-tracker:latest + registry.hub.docker.com/torrust/tracker:latest ``` You should see something like this: @@ -229,7 +229,7 @@ You can see the container with: ```s $ docker ps CONTAINER ID IMAGE COMMAND STATUS PORTS -intelligent-hawking registry.hub.docker.com/torrust/torrust-tracker:latest Running 4.236.213.57:6969->6969/udp, 4.236.213.57:1212->1212/tcp +intelligent-hawking registry.hub.docker.com/torrust/tracker:latest Running 4.236.213.57:6969->6969/udp, 4.236.213.57:1212->1212/tcp ``` After a while, you can use the tracker API `http://4.236.213.57:1212/api/stats?token=MyAccessToken` and the UDP tracker with your BitTorrent client using this tracker announce URL `udp://4.236.213.57:6969`. 
From 171a37d3c48cd365b987eaf280b73a5f35855e20 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 15:58:53 +0000 Subject: [PATCH 210/435] feat: publish docker image for tags, develop and main branches --- .github/workflows/publish_docker_image.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml index b8e3791ed..c6a103931 100644 --- a/.github/workflows/publish_docker_image.yml +++ b/.github/workflows/publish_docker_image.yml @@ -3,10 +3,8 @@ name: Publish docker image on: push: branches: + - 'main' - 'develop' - # todo: only during development of issue 11 - - 'docker' - - 'docker-reorganized-pr' tags: - "v*" From 6851ec5fc1adf206c0baaa20d6201e119c85a4af Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 21 Dec 2022 11:52:20 +0000 Subject: [PATCH 211/435] fix: docker image run as non root The published docker image runs by default as a non-root user. Some services like ACI could require running the container as root, but for those cases they can run their custom builds or change the user while launching the container. 
--- .github/workflows/publish_docker_image.yml | 7 ++++--- cSpell.json | 1 + 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml index c6a103931..7593fb680 100644 --- a/.github/workflows/publish_docker_image.yml +++ b/.github/workflows/publish_docker_image.yml @@ -3,15 +3,16 @@ name: Publish docker image on: push: branches: - - 'main' - - 'develop' + - "main" + - "develop" tags: - "v*" env: # Azure file share volume mount requires the Linux container run as root # https://learn.microsoft.com/en-us/azure/container-instances/container-instances-volume-azure-files#limitations - TORRUST_TRACKER_RUN_AS_USER: root + # TORRUST_TRACKER_RUN_AS_USER: root + TORRUST_TRACKER_RUN_AS_USER: appuser jobs: test: diff --git a/cSpell.json b/cSpell.json index 5bc67a0c8..57b9f3b67 100644 --- a/cSpell.json +++ b/cSpell.json @@ -1,5 +1,6 @@ { "words": [ + "appuser", "AUTOINCREMENT", "automock", "Avicora", From e4b2a8eb7a24c763c19207a64d273d634abf3626 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 23 Dec 2022 09:48:47 +0000 Subject: [PATCH 212/435] feat: publish docker image only when secrets are set Since dockerhub does not allow scoped tokens, we are going to use forks to publish docker images. The "publisher" can set their token on their forks. 
The workflow is executed only if the secret "DOCKER_HUB_USERNAME" is set in the environment "dockerhub-torrust" --- .github/workflows/publish_docker_image.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml index 7593fb680..1587a0bd6 100644 --- a/.github/workflows/publish_docker_image.yml +++ b/.github/workflows/publish_docker_image.yml @@ -15,7 +15,21 @@ env: TORRUST_TRACKER_RUN_AS_USER: appuser jobs: + check-secret: + runs-on: ubuntu-latest + environment: dockerhub-torrust + outputs: + publish: ${{ steps.check.outputs.publish }} + steps: + - id: check + env: + DOCKER_HUB_USERNAME: "${{ secrets.DOCKER_HUB_USERNAME }}" + if: "${{ env.DOCKER_HUB_USERNAME != '' }}" + run: echo "publish=true" >> $GITHUB_OUTPUT + test: + needs: check-secret + if: needs.check-secret.outputs.publish == 'true' runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 @@ -30,6 +44,7 @@ jobs: dockerhub: needs: test + if: needs.check-secret.outputs.publish == 'true' runs-on: ubuntu-latest environment: dockerhub-torrust steps: From 96f386c77f3e503b645e2890884407897559a038 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 23 Dec 2022 16:51:31 +0000 Subject: [PATCH 213/435] refactor: [#142] reorganize tests for the API --- tests/api.rs | 382 ------------------------------------------- tests/api/mod.rs | 214 ++++++++++++++++++++++++ tests/tracker_api.rs | 246 ++++++++++++++++++++++++++++ 3 files changed, 460 insertions(+), 382 deletions(-) delete mode 100644 tests/api.rs create mode 100644 tests/api/mod.rs create mode 100644 tests/tracker_api.rs diff --git a/tests/api.rs b/tests/api.rs deleted file mode 100644 index dfb8d81b3..000000000 --- a/tests/api.rs +++ /dev/null @@ -1,382 +0,0 @@ -/// Integration tests for the tracker API -/// -/// cargo test `tracker_api` -- --nocapture -extern crate rand; - -mod common; - -mod tracker_api { - use core::panic; - use std::env; - use 
std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::str::FromStr; - use std::sync::atomic::{AtomicBool, Ordering}; - use std::sync::Arc; - - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use reqwest::Response; - use tokio::task::JoinHandle; - use torrust_tracker::api::resource; - use torrust_tracker::api::resource::auth_key::AuthKey; - use torrust_tracker::api::resource::stats::Stats; - use torrust_tracker::api::resource::torrent::{self, Torrent}; - use torrust_tracker::config::Configuration; - use torrust_tracker::jobs::tracker_api; - use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::statistics::Keeper; - use torrust_tracker::tracker::{auth, peer}; - use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; - - use crate::common::ephemeral_random_port; - - #[tokio::test] - async fn should_allow_generating_a_new_auth_key() { - let api_server = ApiServer::new_running_instance().await; - - let seconds_valid = 60; - - let auth_key = ApiClient::new(api_server.get_connection_info().unwrap()) - .generate_auth_key(seconds_valid) - .await; - - // Verify the key with the tracker - assert!(api_server - .tracker - .unwrap() - .verify_auth_key(&auth::Key::from(auth_key)) - .await - .is_ok()); - } - - #[tokio::test] - async fn should_allow_whitelisting_a_torrent() { - let api_server = ApiServer::new_running_instance().await; - - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - let res = ApiClient::new(api_server.get_connection_info().unwrap()) - .whitelist_a_torrent(&info_hash) - .await; - - assert_eq!(res.status(), 200); - assert!( - api_server - .tracker - .unwrap() - .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) - .await - ); - } - - #[tokio::test] - async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let api_server = ApiServer::new_running_instance().await; - - 
let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - let api_client = ApiClient::new(api_server.get_connection_info().unwrap()); - - let res = api_client.whitelist_a_torrent(&info_hash).await; - assert_eq!(res.status(), 200); - - let res = api_client.whitelist_a_torrent(&info_hash).await; - assert_eq!(res.status(), 200); - } - - #[tokio::test] - async fn should_allow_getting_a_torrent_info() { - let api_server = ApiServer::new_running_instance().await; - let api_connection_info = api_server.get_connection_info().unwrap(); - - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - let (peer, peer_resource) = sample_torrent_peer(); - - // Add a torrent to the tracker - api_server - .tracker - .unwrap() - .update_torrent_with_peer_and_get_stats(&info_hash, &peer) - .await; - - let torrent_resource = ApiClient::new(api_connection_info).get_torrent(&info_hash.to_string()).await; - - assert_eq!( - torrent_resource, - Torrent { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), - seeders: 1, - completed: 0, - leechers: 0, - peers: Some(vec![peer_resource]) - } - ); - } - - #[tokio::test] - async fn should_allow_getting_torrents() { - let api_server = ApiServer::new_running_instance().await; - - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - let (peer, _peer_resource) = sample_torrent_peer(); - - let api_connection_info = api_server.get_connection_info().unwrap(); - - // Add a torrent to the tracker - api_server - .tracker - .unwrap() - .update_torrent_with_peer_and_get_stats(&info_hash, &peer) - .await; - - let torrent_resources = ApiClient::new(api_connection_info).get_torrents().await; - - assert_eq!( - torrent_resources, - vec![torrent::ListItem { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), - seeders: 1, - completed: 0, - leechers: 0, - peers: None // Torrent list does not include peer list - }] - ); - } - - #[tokio::test] 
- async fn should_allow_getting_tracker_statistics() { - let api_server = ApiServer::new_running_instance().await; - - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - let (peer, _peer_resource) = sample_torrent_peer(); - - let api_connection_info = api_server.get_connection_info().unwrap(); - - // Add a torrent to the tracker - api_server - .tracker - .unwrap() - .update_torrent_with_peer_and_get_stats(&info_hash, &peer) - .await; - - let stats_resource = ApiClient::new(api_connection_info).get_tracker_statistics().await; - - assert_eq!( - stats_resource, - Stats { - torrents: 1, - seeders: 1, - completed: 0, - leechers: 0, - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 0, - udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0, - } - ); - } - - fn sample_torrent_peer() -> (peer::Peer, resource::peer::Peer) { - let torrent_peer = peer::Peer { - peer_id: peer::Id(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }; - let torrent_peer_resource = resource::peer::Peer::from(torrent_peer); - - (torrent_peer, torrent_peer_resource) - } - - fn tracker_configuration() -> Arc { - let mut config = Configuration { - log_level: Some("off".to_owned()), - ..Default::default() - }; - - // Ephemeral socket address - let port = ephemeral_random_port(); - config.http_api.bind_address = format!("127.0.0.1:{}", &port); - - // Ephemeral database - let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join(format!("data_{}.db", &port)); - config.db_path = 
temp_file.to_str().unwrap().to_owned(); - - Arc::new(config) - } - - #[derive(Clone)] - struct ApiConnectionInfo { - pub bind_address: String, - pub api_token: String, - } - - impl ApiConnectionInfo { - pub fn new(bind_address: &str, api_token: &str) -> Self { - Self { - bind_address: bind_address.to_string(), - api_token: api_token.to_string(), - } - } - } - - struct ApiServer { - pub started: AtomicBool, - pub job: Option>, - pub tracker: Option>, - pub connection_info: Option, - } - - impl ApiServer { - pub fn new() -> Self { - Self { - started: AtomicBool::new(false), - job: None, - tracker: None, - connection_info: None, - } - } - - pub async fn new_running_instance() -> ApiServer { - let configuration = tracker_configuration(); - ApiServer::new_running_custom_instance(configuration.clone()).await - } - - async fn new_running_custom_instance(configuration: Arc) -> ApiServer { - let mut api_server = ApiServer::new(); - api_server.start(configuration).await; - api_server - } - - pub async fn start(&mut self, configuration: Arc) { - if !self.started.load(Ordering::Relaxed) { - self.connection_info = Some(ApiConnectionInfo::new( - &configuration.http_api.bind_address.clone(), - &configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(), - )); - - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; - self.tracker = Some(tracker.clone()); - - // Initialize logging - logging::setup(&configuration); - - // Start the HTTP API job - self.job = 
Some(tracker_api::start_job(&configuration.http_api, tracker).await); - - self.started.store(true, Ordering::Relaxed); - } - } - - pub fn get_connection_info(&self) -> Option { - self.connection_info.clone() - } - } - - struct ApiClient { - connection_info: ApiConnectionInfo, - } - - impl ApiClient { - pub fn new(connection_info: ApiConnectionInfo) -> Self { - Self { connection_info } - } - - pub async fn generate_auth_key(&self, seconds_valid: i32) -> AuthKey { - let url = format!( - "http://{}/api/key/{}?token={}", - &self.connection_info.bind_address, &seconds_valid, &self.connection_info.api_token - ); - reqwest::Client::new().post(url).send().await.unwrap().json().await.unwrap() - } - - pub async fn whitelist_a_torrent(&self, info_hash: &str) -> Response { - let url = format!( - "http://{}/api/whitelist/{}?token={}", - &self.connection_info.bind_address, &info_hash, &self.connection_info.api_token - ); - reqwest::Client::new().post(url.clone()).send().await.unwrap() - } - - pub async fn get_torrent(&self, info_hash: &str) -> Torrent { - let url = format!( - "http://{}/api/torrent/{}?token={}", - &self.connection_info.bind_address, &info_hash, &self.connection_info.api_token - ); - reqwest::Client::builder() - .build() - .unwrap() - .get(url) - .send() - .await - .unwrap() - .json::() - .await - .unwrap() - } - - pub async fn get_torrents(&self) -> Vec { - let url = format!( - "http://{}/api/torrents?token={}", - &self.connection_info.bind_address, &self.connection_info.api_token - ); - reqwest::Client::builder() - .build() - .unwrap() - .get(url) - .send() - .await - .unwrap() - .json::>() - .await - .unwrap() - } - - pub async fn get_tracker_statistics(&self) -> Stats { - let url = format!( - "http://{}/api/stats?token={}", - &self.connection_info.bind_address, &self.connection_info.api_token - ); - reqwest::Client::builder() - .build() - .unwrap() - .get(url) - .send() - .await - .unwrap() - .json::() - .await - .unwrap() - } - } -} diff --git 
a/tests/api/mod.rs b/tests/api/mod.rs new file mode 100644 index 000000000..9e2750122 --- /dev/null +++ b/tests/api/mod.rs @@ -0,0 +1,214 @@ +use core::panic; +use std::env; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use reqwest::Response; +use tokio::task::JoinHandle; +use torrust_tracker::api::resource; +use torrust_tracker::api::resource::auth_key::AuthKey; +use torrust_tracker::api::resource::stats::Stats; +use torrust_tracker::api::resource::torrent::{self, Torrent}; +use torrust_tracker::config::Configuration; +use torrust_tracker::jobs::tracker_api; +use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; +use torrust_tracker::tracker::peer; +use torrust_tracker::tracker::statistics::Keeper; +use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; + +use crate::common::ephemeral_random_port; + +pub fn sample_torrent_peer() -> (peer::Peer, resource::peer::Peer) { + let torrent_peer = peer::Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + let torrent_peer_resource = resource::peer::Peer::from(torrent_peer); + + (torrent_peer, torrent_peer_resource) +} + +pub fn tracker_configuration() -> Arc { + let mut config = Configuration { + log_level: Some("off".to_owned()), + ..Default::default() + }; + + // Ephemeral socket address + let port = ephemeral_random_port(); + config.http_api.bind_address = format!("127.0.0.1:{}", &port); + + // Ephemeral database + let temp_directory = env::temp_dir(); + let temp_file = temp_directory.join(format!("data_{}.db", &port)); + config.db_path = temp_file.to_str().unwrap().to_owned(); + + 
Arc::new(config) +} + +#[derive(Clone)] +pub struct ConnectionInfo { + pub bind_address: String, + pub api_token: String, +} + +impl ConnectionInfo { + pub fn new(bind_address: &str, api_token: &str) -> Self { + Self { + bind_address: bind_address.to_string(), + api_token: api_token.to_string(), + } + } +} + +pub struct Server { + pub started: AtomicBool, + pub job: Option>, + pub tracker: Option>, + pub connection_info: Option, +} + +impl Server { + pub fn new() -> Self { + Self { + started: AtomicBool::new(false), + job: None, + tracker: None, + connection_info: None, + } + } + + pub async fn new_running_instance() -> Self { + let configuration = tracker_configuration(); + Self::new_running_custom_instance(configuration.clone()).await + } + + async fn new_running_custom_instance(configuration: Arc) -> Self { + let mut api_server = Self::new(); + api_server.start(configuration).await; + api_server + } + + pub async fn start(&mut self, configuration: Arc) { + if !self.started.load(Ordering::Relaxed) { + self.connection_info = Some(ConnectionInfo::new( + &configuration.http_api.bind_address.clone(), + &configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(), + )); + + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; + self.tracker = Some(tracker.clone()); + + // Initialize logging + logging::setup(&configuration); + + // Start the HTTP API job + self.job = Some(tracker_api::start_job(&configuration.http_api, tracker).await); + + 
self.started.store(true, Ordering::Relaxed); + } + } + + pub fn get_connection_info(&self) -> Option { + self.connection_info.clone() + } +} + +pub struct Client { + connection_info: ConnectionInfo, +} + +impl Client { + pub fn new(connection_info: ConnectionInfo) -> Self { + Self { connection_info } + } + + pub async fn generate_auth_key(&self, seconds_valid: i32) -> AuthKey { + let url = format!( + "http://{}/api/key/{}?token={}", + &self.connection_info.bind_address, &seconds_valid, &self.connection_info.api_token + ); + reqwest::Client::new().post(url).send().await.unwrap().json().await.unwrap() + } + + pub async fn whitelist_a_torrent(&self, info_hash: &str) -> Response { + let url = format!( + "http://{}/api/whitelist/{}?token={}", + &self.connection_info.bind_address, &info_hash, &self.connection_info.api_token + ); + reqwest::Client::new().post(url.clone()).send().await.unwrap() + } + + pub async fn get_torrent(&self, info_hash: &str) -> Torrent { + let url = format!( + "http://{}/api/torrent/{}?token={}", + &self.connection_info.bind_address, &info_hash, &self.connection_info.api_token + ); + reqwest::Client::builder() + .build() + .unwrap() + .get(url) + .send() + .await + .unwrap() + .json::() + .await + .unwrap() + } + + pub async fn get_torrents(&self) -> Vec { + let url = format!( + "http://{}/api/torrents?token={}", + &self.connection_info.bind_address, &self.connection_info.api_token + ); + reqwest::Client::builder() + .build() + .unwrap() + .get(url) + .send() + .await + .unwrap() + .json::>() + .await + .unwrap() + } + + pub async fn get_tracker_statistics(&self) -> Stats { + let url = format!( + "http://{}/api/stats?token={}", + &self.connection_info.bind_address, &self.connection_info.api_token + ); + reqwest::Client::builder() + .build() + .unwrap() + .get(url) + .send() + .await + .unwrap() + .json::() + .await + .unwrap() + } +} diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs new file mode 100644 index 000000000..070126d0a --- 
/dev/null +++ b/tests/tracker_api.rs @@ -0,0 +1,246 @@ +/// Integration tests for the tracker API +/// +/// ```text +/// cargo test tracker_api -- --nocapture +/// ``` +extern crate rand; + +mod api; +mod common; + +mod tracker_api { + + /* + + Endpoints: + + Stats: + GET /api/stats + + Torrents: + GET /api/torrents?offset=:u32&limit=:u32 + GET /api/torrent/:info_hash + + Whitelisted torrents: + POST /api/whitelist/:info_hash + DELETE /api/whitelist/:info_hash + + Whitelist command: + GET /api/whitelist/reload + + Keys: + POST /api/key/:seconds_valid + GET /api/keys/reload + DELETE /api/key/:key + + */ + + mod for_stats_resources { + use std::str::FromStr; + + use torrust_tracker::api::resource::stats::Stats; + use torrust_tracker::protocol::info_hash::InfoHash; + + use crate::api::{sample_torrent_peer, Client, Server}; + + #[tokio::test] + async fn should_allow_getting_tracker_statistics() { + let api_server = Server::new_running_instance().await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + let (peer, _peer_resource) = sample_torrent_peer(); + + let api_connection_info = api_server.get_connection_info().unwrap(); + + // Add a torrent to the tracker + api_server + .tracker + .unwrap() + .update_torrent_with_peer_and_get_stats(&info_hash, &peer) + .await; + + let stats_resource = Client::new(api_connection_info).get_tracker_statistics().await; + + assert_eq!( + stats_resource, + Stats { + torrents: 1, + seeders: 1, + completed: 0, + leechers: 0, + tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + udp6_announces_handled: 0, + udp6_scrapes_handled: 0, + } + ); + } + } + + mod for_torrent_resources { + #[tokio::test] + async fn should_allow_getting_torrents() { + let api_server = 
Server::new_running_instance().await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + let (peer, _peer_resource) = sample_torrent_peer(); + + let api_connection_info = api_server.get_connection_info().unwrap(); + + // Add a torrent to the tracker + api_server + .tracker + .unwrap() + .update_torrent_with_peer_and_get_stats(&info_hash, &peer) + .await; + + let torrent_resources = Client::new(api_connection_info).get_torrents().await; + + assert_eq!( + torrent_resources, + vec![torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None // Torrent list does not include peer list + }] + ); + } + + #[tokio::test] + async fn should_allow_getting_a_torrent_info() { + let api_server = Server::new_running_instance().await; + let api_connection_info = api_server.get_connection_info().unwrap(); + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + let (peer, peer_resource) = sample_torrent_peer(); + + // Add a torrent to the tracker + api_server + .tracker + .unwrap() + .update_torrent_with_peer_and_get_stats(&info_hash, &peer) + .await; + + let torrent_resource = Client::new(api_connection_info).get_torrent(&info_hash.to_string()).await; + + assert_eq!( + torrent_resource, + Torrent { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: Some(vec![peer_resource]) + } + ); + } + + use std::str::FromStr; + + use torrust_tracker::api::resource::torrent::{self, Torrent}; + use torrust_tracker::protocol::info_hash::InfoHash; + + use crate::api::{sample_torrent_peer, Client, Server}; + + #[tokio::test] + async fn should_allow_whitelisting_a_torrent() { + let api_server = Server::new_running_instance().await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let res = 
Client::new(api_server.get_connection_info().unwrap()) + .whitelist_a_torrent(&info_hash) + .await; + + assert_eq!(res.status(), 200); + assert!( + api_server + .tracker + .unwrap() + .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + .await + ); + } + } + + mod for_whitelisted_torrent_resources { + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + + use crate::api::{Client, Server}; + + #[tokio::test] + async fn should_allow_whitelisting_a_torrent() { + let api_server = Server::new_running_instance().await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let res = Client::new(api_server.get_connection_info().unwrap()) + .whitelist_a_torrent(&info_hash) + .await; + + assert_eq!(res.status(), 200); + assert!( + api_server + .tracker + .unwrap() + .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + .await + ); + } + + #[tokio::test] + async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { + let api_server = Server::new_running_instance().await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let api_client = Client::new(api_server.get_connection_info().unwrap()); + + let res = api_client.whitelist_a_torrent(&info_hash).await; + assert_eq!(res.status(), 200); + + let res = api_client.whitelist_a_torrent(&info_hash).await; + assert_eq!(res.status(), 200); + } + } + + mod for_key_resources { + use torrust_tracker::tracker::auth; + + use crate::api::{Client, Server}; + + #[tokio::test] + async fn should_allow_generating_a_new_auth_key() { + let api_server = Server::new_running_instance().await; + + let seconds_valid = 60; + + let auth_key = Client::new(api_server.get_connection_info().unwrap()) + .generate_auth_key(seconds_valid) + .await; + + // Verify the key with the tracker + assert!(api_server + .tracker + .unwrap() + .verify_auth_key(&auth::Key::from(auth_key)) + .await + .is_ok()); + } + } +} From 
68d521e3615b7952aa42a0a122944cbadba93048 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 23 Dec 2022 17:29:35 +0000 Subject: [PATCH 214/435] refactor: [#142] simplify test api server --- tests/api/mod.rs | 99 ++++++++++++++++++++------------------------ tests/tracker_api.rs | 66 ++++++++++------------------- 2 files changed, 67 insertions(+), 98 deletions(-) diff --git a/tests/api/mod.rs b/tests/api/mod.rs index 9e2750122..14365af9c 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -1,12 +1,10 @@ use core::panic; use std::env; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use reqwest::Response; -use tokio::task::JoinHandle; use torrust_tracker::api::resource; use torrust_tracker::api::resource::auth_key::AuthKey; use torrust_tracker::api::resource::stats::Stats; @@ -14,7 +12,8 @@ use torrust_tracker::api::resource::torrent::{self, Torrent}; use torrust_tracker::config::Configuration; use torrust_tracker::jobs::tracker_api; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; -use torrust_tracker::tracker::peer; +use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::tracker::peer::{self, Peer}; use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; @@ -68,71 +67,63 @@ impl ConnectionInfo { } } -pub struct Server { - pub started: AtomicBool, - pub job: Option>, - pub tracker: Option>, - pub connection_info: Option, +pub async fn start_default_api_server() -> Server { + let configuration = tracker_configuration(); + start_custom_api_server(configuration.clone()).await } -impl Server { - pub fn new() -> Self { - Self { - started: AtomicBool::new(false), - job: None, - tracker: None, - connection_info: None, - } - } +pub async fn start_custom_api_server(configuration: Arc) -> Server { + start(configuration).await +} - pub async fn 
new_running_instance() -> Self { - let configuration = tracker_configuration(); - Self::new_running_custom_instance(configuration.clone()).await - } +async fn start(configuration: Arc) -> Server { + let connection_info = ConnectionInfo::new( + &configuration.http_api.bind_address.clone(), + &configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(), + ); - async fn new_running_custom_instance(configuration: Arc) -> Self { - let mut api_server = Self::new(); - api_server.start(configuration).await; - api_server - } + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); - pub async fn start(&mut self, configuration: Arc) { - if !self.started.load(Ordering::Relaxed) { - self.connection_info = Some(ConnectionInfo::new( - &configuration.http_api.bind_address.clone(), - &configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(), - )); + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + // Initialize Torrust tracker + let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + // Initialize logging + logging::setup(&configuration); - // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - 
}; - self.tracker = Some(tracker.clone()); + // Start the HTTP API job + tracker_api::start_job(&configuration.http_api, tracker.clone()).await; - // Initialize logging - logging::setup(&configuration); + Server { + tracker, + connection_info, + } +} - // Start the HTTP API job - self.job = Some(tracker_api::start_job(&configuration.http_api, tracker).await); +pub struct Server { + pub tracker: Arc, + pub connection_info: ConnectionInfo, +} - self.started.store(true, Ordering::Relaxed); - } +impl Server { + pub fn get_connection_info(&self) -> ConnectionInfo { + self.connection_info.clone() } - pub fn get_connection_info(&self) -> Option { - self.connection_info.clone() + /// Add a torrent to the tracker + pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) { + self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; } } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 070126d0a..3a835204f 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -41,24 +41,19 @@ mod tracker_api { use torrust_tracker::api::resource::stats::Stats; use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::{sample_torrent_peer, Client, Server}; + use crate::api::{sample_torrent_peer, start_default_api_server, Client}; #[tokio::test] async fn should_allow_getting_tracker_statistics() { - let api_server = Server::new_running_instance().await; + let api_server = start_default_api_server().await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let (peer, _peer_resource) = sample_torrent_peer(); - let api_connection_info = api_server.get_connection_info().unwrap(); + let api_connection_info = api_server.get_connection_info(); - // Add a torrent to the tracker - api_server - .tracker - .unwrap() - .update_torrent_with_peer_and_get_stats(&info_hash, &peer) - .await; + api_server.add_torrent(&info_hash, &peer).await; let stats_resource = 
Client::new(api_connection_info).get_tracker_statistics().await; @@ -89,20 +84,15 @@ mod tracker_api { mod for_torrent_resources { #[tokio::test] async fn should_allow_getting_torrents() { - let api_server = Server::new_running_instance().await; + let api_server = start_default_api_server().await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let (peer, _peer_resource) = sample_torrent_peer(); - let api_connection_info = api_server.get_connection_info().unwrap(); + let api_connection_info = api_server.get_connection_info(); - // Add a torrent to the tracker - api_server - .tracker - .unwrap() - .update_torrent_with_peer_and_get_stats(&info_hash, &peer) - .await; + api_server.add_torrent(&info_hash, &peer).await; let torrent_resources = Client::new(api_connection_info).get_torrents().await; @@ -120,19 +110,14 @@ mod tracker_api { #[tokio::test] async fn should_allow_getting_a_torrent_info() { - let api_server = Server::new_running_instance().await; - let api_connection_info = api_server.get_connection_info().unwrap(); + let api_server = start_default_api_server().await; + let api_connection_info = api_server.get_connection_info(); let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let (peer, peer_resource) = sample_torrent_peer(); - // Add a torrent to the tracker - api_server - .tracker - .unwrap() - .update_torrent_with_peer_and_get_stats(&info_hash, &peer) - .await; + api_server.add_torrent(&info_hash, &peer).await; let torrent_resource = Client::new(api_connection_info).get_torrent(&info_hash.to_string()).await; @@ -153,15 +138,15 @@ mod tracker_api { use torrust_tracker::api::resource::torrent::{self, Torrent}; use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::{sample_torrent_peer, Client, Server}; + use crate::api::{sample_torrent_peer, start_default_api_server, Client}; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { - let api_server = 
Server::new_running_instance().await; + let api_server = start_default_api_server().await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let res = Client::new(api_server.get_connection_info().unwrap()) + let res = Client::new(api_server.get_connection_info()) .whitelist_a_torrent(&info_hash) .await; @@ -169,7 +154,6 @@ mod tracker_api { assert!( api_server .tracker - .unwrap() .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) .await ); @@ -181,15 +165,15 @@ mod tracker_api { use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::{Client, Server}; + use crate::api::{start_default_api_server, Client}; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { - let api_server = Server::new_running_instance().await; + let api_server = start_default_api_server().await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let res = Client::new(api_server.get_connection_info().unwrap()) + let res = Client::new(api_server.get_connection_info()) .whitelist_a_torrent(&info_hash) .await; @@ -197,7 +181,6 @@ mod tracker_api { assert!( api_server .tracker - .unwrap() .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) .await ); @@ -205,11 +188,11 @@ mod tracker_api { #[tokio::test] async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let api_server = Server::new_running_instance().await; + let api_server = start_default_api_server().await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let api_client = Client::new(api_server.get_connection_info().unwrap()); + let api_client = Client::new(api_server.get_connection_info()); let res = api_client.whitelist_a_torrent(&info_hash).await; assert_eq!(res.status(), 200); @@ -222,25 +205,20 @@ mod tracker_api { mod for_key_resources { use torrust_tracker::tracker::auth; - use crate::api::{Client, Server}; + use crate::api::{start_default_api_server, Client}; #[tokio::test] async fn 
should_allow_generating_a_new_auth_key() { - let api_server = Server::new_running_instance().await; + let api_server = start_default_api_server().await; let seconds_valid = 60; - let auth_key = Client::new(api_server.get_connection_info().unwrap()) + let auth_key = Client::new(api_server.get_connection_info()) .generate_auth_key(seconds_valid) .await; // Verify the key with the tracker - assert!(api_server - .tracker - .unwrap() - .verify_auth_key(&auth::Key::from(auth_key)) - .await - .is_ok()); + assert!(api_server.tracker.verify_auth_key(&auth::Key::from(auth_key)).await.is_ok()); } } } From 3422e93a729bd30b89dbc7dacf3cd993463fd1bf Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 23 Dec 2022 17:48:56 +0000 Subject: [PATCH 215/435] refactor: [#142] clean api tests --- tests/api/mod.rs | 10 +++------- tests/tracker_api.rs | 39 ++++++++++++++++++--------------------- 2 files changed, 21 insertions(+), 28 deletions(-) diff --git a/tests/api/mod.rs b/tests/api/mod.rs index 14365af9c..78c5d9d96 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -5,7 +5,6 @@ use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use reqwest::Response; -use torrust_tracker::api::resource; use torrust_tracker::api::resource::auth_key::AuthKey; use torrust_tracker::api::resource::stats::Stats; use torrust_tracker::api::resource::torrent::{self, Torrent}; @@ -19,8 +18,8 @@ use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use crate::common::ephemeral_random_port; -pub fn sample_torrent_peer() -> (peer::Peer, resource::peer::Peer) { - let torrent_peer = peer::Peer { +pub fn sample_peer() -> peer::Peer { + peer::Peer { peer_id: peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), @@ -28,10 +27,7 @@ pub fn sample_torrent_peer() -> (peer::Peer, resource::peer::Peer) { downloaded: NumberOfBytes(0), left: 
NumberOfBytes(0), event: AnnounceEvent::Started, - }; - let torrent_peer_resource = resource::peer::Peer::from(torrent_peer); - - (torrent_peer, torrent_peer_resource) + } } pub fn tracker_configuration() -> Arc { diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 3a835204f..757494691 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -41,21 +41,20 @@ mod tracker_api { use torrust_tracker::api::resource::stats::Stats; use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::{sample_torrent_peer, start_default_api_server, Client}; + use crate::api::{sample_peer, start_default_api_server, Client}; #[tokio::test] async fn should_allow_getting_tracker_statistics() { let api_server = start_default_api_server().await; - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - let (peer, _peer_resource) = sample_torrent_peer(); - - let api_connection_info = api_server.get_connection_info(); - - api_server.add_torrent(&info_hash, &peer).await; + api_server + .add_torrent( + &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + &sample_peer(), + ) + .await; - let stats_resource = Client::new(api_connection_info).get_tracker_statistics().await; + let stats_resource = Client::new(api_server.get_connection_info()).get_tracker_statistics().await; assert_eq!( stats_resource, @@ -88,13 +87,9 @@ mod tracker_api { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let (peer, _peer_resource) = sample_torrent_peer(); - - let api_connection_info = api_server.get_connection_info(); + api_server.add_torrent(&info_hash, &sample_peer()).await; - api_server.add_torrent(&info_hash, &peer).await; - - let torrent_resources = Client::new(api_connection_info).get_torrents().await; + let torrent_resources = Client::new(api_server.get_connection_info()).get_torrents().await; assert_eq!( torrent_resources, @@ -103,7 +98,7 @@ mod tracker_api { seeders: 1, 
completed: 0, leechers: 0, - peers: None // Torrent list does not include peer list + peers: None // Torrent list does not include the peer list for each torrent }] ); } @@ -111,15 +106,16 @@ mod tracker_api { #[tokio::test] async fn should_allow_getting_a_torrent_info() { let api_server = start_default_api_server().await; - let api_connection_info = api_server.get_connection_info(); let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let (peer, peer_resource) = sample_torrent_peer(); + let peer = sample_peer(); api_server.add_torrent(&info_hash, &peer).await; - let torrent_resource = Client::new(api_connection_info).get_torrent(&info_hash.to_string()).await; + let torrent_resource = Client::new(api_server.get_connection_info()) + .get_torrent(&info_hash.to_string()) + .await; assert_eq!( torrent_resource, @@ -128,17 +124,18 @@ mod tracker_api { seeders: 1, completed: 0, leechers: 0, - peers: Some(vec![peer_resource]) + peers: Some(vec![resource::peer::Peer::from(peer)]) } ); } use std::str::FromStr; + use torrust_tracker::api::resource; use torrust_tracker::api::resource::torrent::{self, Torrent}; use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::{sample_torrent_peer, start_default_api_server, Client}; + use crate::api::{sample_peer, start_default_api_server, Client}; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { From 07364f43e23b62d68366b72dbee3fbb2897da5f5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 23 Dec 2022 18:14:02 +0000 Subject: [PATCH 216/435] refactor: [#142] extract functions in test api Client --- tests/api/mod.rs | 62 ++++++++++++++++-------------------------------- 1 file changed, 20 insertions(+), 42 deletions(-) diff --git a/tests/api/mod.rs b/tests/api/mod.rs index 78c5d9d96..1a2061d04 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -133,69 +133,47 @@ impl Client { } pub async fn generate_auth_key(&self, seconds_valid: i32) -> AuthKey { - let url = 
format!( - "http://{}/api/key/{}?token={}", - &self.connection_info.bind_address, &seconds_valid, &self.connection_info.api_token - ); - reqwest::Client::new().post(url).send().await.unwrap().json().await.unwrap() + self.post(&format!("key/{}", &seconds_valid)).await.json().await.unwrap() } pub async fn whitelist_a_torrent(&self, info_hash: &str) -> Response { - let url = format!( - "http://{}/api/whitelist/{}?token={}", - &self.connection_info.bind_address, &info_hash, &self.connection_info.api_token - ); - reqwest::Client::new().post(url.clone()).send().await.unwrap() + self.post(&format!("whitelist/{}", &info_hash)).await } pub async fn get_torrent(&self, info_hash: &str) -> Torrent { - let url = format!( - "http://{}/api/torrent/{}?token={}", - &self.connection_info.bind_address, &info_hash, &self.connection_info.api_token - ); - reqwest::Client::builder() - .build() - .unwrap() - .get(url) - .send() + self.get(&format!("torrent/{}", &info_hash)) .await - .unwrap() .json::() .await .unwrap() } pub async fn get_torrents(&self) -> Vec { - let url = format!( - "http://{}/api/torrents?token={}", - &self.connection_info.bind_address, &self.connection_info.api_token - ); - reqwest::Client::builder() - .build() - .unwrap() - .get(url) - .send() - .await - .unwrap() - .json::>() - .await - .unwrap() + self.get("torrents").await.json::>().await.unwrap() } pub async fn get_tracker_statistics(&self) -> Stats { - let url = format!( - "http://{}/api/stats?token={}", - &self.connection_info.bind_address, &self.connection_info.api_token - ); + self.get("stats").await.json::().await.unwrap() + } + + async fn get(&self, path: &str) -> Response { reqwest::Client::builder() .build() .unwrap() - .get(url) + .get(self.url(path)) .send() .await .unwrap() - .json::() - .await - .unwrap() + } + + async fn post(&self, path: &str) -> Response { + reqwest::Client::new().post(self.url(path).clone()).send().await.unwrap() + } + + fn url(&self, path: &str) -> String { + format!( + 
"http://{}/api/{path}?token={}", + &self.connection_info.bind_address, &self.connection_info.api_token + ) } } From 11259e8a33dbfa0f3d141058c84f13bbe8547f7e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 30 Dec 2022 17:25:00 +0000 Subject: [PATCH 217/435] test(api): [#142] improved api test coverage --- src/tracker/auth.rs | 4 +- tests/api/mod.rs | 158 +++++++++++++++--- tests/tracker_api.rs | 385 ++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 491 insertions(+), 56 deletions(-) diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 02450dc82..406ef7033 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -4,7 +4,7 @@ use derive_more::{Display, Error}; use log::debug; use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; -use serde::Serialize; +use serde::{Deserialize, Serialize}; use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time, TimeNow}; use crate::protocol::common::AUTH_KEY_LENGTH; @@ -48,7 +48,7 @@ pub fn verify(auth_key: &Key) -> Result<(), Error> { } } -#[derive(Serialize, Debug, Eq, PartialEq, Clone)] +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub struct Key { pub key: String, pub valid_until: Option, diff --git a/tests/api/mod.rs b/tests/api/mod.rs index 1a2061d04..1528888bf 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -5,9 +5,6 @@ use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use reqwest::Response; -use torrust_tracker::api::resource::auth_key::AuthKey; -use torrust_tracker::api::resource::stats::Stats; -use torrust_tracker::api::resource::torrent::{self, Torrent}; use torrust_tracker::config::Configuration; use torrust_tracker::jobs::tracker_api; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; @@ -51,14 +48,21 @@ pub fn tracker_configuration() -> Arc { #[derive(Clone)] pub struct ConnectionInfo { pub bind_address: String, - pub api_token: String, + pub api_token: Option, } impl ConnectionInfo { - pub fn 
new(bind_address: &str, api_token: &str) -> Self { + pub fn authenticated(bind_address: &str, api_token: &str) -> Self { Self { bind_address: bind_address.to_string(), - api_token: api_token.to_string(), + api_token: Some(api_token.to_string()), + } + } + + pub fn anonymous(bind_address: &str) -> Self { + Self { + bind_address: bind_address.to_string(), + api_token: None, } } } @@ -73,7 +77,7 @@ pub async fn start_custom_api_server(configuration: Arc) -> Serve } async fn start(configuration: Arc) -> Server { - let connection_info = ConnectionInfo::new( + let connection_info = ConnectionInfo::authenticated( &configuration.http_api.bind_address.clone(), &configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(), ); @@ -117,6 +121,10 @@ impl Server { self.connection_info.clone() } + pub fn get_bind_address(&self) -> String { + self.connection_info.bind_address.clone() + } + /// Add a torrent to the tracker pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) { self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; @@ -127,53 +135,149 @@ pub struct Client { connection_info: ConnectionInfo, } +type ReqwestQuery = Vec; +type ReqwestQueryParam = (String, String); + +#[derive(Default, Debug)] +pub struct Query { + params: Vec, +} + +impl Query { + pub fn empty() -> Self { + Self { params: vec![] } + } + + pub fn params(params: Vec) -> Self { + Self { params } + } + + pub fn add_param(&mut self, param: QueryParam) { + self.params.push(param); + } + + fn with_token(token: &str) -> Self { + Self { + params: vec![QueryParam::new("token", token)], + } + } +} + +impl From for ReqwestQuery { + fn from(url_search_params: Query) -> Self { + url_search_params + .params + .iter() + .map(|param| ReqwestQueryParam::from((*param).clone())) + .collect() + } +} + +#[derive(Clone, Debug)] +pub struct QueryParam { + name: String, + value: String, +} + +impl QueryParam { + pub fn new(name: &str, value: &str) -> Self { + Self { + name: 
name.to_string(), + value: value.to_string(), + } + } +} + +impl From for ReqwestQueryParam { + fn from(param: QueryParam) -> Self { + (param.name, param.value) + } +} + impl Client { pub fn new(connection_info: ConnectionInfo) -> Self { Self { connection_info } } - pub async fn generate_auth_key(&self, seconds_valid: i32) -> AuthKey { - self.post(&format!("key/{}", &seconds_valid)).await.json().await.unwrap() + pub async fn generate_auth_key(&self, seconds_valid: i32) -> Response { + self.post(&format!("key/{}", &seconds_valid)).await + } + + pub async fn delete_auth_key(&self, key: &str) -> Response { + self.delete(&format!("key/{}", &key)).await + } + + pub async fn reload_keys(&self) -> Response { + self.get("keys/reload", Query::default()).await } pub async fn whitelist_a_torrent(&self, info_hash: &str) -> Response { self.post(&format!("whitelist/{}", &info_hash)).await } - pub async fn get_torrent(&self, info_hash: &str) -> Torrent { - self.get(&format!("torrent/{}", &info_hash)) - .await - .json::() - .await - .unwrap() + pub async fn remove_torrent_from_whitelist(&self, info_hash: &str) -> Response { + self.delete(&format!("whitelist/{}", &info_hash)).await } - pub async fn get_torrents(&self) -> Vec { - self.get("torrents").await.json::>().await.unwrap() + pub async fn reload_whitelist(&self) -> Response { + self.get("whitelist/reload", Query::default()).await } - pub async fn get_tracker_statistics(&self) -> Stats { - self.get("stats").await.json::().await.unwrap() + pub async fn get_torrent(&self, info_hash: &str) -> Response { + self.get(&format!("torrent/{}", &info_hash), Query::default()).await } - async fn get(&self, path: &str) -> Response { + pub async fn get_torrents(&self, params: Query) -> Response { + self.get("torrents", params).await + } + + pub async fn get_tracker_statistics(&self) -> Response { + self.get("stats", Query::default()).await + } + + async fn get(&self, path: &str, params: Query) -> Response { + let mut query: Query = params; + 
+ if let Some(token) = &self.connection_info.api_token { + query.add_param(QueryParam::new("token", token)); + }; + reqwest::Client::builder() .build() .unwrap() - .get(self.url(path)) + .get(self.base_url(path)) + .query(&ReqwestQuery::from(query)) .send() .await .unwrap() } async fn post(&self, path: &str) -> Response { - reqwest::Client::new().post(self.url(path).clone()).send().await.unwrap() + reqwest::Client::new() + .post(self.base_url(path).clone()) + .query(&ReqwestQuery::from(self.query_with_token())) + .send() + .await + .unwrap() + } + + async fn delete(&self, path: &str) -> Response { + reqwest::Client::new() + .delete(self.base_url(path).clone()) + .query(&ReqwestQuery::from(self.query_with_token())) + .send() + .await + .unwrap() } - fn url(&self, path: &str) -> String { - format!( - "http://{}/api/{path}?token={}", - &self.connection_info.bind_address, &self.connection_info.api_token - ) + fn base_url(&self, path: &str) -> String { + format!("http://{}/api/{path}", &self.connection_info.bind_address) + } + + fn query_with_token(&self) -> Query { + match &self.connection_info.api_token { + Some(token) => Query::with_token(token), + None => Query::default(), + } } } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 757494691..d02f29374 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -35,13 +35,43 @@ mod tracker_api { */ + use reqwest::Response; + + use crate::api::ConnectionInfo; + + async fn assert_token_not_valid(response: Response) { + assert_eq!(response.status(), 500); + assert_eq!( + response.text().await.unwrap(), + "Unhandled rejection: Err { reason: \"token not valid\" }" + ); + } + + async fn assert_unauthorized(response: Response) { + assert_eq!(response.status(), 500); + assert_eq!( + response.text().await.unwrap(), + "Unhandled rejection: Err { reason: \"unauthorized\" }" + ); + } + + fn connection_with_invalid_token(bind_address: &str) -> ConnectionInfo { + ConnectionInfo::authenticated(bind_address, "invalid 
token") + } + + fn connection_with_no_token(bind_address: &str) -> ConnectionInfo { + ConnectionInfo::anonymous(bind_address) + } + mod for_stats_resources { use std::str::FromStr; use torrust_tracker::api::resource::stats::Stats; use torrust_tracker::protocol::info_hash::InfoHash; + use super::{connection_with_invalid_token, connection_with_no_token}; use crate::api::{sample_peer, start_default_api_server, Client}; + use crate::tracker_api::{assert_token_not_valid, assert_unauthorized}; #[tokio::test] async fn should_allow_getting_tracker_statistics() { @@ -54,10 +84,11 @@ mod tracker_api { ) .await; - let stats_resource = Client::new(api_server.get_connection_info()).get_tracker_statistics().await; + let response = Client::new(api_server.get_connection_info()).get_tracker_statistics().await; + assert_eq!(response.status(), 200); assert_eq!( - stats_resource, + response.json::().await.unwrap(), Stats { torrents: 1, seeders: 1, @@ -78,9 +109,36 @@ mod tracker_api { } ); } + + #[tokio::test] + async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { + let api_server = start_default_api_server().await; + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + .get_tracker_statistics() + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + .get_tracker_statistics() + .await; + + assert_unauthorized(response).await; + } } mod for_torrent_resources { + use std::str::FromStr; + + use torrust_tracker::api::resource; + use torrust_tracker::api::resource::torrent::{self, Torrent}; + use torrust_tracker::protocol::info_hash::InfoHash; + + use super::{connection_with_invalid_token, connection_with_no_token}; + use crate::api::{sample_peer, start_default_api_server, Client, Query, QueryParam}; + use crate::tracker_api::{assert_token_not_valid, assert_unauthorized}; + #[tokio::test] async fn should_allow_getting_torrents() { 
let api_server = start_default_api_server().await; @@ -89,10 +147,13 @@ mod tracker_api { api_server.add_torrent(&info_hash, &sample_peer()).await; - let torrent_resources = Client::new(api_server.get_connection_info()).get_torrents().await; + let response = Client::new(api_server.get_connection_info()) + .get_torrents(Query::empty()) + .await; + assert_eq!(response.status(), 200); assert_eq!( - torrent_resources, + response.json::>().await.unwrap(), vec![torrent::ListItem { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, @@ -103,6 +164,79 @@ mod tracker_api { ); } + #[tokio::test] + async fn should_allow_limiting_the_torrents_in_the_result() { + let api_server = start_default_api_server().await; + + // torrents are ordered alphabetically by infohashes + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); + + api_server.add_torrent(&info_hash_1, &sample_peer()).await; + api_server.add_torrent(&info_hash_2, &sample_peer()).await; + + let response = Client::new(api_server.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) + .await; + + assert_eq!(response.status(), 200); + assert_eq!( + response.json::>().await.unwrap(), + vec![torrent::ListItem { + info_hash: "0b3aea4adc213ce32295be85d3883a63bca25446".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None // Torrent list does not include the peer list for each torrent + }] + ); + } + + #[tokio::test] + async fn should_allow_the_torrents_result_pagination() { + let api_server = start_default_api_server().await; + + // torrents are ordered alphabetically by infohashes + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); + + api_server.add_torrent(&info_hash_1, 
&sample_peer()).await; + api_server.add_torrent(&info_hash_2, &sample_peer()).await; + + let response = Client::new(api_server.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) + .await; + + assert_eq!(response.status(), 200); + assert_eq!( + response.json::>().await.unwrap(), + vec![torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None // Torrent list does not include the peer list for each torrent + }] + ); + } + + #[tokio::test] + async fn should_not_allow_getting_torrents_for_unauthenticated_users() { + let api_server = start_default_api_server().await; + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + .get_torrents(Query::empty()) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + .get_torrents(Query::default()) + .await; + + assert_unauthorized(response).await; + } + #[tokio::test] async fn should_allow_getting_a_torrent_info() { let api_server = start_default_api_server().await; @@ -113,12 +247,13 @@ mod tracker_api { api_server.add_torrent(&info_hash, &peer).await; - let torrent_resource = Client::new(api_server.get_connection_info()) + let response = Client::new(api_server.get_connection_info()) .get_torrent(&info_hash.to_string()) .await; + assert_eq!(response.status(), 200); assert_eq!( - torrent_resource, + response.json::().await.unwrap(), Torrent { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, @@ -129,31 +264,25 @@ mod tracker_api { ); } - use std::str::FromStr; + #[tokio::test] + async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { + let api_server = start_default_api_server().await; - use torrust_tracker::api::resource; - use torrust_tracker::api::resource::torrent::{self, Torrent}; - use 
torrust_tracker::protocol::info_hash::InfoHash; + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - use crate::api::{sample_peer, start_default_api_server, Client}; + api_server.add_torrent(&info_hash, &sample_peer()).await; - #[tokio::test] - async fn should_allow_whitelisting_a_torrent() { - let api_server = start_default_api_server().await; + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + .get_torrent(&info_hash.to_string()) + .await; - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + assert_token_not_valid(response).await; - let res = Client::new(api_server.get_connection_info()) - .whitelist_a_torrent(&info_hash) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + .get_torrent(&info_hash.to_string()) .await; - assert_eq!(res.status(), 200); - assert!( - api_server - .tracker - .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) - .await - ); + assert_unauthorized(response).await; } } @@ -162,7 +291,9 @@ mod tracker_api { use torrust_tracker::protocol::info_hash::InfoHash; + use super::{assert_token_not_valid, connection_with_invalid_token, connection_with_no_token}; use crate::api::{start_default_api_server, Client}; + use crate::tracker_api::assert_unauthorized; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { @@ -197,12 +328,97 @@ mod tracker_api { let res = api_client.whitelist_a_torrent(&info_hash).await; assert_eq!(res.status(), 200); } + + #[tokio::test] + async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { + let api_server = start_default_api_server().await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + .whitelist_a_torrent(&info_hash) + .await; + + assert_token_not_valid(response).await; + + let response = 
Client::new(connection_with_no_token(&api_server.get_bind_address())) + .whitelist_a_torrent(&info_hash) + .await; + + assert_unauthorized(response).await; + } + + #[tokio::test] + async fn should_allow_removing_a_torrent_from_the_whitelist() { + let api_server = start_default_api_server().await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + let response = Client::new(api_server.get_connection_info()) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_eq!(response.status(), 200); + assert!(!api_server.tracker.is_info_hash_whitelisted(&info_hash).await); + } + + #[tokio::test] + async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { + let api_server = start_default_api_server().await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_token_not_valid(response).await; + + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_unauthorized(response).await; + } + + #[tokio::test] + async fn should_allow_reload_the_whitelist_from_the_database() { + let api_server = start_default_api_server().await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + let response = Client::new(api_server.get_connection_info()).reload_whitelist().await; + + 
assert_eq!(response.status(), 200); + /* This assert fails because the whitelist has not been reloaded yet. + We could add a new endpoint GET /api/whitelist/:info_hash to check if a torrent + is whitelisted and use that endpoint to check if the torrent is still there after reloading. + assert!( + !(api_server + .tracker + .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + .await) + ); + */ + } } mod for_key_resources { - use torrust_tracker::tracker::auth; + use std::time::Duration; + use torrust_tracker::api::resource::auth_key::AuthKey; + use torrust_tracker::tracker::auth::Key; + + use super::{connection_with_invalid_token, connection_with_no_token}; use crate::api::{start_default_api_server, Client}; + use crate::tracker_api::{assert_token_not_valid, assert_unauthorized}; #[tokio::test] async fn should_allow_generating_a_new_auth_key() { @@ -210,12 +426,127 @@ mod tracker_api { let seconds_valid = 60; - let auth_key = Client::new(api_server.get_connection_info()) + let response = Client::new(api_server.get_connection_info()) .generate_auth_key(seconds_valid) .await; // Verify the key with the tracker - assert!(api_server.tracker.verify_auth_key(&auth::Key::from(auth_key)).await.is_ok()); + assert!(api_server + .tracker + .verify_auth_key(&Key::from(response.json::().await.unwrap())) + .await + .is_ok()); + } + + #[tokio::test] + async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { + let api_server = start_default_api_server().await; + + let seconds_valid = 60; + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + .generate_auth_key(seconds_valid) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + .generate_auth_key(seconds_valid) + .await; + + assert_unauthorized(response).await; + } + + #[tokio::test] + async fn should_allow_deleting_an_auth_key() { + let api_server = 
start_default_api_server().await; + + let seconds_valid = 60; + let auth_key = api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(api_server.get_connection_info()) + .delete_auth_key(&auth_key.key) + .await; + + assert_eq!(response.status(), 200); + assert_eq!(response.text().await.unwrap(), "{\"status\":\"ok\"}"); + } + + #[tokio::test] + async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { + let api_server = start_default_api_server().await; + + let seconds_valid = 60; + + // Generate new auth key + let auth_key = api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + .delete_auth_key(&auth_key.key) + .await; + + assert_token_not_valid(response).await; + + // Generate new auth key + let auth_key = api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + .delete_auth_key(&auth_key.key) + .await; + + assert_unauthorized(response).await; + } + + #[tokio::test] + async fn should_allow_reloading_keys() { + let api_server = start_default_api_server().await; + + let seconds_valid = 60; + api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(api_server.get_connection_info()).reload_keys().await; + + assert_eq!(response.status(), 200); + } + + #[tokio::test] + async fn should_not_allow_reloading_keys_for_unauthenticated_users() { + let api_server = start_default_api_server().await; + + let seconds_valid = 60; + api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + .reload_keys() + 
.await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + .reload_keys() + .await; + + assert_unauthorized(response).await; } } } From 901bc342721d137c9ca2b571a7e5632c33142b4b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jan 2023 16:36:38 +0000 Subject: [PATCH 218/435] feat: [#143] add axum dependency We are going to reimplement the API with Axum. --- Cargo.lock | 125 +++++++++++++++++++++++++++++++++++++++++++++++++++++ Cargo.toml | 1 + 2 files changed, 126 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index e3a6d9c09..8e40508dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -96,6 +96,56 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "axum" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08b108ad2665fa3f6e6a517c3d80ec3e77d224c47d605167aefaa5d7ef97fa48" +dependencies = [ + "async-trait", + "axum-core", + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower", + "tower-http", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79b8558f5a0581152dc94dcd289132a1d377494bdeafcd41869b3258e3e2ad92" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "mime", + "rustversion", + "tower-layer", + "tower-service", +] + [[package]] name = "base-x" version = "0.2.11" @@ -930,6 +980,12 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "http-range-header" +version = "0.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" + [[package]] name = "httparse" version = "1.8.0" @@ -1214,6 +1270,12 @@ dependencies = [ "hashbrown 0.11.2", ] +[[package]] +name = "matchit" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" + [[package]] name = "memchr" version = "2.5.0" @@ -2028,6 +2090,12 @@ dependencies = [ "base64", ] +[[package]] +name = "rustversion" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" + [[package]] name = "ryu" version = "1.0.11" @@ -2187,6 +2255,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b04f22b563c91331a10074bda3dd5492e3cc39d56bd557e91c0af42b6c7341" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -2420,6 +2497,12 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" + [[package]] name = "tap" version = "1.0.1" @@ -2680,6 +2763,7 @@ version = "2.3.0" dependencies = [ "aquatic_udp_protocol", "async-trait", + "axum", "binascii", "chrono", "config", @@ -2708,6 +2792,47 @@ dependencies = [ "warp", ] +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name 
= "tower-http" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" +dependencies = [ + "bitflags", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + [[package]] name = "tower-service" version = "0.3.2" diff --git a/Cargo.toml b/Cargo.toml index 6e835bcb5..8ddefe78e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -58,6 +58,7 @@ async-trait = "0.1" aquatic_udp_protocol = "0.2" uuid = { version = "1", features = ["v4"] } +axum = "0.6.1" [dev-dependencies] mockall = "0.11" From cbf88377950d0208cf3f4d8641978cbd1a1a823f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jan 2023 16:38:09 +0000 Subject: [PATCH 219/435] feat(api): [#143] scaffolding for new API using Axum - Test scaffolding - Dummy entrypoint --- src/apis/mod.rs | 2 + src/apis/routes.rs | 7 ++ src/apis/server.rs | 35 +++++++++ src/config.rs | 2 +- src/jobs/mod.rs | 1 + src/jobs/tracker_apis.rs | 54 +++++++++++++ src/lib.rs | 1 + src/setup.rs | 19 ++++- tests/api/mod.rs | 61 ++++++++++---- tests/tracker_api.rs | 166 +++++++++++++++++++++++++++------------ 10 files changed, 279 insertions(+), 69 deletions(-) create mode 100644 src/apis/mod.rs create mode 100644 src/apis/routes.rs create mode 100644 src/apis/server.rs create mode 100644 src/jobs/tracker_apis.rs diff --git a/src/apis/mod.rs b/src/apis/mod.rs new file mode 100644 index 000000000..c2ee0fc38 --- /dev/null +++ b/src/apis/mod.rs @@ -0,0 +1,2 @@ +pub mod server; +pub mod routes; diff --git a/src/apis/routes.rs b/src/apis/routes.rs new file mode 100644 index 000000000..2db23c35f --- /dev/null +++ b/src/apis/routes.rs @@ -0,0 
+1,7 @@ +use axum::response::Json; +use serde_json::{json, Value}; + +#[allow(clippy::unused_async)] +pub async fn root() -> Json { + Json(json!({ "data": 42 })) +} diff --git a/src/apis/server.rs b/src/apis/server.rs new file mode 100644 index 000000000..3bef75367 --- /dev/null +++ b/src/apis/server.rs @@ -0,0 +1,35 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use axum::routing::get; +use axum::Router; +use futures::Future; +use warp::hyper; + +use super::routes::root; +use crate::tracker; + +pub fn start(socket_addr: SocketAddr, _tracker: &Arc) -> impl Future> { + let app = Router::new().route("/", get(root)); + + let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); + + server.with_graceful_shutdown(async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + }) +} + +pub fn start_tls( + socket_addr: SocketAddr, + _ssl_cert_path: &str, + _ssl_key_path: &str, + _tracker: &Arc, +) -> impl Future> { + let app = Router::new().route("/", get(root)); + + let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); + + server.with_graceful_shutdown(async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + }) +} diff --git a/src/config.rs b/src/config.rs index 48e28b358..820af77d8 100644 --- a/src/config.rs +++ b/src/config.rs @@ -31,7 +31,7 @@ pub struct HttpTracker { } #[serde_as] -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct HttpApi { pub enabled: bool, pub bind_address: String, diff --git a/src/jobs/mod.rs b/src/jobs/mod.rs index 8b8f0662b..a06e7d53c 100644 --- a/src/jobs/mod.rs +++ b/src/jobs/mod.rs @@ -2,3 +2,4 @@ pub mod http_tracker; pub mod torrent_cleanup; pub mod tracker_api; pub mod udp_tracker; +pub mod tracker_apis; diff --git a/src/jobs/tracker_apis.rs b/src/jobs/tracker_apis.rs new file mode 100644 index 000000000..b696c923d --- /dev/null +++ 
b/src/jobs/tracker_apis.rs @@ -0,0 +1,54 @@ +use std::sync::Arc; + +use log::info; +use tokio::sync::oneshot; +use tokio::task::JoinHandle; + +use crate::apis::server; +use crate::config::HttpApi; +use crate::tracker; + +#[derive(Debug)] +pub struct ApiServerJobStarted(); + +/// # Panics +/// +/// It would panic if unable to send the `ApiServerJobStarted` notice. +pub async fn start_job(config: &HttpApi, tracker: Arc) -> JoinHandle<()> { + let bind_addr = config + .bind_address + .parse::() + .expect("Tracker API bind_address invalid."); + let ssl_enabled = config.ssl_enabled; + let ssl_cert_path = config.ssl_cert_path.clone(); + let ssl_key_path = config.ssl_key_path.clone(); + + let (tx, rx) = oneshot::channel::(); + + // Run the API server + let join_handle = tokio::spawn(async move { + if !ssl_enabled { + info!("Starting Torrust APIs server on: http://{}", bind_addr); + let handle = server::start(bind_addr, &tracker); + tx.send(ApiServerJobStarted()).expect("the start job dropped"); + if let Ok(()) = handle.await { + info!("Stopping Torrust APIs server on {} ...", bind_addr); + } + } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { + info!("Starting Torrust APIs server on: https://{}", bind_addr); + let handle = server::start_tls(bind_addr, &ssl_cert_path.unwrap(), &ssl_key_path.unwrap(), &tracker); + tx.send(ApiServerJobStarted()).expect("the start job dropped"); + if let Ok(()) = handle.await { + info!("Stopping Torrust APIs server on {} ...", bind_addr); + } + } + }); + + // Wait until the APIs server job is running + match rx.await { + Ok(_msg) => info!("Torrust APIs server started"), + Err(e) => panic!("the apis server was dropped: {e}"), + } + + join_handle +} diff --git a/src/lib.rs b/src/lib.rs index 7e4fe13a7..6edb96dfd 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -9,6 +9,7 @@ pub mod setup; pub mod stats; pub mod tracker; pub mod udp; +pub mod apis; #[macro_use] extern crate lazy_static; diff --git a/src/setup.rs 
b/src/setup.rs index c045310bb..84a1d1c3c 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -1,12 +1,16 @@ +use std::net::SocketAddr; use std::sync::Arc; use log::warn; use tokio::task::JoinHandle; use crate::config::Configuration; -use crate::jobs::{http_tracker, torrent_cleanup, tracker_api, udp_tracker}; +use crate::jobs::{http_tracker, torrent_cleanup, tracker_api, tracker_apis, udp_tracker}; use crate::tracker; +/// # Panics +/// +/// Will panic if the socket address for API can't be parsed. pub async fn setup(config: &Configuration, tracker: Arc) -> Vec> { let mut jobs: Vec> = Vec::new(); @@ -52,6 +56,19 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Ve jobs.push(tracker_api::start_job(&config.http_api, tracker.clone()).await); } + // Start HTTP APIs server (multiple API versions) + if config.http_api.enabled { + // Temporarily running the new API in the 1313 port + let bind_address = config.http_api.bind_address.clone(); + let mut bind_socket: SocketAddr = bind_address.parse().unwrap(); + bind_socket.set_port(1313); + + let mut http_apis_config = config.http_api.clone(); + http_apis_config.bind_address = bind_socket.to_string(); + + jobs.push(tracker_apis::start_job(&http_apis_config, tracker.clone()).await); + } + // Remove torrents without peers, every interval if config.inactive_peer_cleanup_interval > 0 { jobs.push(torrent_cleanup::start_job(config, &tracker)); diff --git a/tests/api/mod.rs b/tests/api/mod.rs index 1528888bf..49fde7a81 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -6,7 +6,7 @@ use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use reqwest::Response; use torrust_tracker::config::Configuration; -use torrust_tracker::jobs::tracker_api; +use torrust_tracker::jobs::{tracker_api, tracker_apis}; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::{self, Peer}; @@ -67,16 +67,38 @@ impl 
ConnectionInfo { } } -pub async fn start_default_api_server() -> Server { +pub async fn start_default_api_server(version: &Version) -> Server { let configuration = tracker_configuration(); - start_custom_api_server(configuration.clone()).await + start_custom_api_server(configuration.clone(), version).await } -pub async fn start_custom_api_server(configuration: Arc) -> Server { - start(configuration).await +pub async fn start_custom_api_server(configuration: Arc, version: &Version) -> Server { + match &version { + Version::Warp => start_warp_api(configuration).await, + Version::Axum => start_axum_api(configuration).await, + } +} + +async fn start_warp_api(configuration: Arc) -> Server { + let server = start(&configuration); + + // Start the HTTP API job + tracker_api::start_job(&configuration.http_api, server.tracker.clone()).await; + + server +} + +async fn start_axum_api(configuration: Arc) -> Server { + let server = start(&configuration); + + // Start HTTP APIs server (multiple API versions) + // Temporarily run the new API on a port number after the current API port + tracker_apis::start_job(&configuration.http_api, server.tracker.clone()).await; + + server } -async fn start(configuration: Arc) -> Server { +fn start(configuration: &Arc) -> Server { let connection_info = ConnectionInfo::authenticated( &configuration.http_api.bind_address.clone(), &configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(), @@ -92,7 +114,7 @@ async fn start(configuration: Arc) -> Server { let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { + let tracker = match tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository) { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) @@ -100,10 +122,7 @@ async fn start(configuration: Arc) -> Server { }; // Initialize 
logging - logging::setup(&configuration); - - // Start the HTTP API job - tracker_api::start_job(&configuration.http_api, tracker.clone()).await; + logging::setup(configuration); Server { tracker, @@ -133,6 +152,7 @@ impl Server { pub struct Client { connection_info: ConnectionInfo, + base_path: String, } type ReqwestQuery = Vec; @@ -194,9 +214,20 @@ impl From for ReqwestQueryParam { } } +pub enum Version { + Warp, + Axum, +} + impl Client { - pub fn new(connection_info: ConnectionInfo) -> Self { - Self { connection_info } + pub fn new(connection_info: ConnectionInfo, version: &Version) -> Self { + Self { + connection_info, + base_path: match version { + Version::Warp => "/api/".to_string(), + Version::Axum => String::new(), + }, + } } pub async fn generate_auth_key(&self, seconds_valid: i32) -> Response { @@ -235,7 +266,7 @@ impl Client { self.get("stats", Query::default()).await } - async fn get(&self, path: &str, params: Query) -> Response { + pub async fn get(&self, path: &str, params: Query) -> Response { let mut query: Query = params; if let Some(token) = &self.connection_info.api_token { @@ -271,7 +302,7 @@ impl Client { } fn base_url(&self, path: &str) -> String { - format!("http://{}/api/{path}", &self.connection_info.bind_address) + format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) } fn query_with_token(&self) -> Query { diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index d02f29374..68a295ac3 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -3,6 +3,14 @@ /// ```text /// cargo test tracker_api -- --nocapture /// ``` +/// +/// WIP. We are implementing a new API replacing Warp with Axum. +/// The new API runs in parallel until we finish all endpoints. 
+/// You can test the new API with: +/// +/// ```text +/// cargo test tracker_apis -- --nocapture +/// ``` extern crate rand; mod api; @@ -70,12 +78,12 @@ mod tracker_api { use torrust_tracker::protocol::info_hash::InfoHash; use super::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::{sample_peer, start_default_api_server, Client}; + use crate::api::{sample_peer, start_default_api_server, Client, Version}; use crate::tracker_api::{assert_token_not_valid, assert_unauthorized}; #[tokio::test] async fn should_allow_getting_tracker_statistics() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; api_server .add_torrent( @@ -84,7 +92,9 @@ mod tracker_api { ) .await; - let response = Client::new(api_server.get_connection_info()).get_tracker_statistics().await; + let response = Client::new(api_server.get_connection_info(), &Version::Warp) + .get_tracker_statistics() + .await; assert_eq!(response.status(), 200); assert_eq!( @@ -112,15 +122,15 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) .get_tracker_statistics() .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) .get_tracker_statistics() .await; @@ -136,18 +146,18 @@ mod tracker_api { use torrust_tracker::protocol::info_hash::InfoHash; use super::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::{sample_peer, 
start_default_api_server, Client, Query, QueryParam}; + use crate::api::{sample_peer, start_default_api_server, Client, Query, QueryParam, Version}; use crate::tracker_api::{assert_token_not_valid, assert_unauthorized}; #[tokio::test] async fn should_allow_getting_torrents() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); api_server.add_torrent(&info_hash, &sample_peer()).await; - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(api_server.get_connection_info(), &Version::Warp) .get_torrents(Query::empty()) .await; @@ -166,7 +176,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_limiting_the_torrents_in_the_result() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -175,7 +185,7 @@ mod tracker_api { api_server.add_torrent(&info_hash_1, &sample_peer()).await; api_server.add_torrent(&info_hash_2, &sample_peer()).await; - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(api_server.get_connection_info(), &Version::Warp) .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) .await; @@ -194,7 +204,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_the_torrents_result_pagination() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -203,7 +213,7 @@ mod tracker_api { api_server.add_torrent(&info_hash_1, &sample_peer()).await; api_server.add_torrent(&info_hash_2, 
&sample_peer()).await; - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(api_server.get_connection_info(), &Version::Warp) .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) .await; @@ -222,15 +232,15 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_getting_torrents_for_unauthenticated_users() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) .get_torrents(Query::empty()) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) .get_torrents(Query::default()) .await; @@ -239,7 +249,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_getting_a_torrent_info() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -247,7 +257,7 @@ mod tracker_api { api_server.add_torrent(&info_hash, &peer).await; - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(api_server.get_connection_info(), &Version::Warp) .get_torrent(&info_hash.to_string()) .await; @@ -266,19 +276,19 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); api_server.add_torrent(&info_hash, &sample_peer()).await; - let 
response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) .get_torrent(&info_hash.to_string()) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) .get_torrent(&info_hash.to_string()) .await; @@ -292,16 +302,16 @@ mod tracker_api { use torrust_tracker::protocol::info_hash::InfoHash; use super::{assert_token_not_valid, connection_with_invalid_token, connection_with_no_token}; - use crate::api::{start_default_api_server, Client}; + use crate::api::{start_default_api_server, Client, Version}; use crate::tracker_api::assert_unauthorized; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let res = Client::new(api_server.get_connection_info()) + let res = Client::new(api_server.get_connection_info(), &Version::Warp) .whitelist_a_torrent(&info_hash) .await; @@ -316,11 +326,11 @@ mod tracker_api { #[tokio::test] async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let api_client = Client::new(api_server.get_connection_info()); + let api_client = Client::new(api_server.get_connection_info(), &Version::Warp); let res = api_client.whitelist_a_torrent(&info_hash).await; assert_eq!(res.status(), 200); @@ -331,17 +341,17 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { - let 
api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) .whitelist_a_torrent(&info_hash) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) .whitelist_a_torrent(&info_hash) .await; @@ -350,13 +360,13 @@ mod tracker_api { #[tokio::test] async fn should_allow_removing_a_torrent_from_the_whitelist() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(api_server.get_connection_info(), &Version::Warp) .remove_torrent_from_whitelist(&hash) .await; @@ -366,20 +376,20 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) 
.remove_torrent_from_whitelist(&hash) .await; assert_token_not_valid(response).await; api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) .remove_torrent_from_whitelist(&hash) .await; @@ -388,13 +398,15 @@ mod tracker_api { #[tokio::test] async fn should_allow_reload_the_whitelist_from_the_database() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(api_server.get_connection_info()).reload_whitelist().await; + let response = Client::new(api_server.get_connection_info(), &Version::Warp) + .reload_whitelist() + .await; assert_eq!(response.status(), 200); /* This assert fails because the whitelist has not been reloaded yet. 
@@ -417,16 +429,16 @@ mod tracker_api { use torrust_tracker::tracker::auth::Key; use super::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::{start_default_api_server, Client}; + use crate::api::{start_default_api_server, Client, Version}; use crate::tracker_api::{assert_token_not_valid, assert_unauthorized}; #[tokio::test] async fn should_allow_generating_a_new_auth_key() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let seconds_valid = 60; - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(api_server.get_connection_info(), &Version::Warp) .generate_auth_key(seconds_valid) .await; @@ -440,17 +452,17 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let seconds_valid = 60; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) .generate_auth_key(seconds_valid) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) .generate_auth_key(seconds_valid) .await; @@ -459,7 +471,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_deleting_an_auth_key() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let seconds_valid = 60; let auth_key = api_server @@ -468,7 +480,7 @@ mod tracker_api { .await .unwrap(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(api_server.get_connection_info(), &Version::Warp) 
.delete_auth_key(&auth_key.key) .await; @@ -478,7 +490,7 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let seconds_valid = 60; @@ -489,7 +501,7 @@ mod tracker_api { .await .unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) .delete_auth_key(&auth_key.key) .await; @@ -502,7 +514,7 @@ mod tracker_api { .await .unwrap(); - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) .delete_auth_key(&auth_key.key) .await; @@ -511,7 +523,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_reloading_keys() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let seconds_valid = 60; api_server @@ -520,14 +532,16 @@ mod tracker_api { .await .unwrap(); - let response = Client::new(api_server.get_connection_info()).reload_keys().await; + let response = Client::new(api_server.get_connection_info(), &Version::Warp) + .reload_keys() + .await; assert_eq!(response.status(), 200); } #[tokio::test] async fn should_not_allow_reloading_keys_for_unauthenticated_users() { - let api_server = start_default_api_server().await; + let api_server = start_default_api_server(&Version::Warp).await; let seconds_valid = 60; api_server @@ -536,13 +550,13 @@ mod tracker_api { .await .unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) .reload_keys() .await; assert_token_not_valid(response).await; - let 
response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) .reload_keys() .await; @@ -550,3 +564,51 @@ mod tracker_api { } } } + +mod tracker_apis { + + /* + + Endpoints: + + Root: + - [x] GET / + + Stats: + - [ ] GET /api/stats + + Torrents: + - [ ] GET /api/torrents?offset=:u32&limit=:u32 + - [ ] GET /api/torrent/:info_hash + + Whitelisted torrents: + - [ ] POST /api/whitelist/:info_hash + - [ ] DELETE /api/whitelist/:info_hash + + Whitelist commands: + - [ ] GET /api/whitelist/reload + + Keys: + - [ ] POST /api/key/:seconds_valid + - [ ] DELETE /api/key/:key + + Key commands + - [ ] GET /api/keys/reload + + */ + + mod for_entrypoint { + use crate::api::{start_default_api_server, Client, Query, Version}; + + #[tokio::test] + async fn test_entrypoint() { + let api_server = start_default_api_server(&Version::Axum).await; + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .get("/", Query::default()) + .await; + + assert_eq!(response.status(), 200); + } + } +} From 5ee3f93fb515cc26a43a5cab45d84a2011c0cbb7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jan 2023 17:30:23 +0000 Subject: [PATCH 220/435] refactor(api): [#143] extract mods for API testing Code for API testing have been reorganized. 
--- src/apis/mod.rs | 2 +- src/apis/server.rs | 2 + src/jobs/mod.rs | 2 +- src/lib.rs | 2 +- tests/api/asserts.rs | 17 ++ tests/api/client.rs | 162 ++++++++++++++++++ tests/api/connection_info.rs | 29 ++++ tests/api/fixtures.rs | 17 ++ tests/api/mod.rs | 314 +---------------------------------- tests/api/server.rs | 115 +++++++++++++ tests/tracker_api.rs | 109 ++++++------ 11 files changed, 397 insertions(+), 374 deletions(-) create mode 100644 tests/api/asserts.rs create mode 100644 tests/api/client.rs create mode 100644 tests/api/connection_info.rs create mode 100644 tests/api/fixtures.rs create mode 100644 tests/api/server.rs diff --git a/src/apis/mod.rs b/src/apis/mod.rs index c2ee0fc38..f2ec6ffbd 100644 --- a/src/apis/mod.rs +++ b/src/apis/mod.rs @@ -1,2 +1,2 @@ -pub mod server; pub mod routes; +pub mod server; diff --git a/src/apis/server.rs b/src/apis/server.rs index 3bef75367..d42ae8950 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -25,6 +25,8 @@ pub fn start_tls( _ssl_key_path: &str, _tracker: &Arc, ) -> impl Future> { + // todo: for the time being, it's just a copy & paste from start(...). 
+ let app = Router::new().route("/", get(root)); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); diff --git a/src/jobs/mod.rs b/src/jobs/mod.rs index a06e7d53c..6f9b12bac 100644 --- a/src/jobs/mod.rs +++ b/src/jobs/mod.rs @@ -1,5 +1,5 @@ pub mod http_tracker; pub mod torrent_cleanup; pub mod tracker_api; -pub mod udp_tracker; pub mod tracker_apis; +pub mod udp_tracker; diff --git a/src/lib.rs b/src/lib.rs index 6edb96dfd..ebf589aa9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,5 @@ pub mod api; +pub mod apis; pub mod config; pub mod databases; pub mod http; @@ -9,7 +10,6 @@ pub mod setup; pub mod stats; pub mod tracker; pub mod udp; -pub mod apis; #[macro_use] extern crate lazy_static; diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs new file mode 100644 index 000000000..5d664d5c4 --- /dev/null +++ b/tests/api/asserts.rs @@ -0,0 +1,17 @@ +use reqwest::Response; + +pub async fn assert_token_not_valid(response: Response) { + assert_eq!(response.status(), 500); + assert_eq!( + response.text().await.unwrap(), + "Unhandled rejection: Err { reason: \"token not valid\" }" + ); +} + +pub async fn assert_unauthorized(response: Response) { + assert_eq!(response.status(), 500); + assert_eq!( + response.text().await.unwrap(), + "Unhandled rejection: Err { reason: \"unauthorized\" }" + ); +} diff --git a/tests/api/client.rs b/tests/api/client.rs new file mode 100644 index 000000000..e507d817f --- /dev/null +++ b/tests/api/client.rs @@ -0,0 +1,162 @@ +use reqwest::Response; + +use super::connection_info::ConnectionInfo; +use super::Version; + +pub struct Client { + connection_info: ConnectionInfo, + base_path: String, +} + +type ReqwestQuery = Vec; +type ReqwestQueryParam = (String, String); + +#[derive(Default, Debug)] +pub struct Query { + params: Vec, +} + +impl Query { + pub fn empty() -> Self { + Self { params: vec![] } + } + + pub fn params(params: Vec) -> Self { + Self { params } + } + + pub fn add_param(&mut self, param: 
QueryParam) { + self.params.push(param); + } + + fn with_token(token: &str) -> Self { + Self { + params: vec![QueryParam::new("token", token)], + } + } +} + +impl From for ReqwestQuery { + fn from(url_search_params: Query) -> Self { + url_search_params + .params + .iter() + .map(|param| ReqwestQueryParam::from((*param).clone())) + .collect() + } +} + +#[derive(Clone, Debug)] +pub struct QueryParam { + name: String, + value: String, +} + +impl QueryParam { + pub fn new(name: &str, value: &str) -> Self { + Self { + name: name.to_string(), + value: value.to_string(), + } + } +} + +impl From for ReqwestQueryParam { + fn from(param: QueryParam) -> Self { + (param.name, param.value) + } +} + +impl Client { + pub fn new(connection_info: ConnectionInfo, version: &Version) -> Self { + Self { + connection_info, + base_path: match version { + Version::Warp => "/api/".to_string(), + Version::Axum => "/".to_string(), + }, + } + } + + pub async fn generate_auth_key(&self, seconds_valid: i32) -> Response { + self.post(&format!("key/{}", &seconds_valid)).await + } + + pub async fn delete_auth_key(&self, key: &str) -> Response { + self.delete(&format!("key/{}", &key)).await + } + + pub async fn reload_keys(&self) -> Response { + self.get("keys/reload", Query::default()).await + } + + pub async fn whitelist_a_torrent(&self, info_hash: &str) -> Response { + self.post(&format!("whitelist/{}", &info_hash)).await + } + + pub async fn remove_torrent_from_whitelist(&self, info_hash: &str) -> Response { + self.delete(&format!("whitelist/{}", &info_hash)).await + } + + pub async fn reload_whitelist(&self) -> Response { + self.get("whitelist/reload", Query::default()).await + } + + pub async fn get_torrent(&self, info_hash: &str) -> Response { + self.get(&format!("torrent/{}", &info_hash), Query::default()).await + } + + pub async fn get_torrents(&self, params: Query) -> Response { + self.get("torrents", params).await + } + + pub async fn get_tracker_statistics(&self) -> Response { + 
self.get("stats", Query::default()).await + } + + pub async fn get(&self, path: &str, params: Query) -> Response { + let mut query: Query = params; + + if let Some(token) = &self.connection_info.api_token { + query.add_param(QueryParam::new("token", token)); + }; + + reqwest::Client::builder() + .build() + .unwrap() + .get(self.base_url(path)) + .query(&ReqwestQuery::from(query)) + .send() + .await + .unwrap() + } + + async fn post(&self, path: &str) -> Response { + reqwest::Client::new() + .post(self.base_url(path).clone()) + .query(&ReqwestQuery::from(self.query_with_token())) + .send() + .await + .unwrap() + } + + async fn delete(&self, path: &str) -> Response { + reqwest::Client::new() + .delete(self.base_url(path).clone()) + .query(&ReqwestQuery::from(self.query_with_token())) + .send() + .await + .unwrap() + } + + fn base_url(&self, path: &str) -> String { + format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) + } + + fn query_with_token(&self) -> Query { + match &self.connection_info.api_token { + Some(token) => Query::with_token(token), + None => Query::default(), + } + } +} diff --git a/tests/api/connection_info.rs b/tests/api/connection_info.rs new file mode 100644 index 000000000..35314a2fd --- /dev/null +++ b/tests/api/connection_info.rs @@ -0,0 +1,29 @@ +pub fn connection_with_invalid_token(bind_address: &str) -> ConnectionInfo { + ConnectionInfo::authenticated(bind_address, "invalid token") +} + +pub fn connection_with_no_token(bind_address: &str) -> ConnectionInfo { + ConnectionInfo::anonymous(bind_address) +} + +#[derive(Clone)] +pub struct ConnectionInfo { + pub bind_address: String, + pub api_token: Option, +} + +impl ConnectionInfo { + pub fn authenticated(bind_address: &str, api_token: &str) -> Self { + Self { + bind_address: bind_address.to_string(), + api_token: Some(api_token.to_string()), + } + } + + pub fn anonymous(bind_address: &str) -> Self { + Self { + bind_address: bind_address.to_string(), + api_token: 
None, + } + } +} diff --git a/tests/api/fixtures.rs b/tests/api/fixtures.rs new file mode 100644 index 000000000..fa6099309 --- /dev/null +++ b/tests/api/fixtures.rs @@ -0,0 +1,17 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; +use torrust_tracker::tracker::peer; + +pub fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + } +} diff --git a/tests/api/mod.rs b/tests/api/mod.rs index 49fde7a81..52980581f 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -1,314 +1,10 @@ -use core::panic; -use std::env; -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::sync::Arc; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use reqwest::Response; -use torrust_tracker::config::Configuration; -use torrust_tracker::jobs::{tracker_api, tracker_apis}; -use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; -use torrust_tracker::protocol::info_hash::InfoHash; -use torrust_tracker::tracker::peer::{self, Peer}; -use torrust_tracker::tracker::statistics::Keeper; -use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; - -use crate::common::ephemeral_random_port; - -pub fn sample_peer() -> peer::Peer { - peer::Peer { - peer_id: peer::Id(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - } -} - -pub fn tracker_configuration() -> Arc { - let mut config = Configuration { - log_level: 
Some("off".to_owned()), - ..Default::default() - }; - - // Ephemeral socket address - let port = ephemeral_random_port(); - config.http_api.bind_address = format!("127.0.0.1:{}", &port); - - // Ephemeral database - let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join(format!("data_{}.db", &port)); - config.db_path = temp_file.to_str().unwrap().to_owned(); - - Arc::new(config) -} - -#[derive(Clone)] -pub struct ConnectionInfo { - pub bind_address: String, - pub api_token: Option, -} - -impl ConnectionInfo { - pub fn authenticated(bind_address: &str, api_token: &str) -> Self { - Self { - bind_address: bind_address.to_string(), - api_token: Some(api_token.to_string()), - } - } - - pub fn anonymous(bind_address: &str) -> Self { - Self { - bind_address: bind_address.to_string(), - api_token: None, - } - } -} - -pub async fn start_default_api_server(version: &Version) -> Server { - let configuration = tracker_configuration(); - start_custom_api_server(configuration.clone(), version).await -} - -pub async fn start_custom_api_server(configuration: Arc, version: &Version) -> Server { - match &version { - Version::Warp => start_warp_api(configuration).await, - Version::Axum => start_axum_api(configuration).await, - } -} - -async fn start_warp_api(configuration: Arc) -> Server { - let server = start(&configuration); - - // Start the HTTP API job - tracker_api::start_job(&configuration.http_api, server.tracker.clone()).await; - - server -} - -async fn start_axum_api(configuration: Arc) -> Server { - let server = start(&configuration); - - // Start HTTP APIs server (multiple API versions) - // Temporarily run the new API on a port number after the current API port - tracker_apis::start_job(&configuration.http_api, server.tracker.clone()).await; - - server -} - -fn start(configuration: &Arc) -> Server { - let connection_info = ConnectionInfo::authenticated( - &configuration.http_api.bind_address.clone(), - 
&configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(), - ); - - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; - - // Initialize logging - logging::setup(configuration); - - Server { - tracker, - connection_info, - } -} - -pub struct Server { - pub tracker: Arc, - pub connection_info: ConnectionInfo, -} - -impl Server { - pub fn get_connection_info(&self) -> ConnectionInfo { - self.connection_info.clone() - } - - pub fn get_bind_address(&self) -> String { - self.connection_info.bind_address.clone() - } - - /// Add a torrent to the tracker - pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) { - self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - } -} - -pub struct Client { - connection_info: ConnectionInfo, - base_path: String, -} - -type ReqwestQuery = Vec; -type ReqwestQueryParam = (String, String); - -#[derive(Default, Debug)] -pub struct Query { - params: Vec, -} - -impl Query { - pub fn empty() -> Self { - Self { params: vec![] } - } - - pub fn params(params: Vec) -> Self { - Self { params } - } - - pub fn add_param(&mut self, param: QueryParam) { - self.params.push(param); - } - - fn with_token(token: &str) -> Self { - Self { - params: vec![QueryParam::new("token", token)], - } - } -} - -impl From for ReqwestQuery { - fn from(url_search_params: Query) -> Self { - url_search_params - .params - .iter() - .map(|param| ReqwestQueryParam::from((*param).clone())) - .collect() - } -} - -#[derive(Clone, 
Debug)] -pub struct QueryParam { - name: String, - value: String, -} - -impl QueryParam { - pub fn new(name: &str, value: &str) -> Self { - Self { - name: name.to_string(), - value: value.to_string(), - } - } -} - -impl From for ReqwestQueryParam { - fn from(param: QueryParam) -> Self { - (param.name, param.value) - } -} +pub mod asserts; +pub mod client; +pub mod connection_info; +pub mod fixtures; +pub mod server; pub enum Version { Warp, Axum, } - -impl Client { - pub fn new(connection_info: ConnectionInfo, version: &Version) -> Self { - Self { - connection_info, - base_path: match version { - Version::Warp => "/api/".to_string(), - Version::Axum => String::new(), - }, - } - } - - pub async fn generate_auth_key(&self, seconds_valid: i32) -> Response { - self.post(&format!("key/{}", &seconds_valid)).await - } - - pub async fn delete_auth_key(&self, key: &str) -> Response { - self.delete(&format!("key/{}", &key)).await - } - - pub async fn reload_keys(&self) -> Response { - self.get("keys/reload", Query::default()).await - } - - pub async fn whitelist_a_torrent(&self, info_hash: &str) -> Response { - self.post(&format!("whitelist/{}", &info_hash)).await - } - - pub async fn remove_torrent_from_whitelist(&self, info_hash: &str) -> Response { - self.delete(&format!("whitelist/{}", &info_hash)).await - } - - pub async fn reload_whitelist(&self) -> Response { - self.get("whitelist/reload", Query::default()).await - } - - pub async fn get_torrent(&self, info_hash: &str) -> Response { - self.get(&format!("torrent/{}", &info_hash), Query::default()).await - } - - pub async fn get_torrents(&self, params: Query) -> Response { - self.get("torrents", params).await - } - - pub async fn get_tracker_statistics(&self) -> Response { - self.get("stats", Query::default()).await - } - - pub async fn get(&self, path: &str, params: Query) -> Response { - let mut query: Query = params; - - if let Some(token) = &self.connection_info.api_token { - query.add_param(QueryParam::new("token", 
token)); - }; - - reqwest::Client::builder() - .build() - .unwrap() - .get(self.base_url(path)) - .query(&ReqwestQuery::from(query)) - .send() - .await - .unwrap() - } - - async fn post(&self, path: &str) -> Response { - reqwest::Client::new() - .post(self.base_url(path).clone()) - .query(&ReqwestQuery::from(self.query_with_token())) - .send() - .await - .unwrap() - } - - async fn delete(&self, path: &str) -> Response { - reqwest::Client::new() - .delete(self.base_url(path).clone()) - .query(&ReqwestQuery::from(self.query_with_token())) - .send() - .await - .unwrap() - } - - fn base_url(&self, path: &str) -> String { - format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) - } - - fn query_with_token(&self) -> Query { - match &self.connection_info.api_token { - Some(token) => Query::with_token(token), - None => Query::default(), - } - } -} diff --git a/tests/api/server.rs b/tests/api/server.rs new file mode 100644 index 000000000..338b068c8 --- /dev/null +++ b/tests/api/server.rs @@ -0,0 +1,115 @@ +use core::panic; +use std::env; +use std::sync::Arc; + +use torrust_tracker::config::Configuration; +use torrust_tracker::jobs::{tracker_api, tracker_apis}; +use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::tracker::peer::Peer; +use torrust_tracker::tracker::statistics::Keeper; +use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; + +use super::connection_info::ConnectionInfo; +use super::Version; +use crate::common::ephemeral_random_port; + +pub fn tracker_configuration() -> Arc { + let mut config = Configuration { + log_level: Some("off".to_owned()), + ..Default::default() + }; + + // Ephemeral socket address + let port = ephemeral_random_port(); + config.http_api.bind_address = format!("127.0.0.1:{}", &port); + + // Ephemeral database + let temp_directory = env::temp_dir(); + let temp_file = temp_directory.join(format!("data_{}.db", &port)); + config.db_path = 
temp_file.to_str().unwrap().to_owned(); + + Arc::new(config) +} + +pub async fn start_default_api(version: &Version) -> Server { + let configuration = tracker_configuration(); + start_custom_api(configuration.clone(), version).await +} + +pub async fn start_custom_api(configuration: Arc, version: &Version) -> Server { + match &version { + Version::Warp => start_warp_api(configuration).await, + Version::Axum => start_axum_api(configuration).await, + } +} + +async fn start_warp_api(configuration: Arc) -> Server { + let server = start(&configuration); + + // Start the HTTP API job + tracker_api::start_job(&configuration.http_api, server.tracker.clone()).await; + + server +} + +async fn start_axum_api(configuration: Arc) -> Server { + let server = start(&configuration); + + // Start HTTP APIs server (multiple API versions) + // Temporarily run the new API on a port number after the current API port + tracker_apis::start_job(&configuration.http_api, server.tracker.clone()).await; + + server +} + +fn start(configuration: &Arc) -> Server { + let connection_info = ConnectionInfo::authenticated( + &configuration.http_api.bind_address.clone(), + &configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(), + ); + + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + let tracker = match tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; + + // Initialize logging + logging::setup(configuration); + + Server { + tracker, + connection_info, + } +} + +pub struct Server { + pub tracker: Arc, + pub connection_info: ConnectionInfo, +} + +impl 
Server { + pub fn get_connection_info(&self) -> ConnectionInfo { + self.connection_info.clone() + } + + pub fn get_bind_address(&self) -> String { + self.connection_info.bind_address.clone() + } + + /// Add a torrent to the tracker + pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) { + self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + } +} diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 68a295ac3..5f022167b 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -43,47 +43,22 @@ mod tracker_api { */ - use reqwest::Response; - - use crate::api::ConnectionInfo; - - async fn assert_token_not_valid(response: Response) { - assert_eq!(response.status(), 500); - assert_eq!( - response.text().await.unwrap(), - "Unhandled rejection: Err { reason: \"token not valid\" }" - ); - } - - async fn assert_unauthorized(response: Response) { - assert_eq!(response.status(), 500); - assert_eq!( - response.text().await.unwrap(), - "Unhandled rejection: Err { reason: \"unauthorized\" }" - ); - } - - fn connection_with_invalid_token(bind_address: &str) -> ConnectionInfo { - ConnectionInfo::authenticated(bind_address, "invalid token") - } - - fn connection_with_no_token(bind_address: &str) -> ConnectionInfo { - ConnectionInfo::anonymous(bind_address) - } - mod for_stats_resources { use std::str::FromStr; use torrust_tracker::api::resource::stats::Stats; use torrust_tracker::protocol::info_hash::InfoHash; - use super::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::{sample_peer, start_default_api_server, Client, Version}; - use crate::tracker_api::{assert_token_not_valid, assert_unauthorized}; + use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::client::Client; + use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; + use crate::api::fixtures::sample_peer; + use crate::api::server::start_default_api; + use 
crate::api::Version; #[tokio::test] async fn should_allow_getting_tracker_statistics() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; api_server .add_torrent( @@ -122,7 +97,7 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) .get_tracker_statistics() @@ -145,13 +120,16 @@ mod tracker_api { use torrust_tracker::api::resource::torrent::{self, Torrent}; use torrust_tracker::protocol::info_hash::InfoHash; - use super::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::{sample_peer, start_default_api_server, Client, Query, QueryParam, Version}; - use crate::tracker_api::{assert_token_not_valid, assert_unauthorized}; + use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::client::{Client, Query, QueryParam}; + use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; + use crate::api::fixtures::sample_peer; + use crate::api::server::start_default_api; + use crate::api::Version; #[tokio::test] async fn should_allow_getting_torrents() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -176,7 +154,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_limiting_the_torrents_in_the_result() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; // torrents are ordered alphabetically by infohashes let info_hash_1 = 
InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -204,7 +182,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_the_torrents_result_pagination() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -232,7 +210,7 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_getting_torrents_for_unauthenticated_users() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) .get_torrents(Query::empty()) @@ -249,7 +227,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_getting_a_torrent_info() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -276,7 +254,7 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -301,13 +279,15 @@ mod tracker_api { use torrust_tracker::protocol::info_hash::InfoHash; - use super::{assert_token_not_valid, connection_with_invalid_token, connection_with_no_token}; - use crate::api::{start_default_api_server, Client, Version}; - use crate::tracker_api::assert_unauthorized; + use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::client::Client; + use crate::api::connection_info::{connection_with_invalid_token, 
connection_with_no_token}; + use crate::api::server::start_default_api; + use crate::api::Version; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -326,7 +306,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -341,7 +321,7 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -360,7 +340,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_removing_a_torrent_from_the_whitelist() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -376,7 +356,7 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -398,7 +378,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_reload_the_whitelist_from_the_database() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = 
start_default_api(&Version::Warp).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -428,13 +408,15 @@ mod tracker_api { use torrust_tracker::api::resource::auth_key::AuthKey; use torrust_tracker::tracker::auth::Key; - use super::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::{start_default_api_server, Client, Version}; - use crate::tracker_api::{assert_token_not_valid, assert_unauthorized}; + use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::client::Client; + use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; + use crate::api::server::start_default_api; + use crate::api::Version; #[tokio::test] async fn should_allow_generating_a_new_auth_key() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let seconds_valid = 60; @@ -452,7 +434,7 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let seconds_valid = 60; @@ -471,7 +453,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_deleting_an_auth_key() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let seconds_valid = 60; let auth_key = api_server @@ -490,7 +472,7 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let seconds_valid = 60; @@ -523,7 +505,7 @@ mod tracker_api { #[tokio::test] async fn should_allow_reloading_keys() { - let api_server = start_default_api_server(&Version::Warp).await; 
+ let api_server = start_default_api(&Version::Warp).await; let seconds_valid = 60; api_server @@ -541,7 +523,7 @@ mod tracker_api { #[tokio::test] async fn should_not_allow_reloading_keys_for_unauthenticated_users() { - let api_server = start_default_api_server(&Version::Warp).await; + let api_server = start_default_api(&Version::Warp).await; let seconds_valid = 60; api_server @@ -565,13 +547,14 @@ mod tracker_api { } } +/// The new API implementation using Axum mod tracker_apis { /* Endpoints: - Root: + Root (dummy endpoint to test Axum configuration. To be removed): - [x] GET / Stats: @@ -598,7 +581,9 @@ mod tracker_apis { */ mod for_entrypoint { - use crate::api::{start_default_api_server, Client, Query, Version}; + use crate::api::client::{Client, Query}; + use crate::api::server::start_default_api_server; + use crate::api::Version; #[tokio::test] async fn test_entrypoint() { From 6a9e2d5a9f1396e4486b95d2e187740f368549bf Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jan 2023 18:06:38 +0000 Subject: [PATCH 221/435] feat(api): [#143] axum api, GET /stats endpoint --- src/apis/routes.rs | 58 ++++++++++++++++++++++++++++++++++++++++++++ src/apis/server.rs | 8 +++--- tests/tracker_api.rs | 57 ++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 117 insertions(+), 6 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 2db23c35f..1b40ac47e 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -1,7 +1,65 @@ +use std::sync::Arc; + +use axum::extract::State; use axum::response::Json; use serde_json::{json, Value}; +use crate::api::resource::stats::Stats; +use crate::tracker::Tracker; + #[allow(clippy::unused_async)] pub async fn root() -> Json { Json(json!({ "data": 42 })) } + +#[allow(clippy::unused_async)] +pub async fn get_stats(State(tracker): State>) -> Json { + let mut results = Stats { + torrents: 0, + seeders: 0, + completed: 0, + leechers: 0, + tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + 
tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + udp6_announces_handled: 0, + udp6_scrapes_handled: 0, + }; + + let db = tracker.get_torrents().await; + + db.values().for_each(|torrent_entry| { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + results.seeders += seeders; + results.completed += completed; + results.leechers += leechers; + results.torrents += 1; + }); + + let stats = tracker.get_stats().await; + + #[allow(clippy::cast_possible_truncation)] + { + results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; + results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; + results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; + results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; + results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; + results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; + results.udp4_connections_handled = stats.udp4_connections_handled as u32; + results.udp4_announces_handled = stats.udp4_announces_handled as u32; + results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; + results.udp6_connections_handled = stats.udp6_connections_handled as u32; + results.udp6_announces_handled = stats.udp6_announces_handled as u32; + results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; + } + + Json(json!(results)) +} diff --git a/src/apis/server.rs b/src/apis/server.rs index d42ae8950..fb532519f 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -6,11 +6,13 @@ use axum::Router; use futures::Future; use warp::hyper; -use super::routes::root; +use super::routes::{get_stats, root}; use crate::tracker; -pub fn start(socket_addr: SocketAddr, _tracker: &Arc) -> impl Future> { - let app = Router::new().route("/", get(root)); +pub fn 
start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { + let app = Router::new() + .route("/", get(root)) + .route("/stats", get(get_stats).with_state(tracker.clone())); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 5f022167b..bac9d1324 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -582,18 +582,69 @@ mod tracker_apis { mod for_entrypoint { use crate::api::client::{Client, Query}; - use crate::api::server::start_default_api_server; + use crate::api::server::start_default_api; use crate::api::Version; #[tokio::test] async fn test_entrypoint() { - let api_server = start_default_api_server(&Version::Axum).await; + let api_server = start_default_api(&Version::Axum).await; let response = Client::new(api_server.get_connection_info(), &Version::Axum) - .get("/", Query::default()) + .get("", Query::default()) .await; assert_eq!(response.status(), 200); } } + + mod for_stats_resources { + use std::str::FromStr; + + use torrust_tracker::api::resource::stats::Stats; + use torrust_tracker::protocol::info_hash::InfoHash; + + use crate::api::client::Client; + use crate::api::fixtures::sample_peer; + use crate::api::server::start_default_api; + use crate::api::Version; + + #[tokio::test] + async fn should_allow_getting_tracker_statistics() { + let api_server = start_default_api(&Version::Axum).await; + + api_server + .add_torrent( + &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + &sample_peer(), + ) + .await; + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .get_tracker_statistics() + .await; + + assert_eq!(response.status(), 200); + assert_eq!( + response.json::().await.unwrap(), + Stats { + torrents: 1, + seeders: 1, + completed: 0, + leechers: 0, + tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + 
tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + udp6_announces_handled: 0, + udp6_scrapes_handled: 0, + } + ); + } + } } From 7331c82d13f7ef01028c5600ecaa966ac73ef955 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 3 Jan 2023 09:25:10 +0000 Subject: [PATCH 222/435] refactor: [#143] replace unwrap with expect --- src/setup.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/setup.rs b/src/setup.rs index 84a1d1c3c..daee7eea8 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -60,7 +60,9 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Ve if config.http_api.enabled { // Temporarily running the new API in the 1313 port let bind_address = config.http_api.bind_address.clone(); - let mut bind_socket: SocketAddr = bind_address.parse().unwrap(); + let mut bind_socket: SocketAddr = bind_address + .parse() + .expect("bind address should be a valid socket address, for example 127.0.0.1:8080"); bind_socket.set_port(1313); let mut http_apis_config = config.http_api.clone(); From 0615c9f028853285fbc6aabac604a623c4d278bc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 3 Jan 2023 14:10:54 +0000 Subject: [PATCH 223/435] refactor(api): [#143] remove duplicate code - Extract domain logic: `Tracker::get_torrents_metrics`. - Move domain logic from web framework controllers to domain services: `get_metrics`. - Remove duplicate code in current Warp API and new Axum API. 
--- src/api/resource/stats.rs | 57 +++++++++++++++++++++--------- src/api/routes.rs | 50 ++------------------------ src/apis/routes.rs | 50 ++------------------------ src/tracker/mod.rs | 29 +++++++++++++++ src/tracker/services/mod.rs | 1 + src/tracker/services/statistics.rs | 32 +++++++++++++++++ 6 files changed, 107 insertions(+), 112 deletions(-) create mode 100644 src/tracker/services/mod.rs create mode 100644 src/tracker/services/statistics.rs diff --git a/src/api/resource/stats.rs b/src/api/resource/stats.rs index e87f08f63..c861876fa 100644 --- a/src/api/resource/stats.rs +++ b/src/api/resource/stats.rs @@ -1,21 +1,46 @@ use serde::{Deserialize, Serialize}; +use crate::tracker::services::statistics::TrackerMetrics; + #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct Stats { - pub torrents: u32, - pub seeders: u32, - pub completed: u32, - pub leechers: u32, - pub tcp4_connections_handled: u32, - pub tcp4_announces_handled: u32, - pub tcp4_scrapes_handled: u32, - pub tcp6_connections_handled: u32, - pub tcp6_announces_handled: u32, - pub tcp6_scrapes_handled: u32, - pub udp4_connections_handled: u32, - pub udp4_announces_handled: u32, - pub udp4_scrapes_handled: u32, - pub udp6_connections_handled: u32, - pub udp6_announces_handled: u32, - pub udp6_scrapes_handled: u32, + pub torrents: u64, + pub seeders: u64, + pub completed: u64, + pub leechers: u64, + pub tcp4_connections_handled: u64, + pub tcp4_announces_handled: u64, + pub tcp4_scrapes_handled: u64, + pub tcp6_connections_handled: u64, + pub tcp6_announces_handled: u64, + pub tcp6_scrapes_handled: u64, + pub udp4_connections_handled: u64, + pub udp4_announces_handled: u64, + pub udp4_scrapes_handled: u64, + pub udp6_connections_handled: u64, + pub udp6_announces_handled: u64, + pub udp6_scrapes_handled: u64, +} + +impl From for Stats { + fn from(metrics: TrackerMetrics) -> Self { + Self { + torrents: metrics.torrents_metrics.torrents, + seeders: metrics.torrents_metrics.seeders, + 
completed: metrics.torrents_metrics.completed, + leechers: metrics.torrents_metrics.leechers, + tcp4_connections_handled: metrics.protocol_metrics.tcp4_connections_handled, + tcp4_announces_handled: metrics.protocol_metrics.tcp4_announces_handled, + tcp4_scrapes_handled: metrics.protocol_metrics.tcp4_scrapes_handled, + tcp6_connections_handled: metrics.protocol_metrics.tcp6_connections_handled, + tcp6_announces_handled: metrics.protocol_metrics.tcp6_announces_handled, + tcp6_scrapes_handled: metrics.protocol_metrics.tcp6_scrapes_handled, + udp4_connections_handled: metrics.protocol_metrics.udp4_connections_handled, + udp4_announces_handled: metrics.protocol_metrics.udp4_announces_handled, + udp4_scrapes_handled: metrics.protocol_metrics.udp4_scrapes_handled, + udp6_connections_handled: metrics.protocol_metrics.udp6_connections_handled, + udp6_announces_handled: metrics.protocol_metrics.udp6_announces_handled, + udp6_scrapes_handled: metrics.protocol_metrics.udp6_scrapes_handled, + } + } } diff --git a/src/api/routes.rs b/src/api/routes.rs index 76b449e9b..73f1269ef 100644 --- a/src/api/routes.rs +++ b/src/api/routes.rs @@ -13,6 +13,7 @@ use super::resource::torrent::{ListItem, Torrent}; use super::{ActionStatus, TorrentInfoQuery}; use crate::protocol::info_hash::InfoHash; use crate::tracker; +use crate::tracker::services::statistics::get_metrics; fn authenticate(tokens: HashMap) -> impl Filter + Clone { #[derive(Deserialize)] @@ -91,54 +92,7 @@ pub fn routes(tracker: &Arc) -> impl Filter| async move { - let mut results = Stats { - torrents: 0, - seeders: 0, - completed: 0, - leechers: 0, - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 0, - udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0, - }; - - let db = 
tracker.get_torrents().await; - - db.values().for_each(|torrent_entry| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - results.seeders += seeders; - results.completed += completed; - results.leechers += leechers; - results.torrents += 1; - }); - - let stats = tracker.get_stats().await; - - #[allow(clippy::cast_possible_truncation)] - { - results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; - results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; - results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; - results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; - results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; - results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; - results.udp4_connections_handled = stats.udp4_connections_handled as u32; - results.udp4_announces_handled = stats.udp4_announces_handled as u32; - results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; - results.udp6_connections_handled = stats.udp6_connections_handled as u32; - results.udp6_announces_handled = stats.udp6_announces_handled as u32; - results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; - } - - Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) + Result::<_, warp::reject::Rejection>::Ok(reply::json(&Stats::from(get_metrics(tracker.clone()).await))) }); // GET /api/torrent/:info_hash diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 1b40ac47e..58eefa8b0 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -5,6 +5,7 @@ use axum::response::Json; use serde_json::{json, Value}; use crate::api::resource::stats::Stats; +use crate::tracker::services::statistics::get_metrics; use crate::tracker::Tracker; #[allow(clippy::unused_async)] @@ -14,52 +15,5 @@ pub async fn root() -> Json { #[allow(clippy::unused_async)] pub async fn get_stats(State(tracker): State>) -> Json { - let mut results = Stats { - torrents: 0, - seeders: 0, - 
completed: 0, - leechers: 0, - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 0, - udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0, - }; - - let db = tracker.get_torrents().await; - - db.values().for_each(|torrent_entry| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - results.seeders += seeders; - results.completed += completed; - results.leechers += leechers; - results.torrents += 1; - }); - - let stats = tracker.get_stats().await; - - #[allow(clippy::cast_possible_truncation)] - { - results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; - results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; - results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; - results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; - results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; - results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; - results.udp4_connections_handled = stats.udp4_connections_handled as u32; - results.udp4_announces_handled = stats.udp4_announces_handled as u32; - results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; - results.udp6_connections_handled = stats.udp6_connections_handled as u32; - results.udp6_announces_handled = stats.udp6_announces_handled as u32; - results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; - } - - Json(json!(results)) + Json(json!(Stats::from(get_metrics(tracker.clone()).await))) } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 4469d682b..e0ea41123 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -1,6 +1,7 @@ pub mod auth; pub mod mode; pub mod peer; +pub mod services; pub mod statistics; pub mod torrent; @@ -28,6 +29,13 @@ pub struct 
Tracker { database: Box, } +pub struct TorrentsMetrics { + pub seeders: u64, + pub completed: u64, + pub leechers: u64, + pub torrents: u64, +} + impl Tracker { /// # Errors /// @@ -277,6 +285,27 @@ impl Tracker { self.torrents.read().await } + pub async fn get_torrents_metrics(&self) -> TorrentsMetrics { + let mut torrents_metrics = TorrentsMetrics { + seeders: 0, + completed: 0, + leechers: 0, + torrents: 0, + }; + + let db = self.get_torrents().await; + + db.values().for_each(|torrent_entry| { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + torrents_metrics.seeders += u64::from(seeders); + torrents_metrics.completed += u64::from(completed); + torrents_metrics.leechers += u64::from(leechers); + torrents_metrics.torrents += 1; + }); + + torrents_metrics + } + pub async fn get_stats(&self) -> RwLockReadGuard<'_, statistics::Metrics> { self.stats_repository.get_stats().await } diff --git a/src/tracker/services/mod.rs b/src/tracker/services/mod.rs new file mode 100644 index 000000000..3449ec7b4 --- /dev/null +++ b/src/tracker/services/mod.rs @@ -0,0 +1 @@ +pub mod statistics; diff --git a/src/tracker/services/statistics.rs b/src/tracker/services/statistics.rs new file mode 100644 index 000000000..5f8f39856 --- /dev/null +++ b/src/tracker/services/statistics.rs @@ -0,0 +1,32 @@ +use std::sync::Arc; + +use crate::tracker::statistics::Metrics; +use crate::tracker::{TorrentsMetrics, Tracker}; + +pub struct TrackerMetrics { + pub torrents_metrics: TorrentsMetrics, + pub protocol_metrics: Metrics, +} + +pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { + let torrents_metrics = tracker.get_torrents_metrics().await; + let stats = tracker.get_stats().await; + + TrackerMetrics { + torrents_metrics, + protocol_metrics: Metrics { + tcp4_connections_handled: stats.tcp4_connections_handled, + tcp4_announces_handled: stats.tcp4_announces_handled, + tcp4_scrapes_handled: stats.tcp4_scrapes_handled, + tcp6_connections_handled: 
stats.tcp6_connections_handled, + tcp6_announces_handled: stats.tcp6_announces_handled, + tcp6_scrapes_handled: stats.tcp6_scrapes_handled, + udp4_connections_handled: stats.udp4_connections_handled, + udp4_announces_handled: stats.udp4_announces_handled, + udp4_scrapes_handled: stats.udp4_scrapes_handled, + udp6_connections_handled: stats.udp6_connections_handled, + udp6_announces_handled: stats.udp6_announces_handled, + udp6_scrapes_handled: stats.udp6_scrapes_handled, + }, + } +} From 0f99f7bc077d6b15494a910c930e4045f3db5613 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 3 Jan 2023 17:24:15 +0000 Subject: [PATCH 224/435] refactor: [#143] remove duplicate or unneeded code --- src/config.rs | 35 ++++++++++++++++++++++++++++++ src/tracker/mod.rs | 1 + src/tracker/services/statistics.rs | 1 + src/tracker/statistics.rs | 32 +++------------------------ src/udp/handlers.rs | 28 +++--------------------- tests/api/server.rs | 20 ++--------------- tests/common/mod.rs | 8 ------- tests/tracker_api.rs | 1 - tests/udp.rs | 31 +++++++++----------------- 9 files changed, 55 insertions(+), 102 deletions(-) delete mode 100644 tests/common/mod.rs diff --git a/src/config.rs b/src/config.rs index 820af77d8..05a446454 100644 --- a/src/config.rs +++ b/src/config.rs @@ -5,6 +5,7 @@ use std::str::FromStr; use std::{env, fs}; use config::{Config, ConfigError, File, FileFormat}; +use rand::{thread_rng, Rng}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, NoneAsEmptyString}; use {std, toml}; @@ -73,6 +74,40 @@ pub enum Error { TrackerModeIncompatible, } +/// This configuration is used for testing. It generates random config values so they do not collide +/// if you run more than one tracker at the same time. 
+/// +/// # Panics +/// +/// Will panic if it can't convert the temp file path to string +#[must_use] +pub fn ephemeral_configuration() -> Configuration { + let mut config = Configuration { + log_level: Some("off".to_owned()), + ..Default::default() + }; + + // Ephemeral socket addresses + let api_port = random_port(); + config.http_api.bind_address = format!("127.0.0.1:{}", &api_port); + let upd_port = random_port(); + config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", &upd_port); + + // Ephemeral sqlite database + let temp_directory = env::temp_dir(); + let temp_file = temp_directory.join(format!("data_{}_{}.db", &api_port, &upd_port)); + config.db_path = temp_file.to_str().unwrap().to_owned(); + + config +} + +fn random_port() -> u16 { + // todo: this may produce random test failures because two tests can try to bind the same port. + // We could create a pool of available ports (with read/write lock) + let mut rng = thread_rng(); + rng.gen_range(49152..65535) +} + impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index e0ea41123..4de168908 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -29,6 +29,7 @@ pub struct Tracker { database: Box, } +#[derive(Debug, PartialEq, Default)] pub struct TorrentsMetrics { pub seeders: u64, pub completed: u64, diff --git a/src/tracker/services/statistics.rs b/src/tracker/services/statistics.rs index 5f8f39856..696ca2ea1 100644 --- a/src/tracker/services/statistics.rs +++ b/src/tracker/services/statistics.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use crate::tracker::statistics::Metrics; use crate::tracker::{TorrentsMetrics, Tracker}; +#[derive(Debug, PartialEq)] pub struct TrackerMetrics { pub torrents_metrics: TorrentsMetrics, pub protocol_metrics: Metrics, diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index b787e1267..f9f6253fd 100644 --- a/src/tracker/statistics.rs +++ 
b/src/tracker/statistics.rs @@ -23,7 +23,7 @@ pub enum Event { Udp6Scrape, } -#[derive(Debug)] +#[derive(Debug, PartialEq, Default)] pub struct Metrics { pub tcp4_connections_handled: u64, pub tcp4_announces_handled: u64, @@ -39,32 +39,6 @@ pub struct Metrics { pub udp6_scrapes_handled: u64, } -impl Default for Metrics { - fn default() -> Self { - Self::new() - } -} - -impl Metrics { - #[must_use] - pub fn new() -> Self { - Self { - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 0, - udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0, - } - } -} - pub struct Keeper { pub repository: Repo, } @@ -187,7 +161,7 @@ impl Repo { #[must_use] pub fn new() -> Self { Self { - stats: Arc::new(RwLock::new(Metrics::new())), + stats: Arc::new(RwLock::new(Metrics::default())), } } @@ -280,7 +254,7 @@ mod tests { let stats = stats_tracker.repository.get_stats().await; - assert_eq!(stats.tcp4_announces_handled, Metrics::new().tcp4_announces_handled); + assert_eq!(stats.tcp4_announces_handled, Metrics::default().tcp4_announces_handled); } #[tokio::test] diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 001fb2380..076710fb6 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -239,14 +239,13 @@ fn handle_error(e: &Error, transaction_id: TransactionId) -> Response { #[cfg(test)] mod tests { - use std::env; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use rand::{thread_rng, Rng}; - use crate::config::Configuration; + use crate::config::{ephemeral_configuration, Configuration}; use crate::protocol::clock::{Current, Time}; use crate::tracker::{self, mode, peer, statistics}; @@ -255,28 +254,7 @@ mod tests { } fn 
default_testing_tracker_configuration() -> Configuration { - let mut config = Configuration { - log_level: Some("off".to_owned()), - ..Default::default() - }; - - // Ephemeral socket address - let port = ephemeral_random_port(); - config.http_api.bind_address = format!("127.0.0.1:{}", &port); - - // Ephemeral database - let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join(format!("data_{}.db", &port)); - config.db_path = temp_file.to_str().unwrap().to_owned(); - - config - } - - fn ephemeral_random_port() -> u16 { - // todo: this may produce random test failures because two tests can try to bind the same port. - // We could create a pool of available ports (with read/write lock) - let mut rng = thread_rng(); - rng.gen_range(49152..65535) + ephemeral_configuration() } fn initialized_public_tracker() -> Arc { diff --git a/tests/api/server.rs b/tests/api/server.rs index 338b068c8..9819a0847 100644 --- a/tests/api/server.rs +++ b/tests/api/server.rs @@ -1,8 +1,7 @@ use core::panic; -use std::env; use std::sync::Arc; -use torrust_tracker::config::Configuration; +use torrust_tracker::config::{ephemeral_configuration, Configuration}; use torrust_tracker::jobs::{tracker_api, tracker_apis}; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; @@ -11,24 +10,9 @@ use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use super::connection_info::ConnectionInfo; use super::Version; -use crate::common::ephemeral_random_port; pub fn tracker_configuration() -> Arc { - let mut config = Configuration { - log_level: Some("off".to_owned()), - ..Default::default() - }; - - // Ephemeral socket address - let port = ephemeral_random_port(); - config.http_api.bind_address = format!("127.0.0.1:{}", &port); - - // Ephemeral database - let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join(format!("data_{}.db", &port)); - config.db_path = temp_file.to_str().unwrap().to_owned(); - - 
Arc::new(config) + Arc::new(ephemeral_configuration()) } pub async fn start_default_api(version: &Version) -> Server { diff --git a/tests/common/mod.rs b/tests/common/mod.rs deleted file mode 100644 index 5fd484cf5..000000000 --- a/tests/common/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -use rand::{thread_rng, Rng}; - -pub fn ephemeral_random_port() -> u16 { - // todo: this may produce random test failures because two tests can try to bind the same port. - // We could create a pool of available ports (with read/write lock) - let mut rng = thread_rng(); - rng.gen_range(49152..65535) -} diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index bac9d1324..301dd5890 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -14,7 +14,6 @@ extern crate rand; mod api; -mod common; mod tracker_api { diff --git a/tests/udp.rs b/tests/udp.rs index 55384db05..408f4f795 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -3,11 +3,8 @@ /// cargo test `udp_tracker_server` -- --nocapture extern crate rand; -mod common; - mod udp_tracker_server { use core::panic; - use std::env; use std::io::Cursor; use std::net::Ipv4Addr; use std::sync::atomic::{AtomicBool, Ordering}; @@ -17,32 +14,24 @@ mod udp_tracker_server { AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, Request, Response, ScrapeRequest, TransactionId, }; + use rand::{thread_rng, Rng}; use tokio::net::UdpSocket; use tokio::task::JoinHandle; - use torrust_tracker::config::Configuration; + use torrust_tracker::config::{ephemeral_configuration, Configuration}; use torrust_tracker::jobs::udp_tracker; use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::udp::MAX_PACKET_SIZE; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; - use crate::common::ephemeral_random_port; - fn tracker_configuration() -> Arc { - let mut config = Configuration { - log_level: Some("off".to_owned()), - ..Default::default() - }; - - 
// Ephemeral socket address - let port = ephemeral_random_port(); - config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", &port); - - // Ephemeral database - let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join(format!("data_{}.db", &port)); - config.db_path = temp_file.to_str().unwrap().to_owned(); + Arc::new(ephemeral_configuration()) + } - Arc::new(config) + pub fn ephemeral_random_client_port() -> u16 { + // todo: this may produce random test failures because two tests can try to bind the same port. + // We could create a pool of available ports (with read/write lock) + let mut rng = thread_rng(); + rng.gen_range(49152..65535) } pub struct UdpServer { @@ -129,7 +118,7 @@ mod udp_tracker_server { /// Creates a new `UdpClient` connected to a Udp server async fn new_connected_udp_client(remote_address: &str) -> UdpClient { - let client = UdpClient::bind(&source_address(ephemeral_random_port())).await; + let client = UdpClient::bind(&source_address(ephemeral_random_client_port())).await; client.connect(remote_address).await; client } From 1c6db6e6c47c8ed5ebd5efba8c1a0a541eb93a3f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 3 Jan 2023 17:28:59 +0000 Subject: [PATCH 225/435] test: [#143] add tests for extracted functions --- src/api/resource/stats.rs | 54 ++++++++++++++++++++++++++++++ src/tracker/mod.rs | 48 ++++++++++++++++++++++++++ src/tracker/services/statistics.rs | 48 ++++++++++++++++++++++++++ 3 files changed, 150 insertions(+) diff --git a/src/api/resource/stats.rs b/src/api/resource/stats.rs index c861876fa..44ac814dc 100644 --- a/src/api/resource/stats.rs +++ b/src/api/resource/stats.rs @@ -44,3 +44,57 @@ impl From for Stats { } } } + +#[cfg(test)] +mod tests { + use super::Stats; + use crate::tracker::services::statistics::TrackerMetrics; + use crate::tracker::statistics::Metrics; + use crate::tracker::TorrentsMetrics; + + #[test] + fn stats_resource_should_be_converted_from_tracker_metrics() { + assert_eq!( + 
Stats::from(TrackerMetrics { + torrents_metrics: TorrentsMetrics { + seeders: 1, + completed: 2, + leechers: 3, + torrents: 4 + }, + protocol_metrics: Metrics { + tcp4_connections_handled: 5, + tcp4_announces_handled: 6, + tcp4_scrapes_handled: 7, + tcp6_connections_handled: 8, + tcp6_announces_handled: 9, + tcp6_scrapes_handled: 10, + udp4_connections_handled: 11, + udp4_announces_handled: 12, + udp4_scrapes_handled: 13, + udp6_connections_handled: 14, + udp6_announces_handled: 15, + udp6_scrapes_handled: 16 + } + }), + Stats { + torrents: 4, + seeders: 1, + completed: 2, + leechers: 3, + tcp4_connections_handled: 5, + tcp4_announces_handled: 6, + tcp4_scrapes_handled: 7, + tcp6_connections_handled: 8, + tcp6_announces_handled: 9, + tcp6_scrapes_handled: 10, + udp4_connections_handled: 11, + udp4_announces_handled: 12, + udp4_scrapes_handled: 13, + udp6_connections_handled: 14, + udp6_announces_handled: 15, + udp6_scrapes_handled: 16 + } + ); + } +} diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 4de168908..f33628355 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -340,3 +340,51 @@ impl Tracker { } } } + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use super::statistics::Keeper; + use super::{TorrentsMetrics, Tracker}; + use crate::config::{ephemeral_configuration, Configuration}; + + pub fn tracker_configuration() -> Arc { + Arc::new(ephemeral_configuration()) + } + + pub fn tracker_factory() -> Tracker { + // code-review: the tracker initialization is duplicated in many places. Consider make this function public. 
+ + // Configuration + let configuration = tracker_configuration(); + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + match Tracker::new(&configuration, Some(stats_event_sender), stats_repository) { + Ok(tracker) => tracker, + Err(error) => { + panic!("{}", error) + } + } + } + + #[tokio::test] + async fn the_tracker_should_collect_torrent_metrics() { + let tracker = tracker_factory(); + + let torrents_metrics = tracker.get_torrents_metrics().await; + + assert_eq!( + torrents_metrics, + TorrentsMetrics { + seeders: 0, + completed: 0, + leechers: 0, + torrents: 0 + } + ); + } +} diff --git a/src/tracker/services/statistics.rs b/src/tracker/services/statistics.rs index 696ca2ea1..bbc069dd3 100644 --- a/src/tracker/services/statistics.rs +++ b/src/tracker/services/statistics.rs @@ -31,3 +31,51 @@ pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { }, } } + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use super::Tracker; + use crate::config::{ephemeral_configuration, Configuration}; + use crate::tracker; + use crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; + use crate::tracker::statistics::Keeper; + + pub fn tracker_configuration() -> Arc { + Arc::new(ephemeral_configuration()) + } + + pub fn tracker_factory() -> Tracker { + // code-review: the tracker initialization is duplicated in many places. Consider make this function public. 
+ + // Configuration + let configuration = tracker_configuration(); + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + match Tracker::new(&configuration, Some(stats_event_sender), stats_repository) { + Ok(tracker) => tracker, + Err(error) => { + panic!("{}", error) + } + } + } + + #[tokio::test] + async fn the_statistics_service_should_return_the_tracker_metrics() { + let tracker = Arc::new(tracker_factory()); + + let tracker_metrics = get_metrics(tracker.clone()).await; + + assert_eq!( + tracker_metrics, + TrackerMetrics { + torrents_metrics: tracker::TorrentsMetrics::default(), + protocol_metrics: tracker::statistics::Metrics::default(), + } + ); + } +} From 43dbed933c5a7560fc36aa2fde981d89020d39bc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 4 Jan 2023 14:12:06 +0000 Subject: [PATCH 226/435] feat(api): [#143] authentication with GET param for Axum API It keeps the same contract of the API. It returns 500 status code with error message in "debug" format. 
--- cSpell.json | 1 + src/apis/middlewares/auth.rs | 62 ++++++++++++++++++++++++++++++++++++ src/apis/middlewares/mod.rs | 1 + src/apis/mod.rs | 1 + src/apis/server.rs | 13 +++++--- src/config.rs | 19 ++++++++++- tests/tracker_api.rs | 19 +++++++++++ 7 files changed, 111 insertions(+), 5 deletions(-) create mode 100644 src/apis/middlewares/auth.rs create mode 100644 src/apis/middlewares/mod.rs diff --git a/cSpell.json b/cSpell.json index 57b9f3b67..801d35dbb 100644 --- a/cSpell.json +++ b/cSpell.json @@ -32,6 +32,7 @@ "leechers", "libtorrent", "Lphant", + "middlewares", "mockall", "myacicontext", "nanos", diff --git a/src/apis/middlewares/auth.rs b/src/apis/middlewares/auth.rs new file mode 100644 index 000000000..e04d5f2c5 --- /dev/null +++ b/src/apis/middlewares/auth.rs @@ -0,0 +1,62 @@ +use std::sync::Arc; + +use axum::extract::{Query, State}; +use axum::http::{header, Request, StatusCode}; +use axum::middleware::Next; +use axum::response::{IntoResponse, Response}; +use serde::Deserialize; + +use crate::config::{Configuration, HttpApi}; + +#[derive(Deserialize, Debug)] +pub struct QueryParams { + pub token: Option, +} + +/// Middleware for authentication using a "token" GET param. +/// The token must be one of the tokens in the tracker HTTP API configuration. 
+pub async fn auth( + State(config): State>, + Query(params): Query, + request: Request, + next: Next, +) -> Response +where + B: Send, +{ + let token = match params.token { + None => return AuthError::Unauthorized.into_response(), + Some(token) => token, + }; + + if !authenticate(&token, &config.http_api) { + return AuthError::TokenNotValid.into_response(); + } + + next.run(request).await +} + +enum AuthError { + Unauthorized, + TokenNotValid, +} + +impl IntoResponse for AuthError { + fn into_response(self) -> Response { + let body = match self { + AuthError::Unauthorized => "Unhandled rejection: Err { reason: \"unauthorized\" }", + AuthError::TokenNotValid => "Unhandled rejection: Err { reason: \"token not valid\" }", + }; + + ( + StatusCode::INTERNAL_SERVER_ERROR, + [(header::CONTENT_TYPE, "text/plain")], + body, + ) + .into_response() + } +} + +fn authenticate(token: &str, http_api_config: &HttpApi) -> bool { + http_api_config.contains_token(token) +} diff --git a/src/apis/middlewares/mod.rs b/src/apis/middlewares/mod.rs new file mode 100644 index 000000000..0e4a05d59 --- /dev/null +++ b/src/apis/middlewares/mod.rs @@ -0,0 +1 @@ +pub mod auth; diff --git a/src/apis/mod.rs b/src/apis/mod.rs index f2ec6ffbd..ea1615d6b 100644 --- a/src/apis/mod.rs +++ b/src/apis/mod.rs @@ -1,2 +1,3 @@ +pub mod middlewares; pub mod routes; pub mod server; diff --git a/src/apis/server.rs b/src/apis/server.rs index fb532519f..db7224cde 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -2,17 +2,19 @@ use std::net::SocketAddr; use std::sync::Arc; use axum::routing::get; -use axum::Router; +use axum::{middleware, Router}; use futures::Future; use warp::hyper; +use super::middlewares::auth::auth; use super::routes::{get_stats, root}; use crate::tracker; pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { let app = Router::new() .route("/", get(root)) - .route("/stats", get(get_stats).with_state(tracker.clone())); + .route("/stats", 
get(get_stats).with_state(tracker.clone())) + .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -25,11 +27,14 @@ pub fn start_tls( socket_addr: SocketAddr, _ssl_cert_path: &str, _ssl_key_path: &str, - _tracker: &Arc, + tracker: &Arc, ) -> impl Future> { // todo: for the time being, it's just a copy & paste from start(...). - let app = Router::new().route("/", get(root)); + let app = Router::new() + .route("/", get(root)) + .route("/stats", get(get_stats).with_state(tracker.clone())) + .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); diff --git a/src/config.rs b/src/config.rs index 05a446454..275339aa0 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::net::IpAddr; use std::path::Path; use std::str::FromStr; @@ -44,6 +44,15 @@ pub struct HttpApi { pub access_tokens: HashMap, } +impl HttpApi { + #[must_use] + pub fn contains_token(&self, token: &str) -> bool { + let tokens: HashMap = self.access_tokens.clone(); + let tokens: HashSet = tokens.into_values().collect(); + tokens.contains(token) + } +} + #[allow(clippy::struct_excessive_bools)] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct Configuration { @@ -366,4 +375,12 @@ mod tests { assert_eq!(format!("{error}"), "TrackerModeIncompatible"); } + + #[test] + fn http_api_configuration_should_check_if_it_contains_a_token() { + let configuration = Configuration::default(); + + assert!(configuration.http_api.contains_token("MyAccessToken")); + assert!(!configuration.http_api.contains_token("NonExistingToken")); + } } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 301dd5890..bc69a1c93 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -602,7 +602,9 @@ mod tracker_apis { use 
torrust_tracker::api::resource::stats::Stats; use torrust_tracker::protocol::info_hash::InfoHash; + use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; use crate::api::client::Client; + use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; use crate::api::server::start_default_api; use crate::api::Version; @@ -645,5 +647,22 @@ mod tracker_apis { } ); } + + #[tokio::test] + async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { + let api_server = start_default_api(&Version::Axum).await; + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + .get_tracker_statistics() + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + .get_tracker_statistics() + .await; + + assert_unauthorized(response).await; + } } } From 13959452f59949b91789050b8eda597fb82728b7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 4 Jan 2023 14:22:06 +0000 Subject: [PATCH 227/435] refactor(api): [#143] remove dummy api endpoint It was added to test Axum configuration. 
--- src/apis/routes.rs | 5 ----- src/apis/server.rs | 4 +--- tests/tracker_api.rs | 20 -------------------- 3 files changed, 1 insertion(+), 28 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 58eefa8b0..93474c4c2 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -8,11 +8,6 @@ use crate::api::resource::stats::Stats; use crate::tracker::services::statistics::get_metrics; use crate::tracker::Tracker; -#[allow(clippy::unused_async)] -pub async fn root() -> Json { - Json(json!({ "data": 42 })) -} - #[allow(clippy::unused_async)] pub async fn get_stats(State(tracker): State>) -> Json { Json(json!(Stats::from(get_metrics(tracker.clone()).await))) diff --git a/src/apis/server.rs b/src/apis/server.rs index db7224cde..9ddf4a8d3 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -7,12 +7,11 @@ use futures::Future; use warp::hyper; use super::middlewares::auth::auth; -use super::routes::{get_stats, root}; +use super::routes::get_stats; use crate::tracker; pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { let app = Router::new() - .route("/", get(root)) .route("/stats", get(get_stats).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); @@ -32,7 +31,6 @@ pub fn start_tls( // todo: for the time being, it's just a copy & paste from start(...). let app = Router::new() - .route("/", get(root)) .route("/stats", get(get_stats).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index bc69a1c93..25d747f27 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -553,9 +553,6 @@ mod tracker_apis { Endpoints: - Root (dummy endpoint to test Axum configuration. 
To be removed): - - [x] GET / - Stats: - [ ] GET /api/stats @@ -579,23 +576,6 @@ mod tracker_apis { */ - mod for_entrypoint { - use crate::api::client::{Client, Query}; - use crate::api::server::start_default_api; - use crate::api::Version; - - #[tokio::test] - async fn test_entrypoint() { - let api_server = start_default_api(&Version::Axum).await; - - let response = Client::new(api_server.get_connection_info(), &Version::Axum) - .get("", Query::default()) - .await; - - assert_eq!(response.status(), 200); - } - } - mod for_stats_resources { use std::str::FromStr; From af51f77743e58c3f0fb9121a97447e90a7b9ba09 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 4 Jan 2023 17:19:30 +0000 Subject: [PATCH 228/435] feat(api): [#143] add new cargo dependency: axum-server The new API implementation uses Axum. Axum does not support SSL configuration. The "axum-server" crate provides it. --- Cargo.lock | 38 +++++++++++++++++++++++++++++++++++++- Cargo.toml | 1 + 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 8e40508dc..8f8d753b3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -56,6 +56,12 @@ dependencies = [ "either", ] +[[package]] +name = "arc-swap" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" + [[package]] name = "arrayvec" version = "0.5.2" @@ -146,6 +152,26 @@ dependencies = [ "tower-service", ] +[[package]] +name = "axum-server" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8456dab8f11484979a86651da8e619b355ede5d61a160755155f6c344bd18c47" +dependencies = [ + "arc-swap", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "pin-project-lite", + "rustls", + "rustls-pemfile 1.0.1", + "tokio", + "tokio-rustls", + "tower-service", +] + [[package]] name = "base-x" version = "0.2.11" @@ -2090,6 +2116,15 @@ dependencies = [ "base64", ] +[[package]] 
+name = "rustls-pemfile" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" +dependencies = [ + "base64", +] + [[package]] name = "rustversion" version = "1.0.11" @@ -2764,6 +2799,7 @@ dependencies = [ "aquatic_udp_protocol", "async-trait", "axum", + "axum-server", "binascii", "chrono", "config", @@ -3037,7 +3073,7 @@ dependencies = [ "multipart", "percent-encoding", "pin-project", - "rustls-pemfile", + "rustls-pemfile 0.2.1", "scoped-tls", "serde", "serde_json", diff --git a/Cargo.toml b/Cargo.toml index 8ddefe78e..434b4cace 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -59,6 +59,7 @@ async-trait = "0.1" aquatic_udp_protocol = "0.2" uuid = { version = "1", features = ["v4"] } axum = "0.6.1" +axum-server = { version = "0.4.4", features = ["tls-rustls"] } [dev-dependencies] mockall = "0.11" From fe4303c0af047add8b413e8cb8b4f980e58b7e8c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 4 Jan 2023 17:21:18 +0000 Subject: [PATCH 229/435] feat(api): [#143] SSL support for the new Axum API --- cSpell.json | 1 + src/apis/server.rs | 24 ++++++++++++++++-------- src/jobs/tracker_apis.rs | 23 +++++++++++++++++------ 3 files changed, 34 insertions(+), 14 deletions(-) diff --git a/cSpell.json b/cSpell.json index 801d35dbb..bb15b6d91 100644 --- a/cSpell.json +++ b/cSpell.json @@ -49,6 +49,7 @@ "rngs", "rusqlite", "rustfmt", + "Rustls", "Seedable", "Shareaza", "sharktorrent", diff --git a/src/apis/server.rs b/src/apis/server.rs index 9ddf4a8d3..668959cd6 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -3,7 +3,10 @@ use std::sync::Arc; use axum::routing::get; use axum::{middleware, Router}; +use axum_server::tls_rustls::RustlsConfig; +use axum_server::Handle; use futures::Future; +use log::info; use warp::hyper; use super::middlewares::auth::auth; @@ -19,24 +22,29 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl F 
server.with_graceful_shutdown(async move { tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + info!("Stopping Torrust APIs server on http://{} ...", socket_addr); }) } pub fn start_tls( socket_addr: SocketAddr, - _ssl_cert_path: &str, - _ssl_key_path: &str, + ssl_config: RustlsConfig, tracker: &Arc, -) -> impl Future> { - // todo: for the time being, it's just a copy & paste from start(...). - +) -> impl Future> { let app = Router::new() .route("/stats", get(get_stats).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); - let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); + let handle = Handle::new(); + let shutdown_handle = handle.clone(); - server.with_graceful_shutdown(async move { + tokio::spawn(async move { tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - }) + info!("Stopping Torrust APIs server on https://{} ...", socket_addr); + shutdown_handle.shutdown(); + }); + + axum_server::bind_rustls(socket_addr, ssl_config) + .handle(handle) + .serve(app.into_make_service()) } diff --git a/src/jobs/tracker_apis.rs b/src/jobs/tracker_apis.rs index b696c923d..00e39eeba 100644 --- a/src/jobs/tracker_apis.rs +++ b/src/jobs/tracker_apis.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use axum_server::tls_rustls::RustlsConfig; use log::info; use tokio::sync::oneshot; use tokio::task::JoinHandle; @@ -29,17 +30,27 @@ pub async fn start_job(config: &HttpApi, tracker: Arc) -> Join let join_handle = tokio::spawn(async move { if !ssl_enabled { info!("Starting Torrust APIs server on: http://{}", bind_addr); + let handle = server::start(bind_addr, &tracker); - tx.send(ApiServerJobStarted()).expect("the start job dropped"); + + tx.send(ApiServerJobStarted()).expect("the API server should not be dropped"); + if let Ok(()) = handle.await { - info!("Stopping Torrust APIs server on {} ...", bind_addr); + info!("Torrust APIs server on http://{} stopped", 
bind_addr); } } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { info!("Starting Torrust APIs server on: https://{}", bind_addr); - let handle = server::start_tls(bind_addr, &ssl_cert_path.unwrap(), &ssl_key_path.unwrap(), &tracker); - tx.send(ApiServerJobStarted()).expect("the start job dropped"); + + let ssl_config = RustlsConfig::from_pem_file(ssl_cert_path.unwrap(), ssl_key_path.unwrap()) + .await + .unwrap(); + + let handle = server::start_tls(bind_addr, ssl_config, &tracker); + + tx.send(ApiServerJobStarted()).expect("the API server should not be dropped"); + if let Ok(()) = handle.await { - info!("Stopping Torrust APIs server on {} ...", bind_addr); + info!("Torrust APIs server on https://{} stopped", bind_addr); } } }); @@ -47,7 +58,7 @@ pub async fn start_job(config: &HttpApi, tracker: Arc) -> Join // Wait until the APIs server job is running match rx.await { Ok(_msg) => info!("Torrust APIs server started"), - Err(e) => panic!("the apis server was dropped: {e}"), + Err(e) => panic!("the API server was dropped: {e}"), } join_handle From 16d438dced06f481744160c9199e20fd893822ab Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 5 Jan 2023 11:42:24 +0000 Subject: [PATCH 230/435] feat(api): [#143] axum api, WIP. GET /api/torrent/:info_hash endpoint Not all cases finished yet. Not found case is pending. 
--- src/api/resource/torrent.rs | 70 ++++++++++++++++- src/api/routes.rs | 6 +- src/apis/routes.rs | 23 ++++-- src/apis/server.rs | 3 +- src/tracker/services/common.rs | 24 ++++++ src/tracker/services/mod.rs | 2 + src/tracker/services/statistics.rs | 23 +----- src/tracker/services/torrent.rs | 116 +++++++++++++++++++++++++++++ tests/tracker_api.rs | 63 ++++++++++++++++ 9 files changed, 297 insertions(+), 33 deletions(-) create mode 100644 src/tracker/services/common.rs create mode 100644 src/tracker/services/torrent.rs diff --git a/src/api/resource/torrent.rs b/src/api/resource/torrent.rs index 924b61b8c..bec82a132 100644 --- a/src/api/resource/torrent.rs +++ b/src/api/resource/torrent.rs @@ -1,11 +1,14 @@ use serde::{Deserialize, Serialize}; +use super::peer; +use crate::tracker::services::torrent::Info; + #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct Torrent { pub info_hash: String, - pub seeders: u32, - pub completed: u32, - pub leechers: u32, + pub seeders: u64, + pub completed: u64, + pub leechers: u64, #[serde(skip_serializing_if = "Option::is_none")] pub peers: Option>, } @@ -19,3 +22,64 @@ pub struct ListItem { // todo: this is always None. Remove field from endpoint? 
pub peers: Option>, } + +impl From for Torrent { + fn from(info: Info) -> Self { + Self { + info_hash: info.info_hash.to_string(), + seeders: info.seeders, + completed: info.completed, + leechers: info.leechers, + peers: info + .peers + .map(|peers| peers.iter().map(|peer| peer::Peer::from(*peer)).collect()), + } + } +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::str::FromStr; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + + use crate::api::resource::peer::Peer; + use crate::api::resource::torrent::Torrent; + use crate::protocol::clock::DurationSinceUnixEpoch; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::peer; + use crate::tracker::services::torrent::Info; + + fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + } + } + + #[test] + fn torrent_resource_should_be_converted_from_torrent_info() { + assert_eq!( + Torrent::from(Info { + info_hash: InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + seeders: 1, + completed: 2, + leechers: 3, + peers: Some(vec![sample_peer()]), + }), + Torrent { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 2, + leechers: 3, + peers: Some(vec![Peer::from(sample_peer())]), + } + ); + } +} diff --git a/src/api/routes.rs b/src/api/routes.rs index 73f1269ef..b29023f2f 100644 --- a/src/api/routes.rs +++ b/src/api/routes.rs @@ -124,9 +124,9 @@ pub fn routes(tracker: &Arc) -> impl Filter>) -> Json { - Json(json!(Stats::from(get_metrics(tracker.clone()).await))) +pub async fn get_stats(State(tracker): State>) -> Json { + Json(Stats::from(get_metrics(tracker.clone()).await)) +} + +/// # Panics +/// 
+/// Will panic if the torrent does not exist. +pub async fn get_torrent(State(tracker): State>, Path(info_hash): Path) -> Json { + let info = get_torrent_info(tracker.clone(), &InfoHash::from_str(&info_hash).unwrap()) + .await + .unwrap(); + // todo: return "not found" if the torrent does not exist + Json(Torrent::from(info)) } diff --git a/src/apis/server.rs b/src/apis/server.rs index 668959cd6..dcd0924c1 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -10,12 +10,13 @@ use log::info; use warp::hyper; use super::middlewares::auth::auth; -use super::routes::get_stats; +use super::routes::{get_stats, get_torrent}; use crate::tracker; pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { let app = Router::new() .route("/stats", get(get_stats).with_state(tracker.clone())) + .route("/torrent/:info_hash", get(get_torrent).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); diff --git a/src/tracker/services/common.rs b/src/tracker/services/common.rs new file mode 100644 index 000000000..8757e6a21 --- /dev/null +++ b/src/tracker/services/common.rs @@ -0,0 +1,24 @@ +use std::sync::Arc; + +use crate::config::Configuration; +use crate::tracker::statistics::Keeper; +use crate::tracker::Tracker; + +/// # Panics +/// +/// Will panic if tracker cannot be instantiated. +#[must_use] +pub fn tracker_factory(configuration: &Arc) -> Tracker { + // todo: the tracker initialization is duplicated in many places. 
+ + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + match Tracker::new(configuration, Some(stats_event_sender), stats_repository) { + Ok(tracker) => tracker, + Err(error) => { + panic!("{}", error) + } + } +} diff --git a/src/tracker/services/mod.rs b/src/tracker/services/mod.rs index 3449ec7b4..ffa5bb253 100644 --- a/src/tracker/services/mod.rs +++ b/src/tracker/services/mod.rs @@ -1 +1,3 @@ pub mod statistics; +pub mod torrent; +pub mod common; diff --git a/src/tracker/services/statistics.rs b/src/tracker/services/statistics.rs index bbc069dd3..745f5563c 100644 --- a/src/tracker/services/statistics.rs +++ b/src/tracker/services/statistics.rs @@ -36,37 +36,18 @@ pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { mod tests { use std::sync::Arc; - use super::Tracker; use crate::config::{ephemeral_configuration, Configuration}; use crate::tracker; + use crate::tracker::services::common::tracker_factory; use crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; - use crate::tracker::statistics::Keeper; pub fn tracker_configuration() -> Arc { Arc::new(ephemeral_configuration()) } - pub fn tracker_factory() -> Tracker { - // code-review: the tracker initialization is duplicated in many places. Consider make this function public. 
- - // Configuration - let configuration = tracker_configuration(); - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - match Tracker::new(&configuration, Some(stats_event_sender), stats_repository) { - Ok(tracker) => tracker, - Err(error) => { - panic!("{}", error) - } - } - } - #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { - let tracker = Arc::new(tracker_factory()); + let tracker = Arc::new(tracker_factory(&tracker_configuration())); let tracker_metrics = get_metrics(tracker.clone()).await; diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs new file mode 100644 index 000000000..da7d24ce5 --- /dev/null +++ b/src/tracker/services/torrent.rs @@ -0,0 +1,116 @@ +use std::sync::Arc; + +use crate::protocol::info_hash::InfoHash; +use crate::tracker::peer::Peer; +use crate::tracker::Tracker; + +#[derive(Debug, PartialEq)] +pub struct Info { + pub info_hash: InfoHash, + pub seeders: u64, + pub completed: u64, + pub leechers: u64, + pub peers: Option>, +} + +pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Option { + let db = tracker.get_torrents().await; + + let torrent_entry_option = db.get(info_hash); + + let torrent_entry = match torrent_entry_option { + Some(torrent_entry) => torrent_entry, + None => { + return None; + } + }; + + let (seeders, completed, leechers) = torrent_entry.get_stats(); + + let peers = torrent_entry.get_peers(None); + + let peers = Some(peers.iter().map(|peer| (**peer)).collect()); + + Some(Info { + info_hash: *info_hash, + seeders: u64::from(seeders), + completed: u64::from(completed), + leechers: u64::from(leechers), + peers, + }) +} + +#[cfg(test)] +mod tests { + + mod getting_a_torrent_info { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::str::FromStr; + use std::sync::Arc; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + + use 
crate::config::{ephemeral_configuration, Configuration}; + use crate::protocol::clock::DurationSinceUnixEpoch; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::peer; + use crate::tracker::services::common::tracker_factory; + use crate::tracker::services::torrent::{get_torrent_info, Info}; + + pub fn tracker_configuration() -> Arc { + Arc::new(ephemeral_configuration()) + } + + fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + } + } + + #[tokio::test] + async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + + let torrent_info = get_torrent_info( + tracker.clone(), + &InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(), + ) + .await; + + assert!(torrent_info.is_none()); + } + + #[tokio::test] + async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + + tracker + .update_torrent_with_peer_and_get_stats(&info_hash, &sample_peer()) + .await; + + let torrent_info = get_torrent_info(tracker.clone(), &InfoHash::from_str(&hash).unwrap()) + .await + .unwrap(); + + assert_eq!( + torrent_info, + Info { + info_hash: InfoHash::from_str(&hash).unwrap(), + seeders: 1, + completed: 0, + leechers: 0, + peers: Some(vec![sample_peer()]), + } + ); + } + } +} diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 25d747f27..78f8efbb1 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -645,4 +645,67 @@ mod tracker_apis { 
assert_unauthorized(response).await; } } + + mod for_torrent_resources { + use std::str::FromStr; + + use torrust_tracker::api::resource; + use torrust_tracker::api::resource::torrent::Torrent; + use torrust_tracker::protocol::info_hash::InfoHash; + + use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::client::Client; + use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; + use crate::api::fixtures::sample_peer; + use crate::api::server::start_default_api; + use crate::api::Version; + + #[tokio::test] + async fn should_allow_getting_a_torrent_info() { + let api_server = start_default_api(&Version::Axum).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + let peer = sample_peer(); + + api_server.add_torrent(&info_hash, &peer).await; + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .get_torrent(&info_hash.to_string()) + .await; + + assert_eq!(response.status(), 200); + assert_eq!( + response.json::().await.unwrap(), + Torrent { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: Some(vec![resource::peer::Peer::from(peer)]) + } + ); + } + + #[tokio::test] + async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { + let api_server = start_default_api(&Version::Axum).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + api_server.add_torrent(&info_hash, &sample_peer()).await; + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + .get_torrent(&info_hash.to_string()) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + .get_torrent(&info_hash.to_string()) + .await; + + assert_unauthorized(response).await; + 
} + } } From 2aebf9ad396298d41454bfda75528395fab0d085 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jan 2023 16:53:05 +0000 Subject: [PATCH 231/435] test(api): [#143] add test for torrent not known response in GET /api/torrent/:info_hash endpoint --- src/apis/middlewares/auth.rs | 2 +- tests/api/asserts.rs | 8 ++++++++ tests/tracker_api.rs | 15 ++++++++++++++- 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/src/apis/middlewares/auth.rs b/src/apis/middlewares/auth.rs index e04d5f2c5..905160a06 100644 --- a/src/apis/middlewares/auth.rs +++ b/src/apis/middlewares/auth.rs @@ -50,7 +50,7 @@ impl IntoResponse for AuthError { ( StatusCode::INTERNAL_SERVER_ERROR, - [(header::CONTENT_TYPE, "text/plain")], + [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], body, ) .into_response() diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index 5d664d5c4..5e03c2573 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -2,6 +2,7 @@ use reqwest::Response; pub async fn assert_token_not_valid(response: Response) { assert_eq!(response.status(), 500); + assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); assert_eq!( response.text().await.unwrap(), "Unhandled rejection: Err { reason: \"token not valid\" }" @@ -10,8 +11,15 @@ pub async fn assert_token_not_valid(response: Response) { pub async fn assert_unauthorized(response: Response) { assert_eq!(response.status(), 500); + assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); assert_eq!( response.text().await.unwrap(), "Unhandled rejection: Err { reason: \"unauthorized\" }" ); } + +pub async fn assert_torrent_not_known(response: Response) { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.text().await.unwrap(), "\"torrent not known\""); +} diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 78f8efbb1..0a942ea45 100644 
--- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -119,7 +119,7 @@ mod tracker_api { use torrust_tracker::api::resource::torrent::{self, Torrent}; use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::asserts::{assert_token_not_valid, assert_torrent_not_known, assert_unauthorized}; use crate::api::client::{Client, Query, QueryParam}; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; @@ -251,6 +251,19 @@ mod tracker_api { ); } + #[tokio::test] + async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { + let api_server = start_default_api(&Version::Warp).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + let response = Client::new(api_server.get_connection_info(), &Version::Warp) + .get_torrent(&info_hash.to_string()) + .await; + + assert_torrent_not_known(response).await; + } + #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { let api_server = start_default_api(&Version::Warp).await; From a649fe861a5f7508621c4c7630c676dc28c68d8d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jan 2023 16:54:21 +0000 Subject: [PATCH 232/435] feat(api): [#143] axum api. GET /api/torrent/:info_hash endpoint. 
Not found case --- cSpell.json | 1 + src/apis/routes.rs | 18 ++++++++++-------- tests/tracker_api.rs | 15 ++++++++++++++- 3 files changed, 25 insertions(+), 9 deletions(-) diff --git a/cSpell.json b/cSpell.json index bb15b6d91..537ea65a5 100644 --- a/cSpell.json +++ b/cSpell.json @@ -26,6 +26,7 @@ "hlocalhost", "Hydranode", "incompletei", + "infohash", "infoschema", "intervali", "leecher", diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 72be81ab0..9fedbc822 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -2,7 +2,8 @@ use std::str::FromStr; use std::sync::Arc; use axum::extract::{Path, State}; -use axum::response::Json; +use axum::response::{IntoResponse, Json, Response}; +use serde_json::json; use crate::api::resource::stats::Stats; use crate::api::resource::torrent::Torrent; @@ -17,11 +18,12 @@ pub async fn get_stats(State(tracker): State>) -> Json { /// # Panics /// -/// Will panic if the torrent does not exist. -pub async fn get_torrent(State(tracker): State>, Path(info_hash): Path) -> Json { - let info = get_torrent_info(tracker.clone(), &InfoHash::from_str(&info_hash).unwrap()) - .await - .unwrap(); - // todo: return "not found" if the torrent does not exist - Json(Torrent::from(info)) +/// Will panic if it can't parse the infohash in the request +pub async fn get_torrent(State(tracker): State>, Path(info_hash): Path) -> Response { + let optional_torrent_info = get_torrent_info(tracker.clone(), &InfoHash::from_str(&info_hash).unwrap()).await; + + match optional_torrent_info { + Some(info) => Json(Torrent::from(info)).into_response(), + None => Json(json!("torrent not known")).into_response(), + } } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 0a942ea45..bc5271c21 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -666,7 +666,7 @@ mod tracker_apis { use torrust_tracker::api::resource::torrent::Torrent; use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::asserts::{assert_token_not_valid, 
assert_unauthorized}; + use crate::api::asserts::{assert_token_not_valid, assert_torrent_not_known, assert_unauthorized}; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; @@ -700,6 +700,19 @@ mod tracker_apis { ); } + #[tokio::test] + async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { + let api_server = start_default_api(&Version::Axum).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .get_torrent(&info_hash.to_string()) + .await; + + assert_torrent_not_known(response).await; + } + #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { let api_server = start_default_api(&Version::Axum).await; From ded4d110dc04b71d6448a6dc3c51a6049acf37e3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jan 2023 16:59:49 +0000 Subject: [PATCH 233/435] fix: clippy errors --- src/tracker/services/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tracker/services/mod.rs b/src/tracker/services/mod.rs index ffa5bb253..2fd557d54 100644 --- a/src/tracker/services/mod.rs +++ b/src/tracker/services/mod.rs @@ -1,3 +1,3 @@ +pub mod common; pub mod statistics; pub mod torrent; -pub mod common; From a8061792f4ae1b9fcb95f778d42f706f05206ab5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jan 2023 17:11:43 +0000 Subject: [PATCH 234/435] refactor(api): [#143] use extracted service in the Warp handler --- src/api/routes.rs | 28 ++++++---------------------- 1 file changed, 6 insertions(+), 22 deletions(-) diff --git a/src/api/routes.rs b/src/api/routes.rs index b29023f2f..f3ff990ff 100644 --- a/src/api/routes.rs +++ b/src/api/routes.rs @@ -7,13 +7,13 @@ use serde::Deserialize; use warp::{filters, reply, Filter}; use 
super::resource::auth_key::AuthKey; -use super::resource::peer; use super::resource::stats::Stats; use super::resource::torrent::{ListItem, Torrent}; use super::{ActionStatus, TorrentInfoQuery}; use crate::protocol::info_hash::InfoHash; use crate::tracker; use crate::tracker::services::statistics::get_metrics; +use crate::tracker::services::torrent::get_torrent_info; fn authenticate(tokens: HashMap) -> impl Filter + Clone { #[derive(Deserialize)] @@ -107,28 +107,12 @@ pub fn routes(tracker: &Arc) -> impl Filter)| async move { - let db = tracker.get_torrents().await; - let torrent_entry_option = db.get(&info_hash); - - let torrent_entry = match torrent_entry_option { - Some(torrent_entry) => torrent_entry, - None => { - return Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")); - } - }; - let (seeders, completed, leechers) = torrent_entry.get_stats(); + let optional_torrent_info = get_torrent_info(tracker.clone(), &info_hash).await; - let peers = torrent_entry.get_peers(None); - - let peer_resources = peers.iter().map(|peer| peer::Peer::from(**peer)).collect(); - - Ok(reply::json(&Torrent { - info_hash: info_hash.to_string(), - seeders: u64::from(seeders), - completed: u64::from(completed), - leechers: u64::from(leechers), - peers: Some(peer_resources), - })) + match optional_torrent_info { + Some(info) => Ok(reply::json(&Torrent::from(info))), + None => Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")), + } }); // DELETE /api/whitelist/:info_hash From c36b121dce4674d43011920c56189facf5972bc5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jan 2023 20:01:09 +0000 Subject: [PATCH 235/435] refactor(api): [#143] extract service tracker::services::torrent::get_torrents It will be used in the new Axum implementaion for the API. 
In the API enpoint: ``` GET /api/torrents?offset=:u32&limit=:u32 ``` --- src/api/resource/torrent.rs | 61 ++++++++- src/api/routes.rs | 23 +--- src/tracker/services/torrent.rs | 218 +++++++++++++++++++++++++++++--- 3 files changed, 256 insertions(+), 46 deletions(-) diff --git a/src/api/resource/torrent.rs b/src/api/resource/torrent.rs index bec82a132..56fead37a 100644 --- a/src/api/resource/torrent.rs +++ b/src/api/resource/torrent.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; use super::peer; -use crate::tracker::services::torrent::Info; +use crate::tracker::services::torrent::{BasicInfo, Info}; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct Torrent { @@ -16,13 +16,31 @@ pub struct Torrent { #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct ListItem { pub info_hash: String, - pub seeders: u32, - pub completed: u32, - pub leechers: u32, + pub seeders: u64, + pub completed: u64, + pub leechers: u64, // todo: this is always None. Remove field from endpoint? 
pub peers: Option>, } +impl ListItem { + #[must_use] + pub fn new_vec(basic_info_vec: &[BasicInfo]) -> Vec { + basic_info_vec + .iter() + .map(|basic_info| ListItem::from((*basic_info).clone())) + .collect() + } +} + +#[must_use] +pub fn to_resource(basic_info_vec: &[BasicInfo]) -> Vec { + basic_info_vec + .iter() + .map(|basic_info| ListItem::from((*basic_info).clone())) + .collect() +} + impl From for Torrent { fn from(info: Info) -> Self { Self { @@ -37,6 +55,18 @@ impl From for Torrent { } } +impl From for ListItem { + fn from(basic_info: BasicInfo) -> Self { + Self { + info_hash: basic_info.info_hash.to_string(), + seeders: basic_info.seeders, + completed: basic_info.completed, + leechers: basic_info.leechers, + peers: None, + } + } +} + #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; @@ -45,11 +75,11 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use crate::api::resource::peer::Peer; - use crate::api::resource::torrent::Torrent; + use crate::api::resource::torrent::{ListItem, Torrent}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; - use crate::tracker::services::torrent::Info; + use crate::tracker::services::torrent::{BasicInfo, Info}; fn sample_peer() -> peer::Peer { peer::Peer { @@ -82,4 +112,23 @@ mod tests { } ); } + + #[test] + fn torrent_resource_list_item_should_be_converted_from_the_basic_torrent_info() { + assert_eq!( + ListItem::from(BasicInfo { + info_hash: InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + seeders: 1, + completed: 2, + leechers: 3, + }), + ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 2, + leechers: 3, + peers: None, + } + ); + } } diff --git a/src/api/routes.rs b/src/api/routes.rs index f3ff990ff..bb459ee95 100644 --- a/src/api/routes.rs +++ b/src/api/routes.rs @@ -13,7 +13,7 @@ use super::{ActionStatus, TorrentInfoQuery}; 
use crate::protocol::info_hash::InfoHash; use crate::tracker; use crate::tracker::services::statistics::get_metrics; -use crate::tracker::services::torrent::get_torrent_info; +use crate::tracker::services::torrent::{get_torrent_info, get_torrents}; fn authenticate(tokens: HashMap) -> impl Filter + Clone { #[derive(Deserialize)] @@ -64,24 +64,9 @@ pub fn routes(tracker: &Arc) -> impl Filter = db - .iter() - .map(|(info_hash, torrent_entry)| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - ListItem { - info_hash: info_hash.to_string(), - seeders, - completed, - leechers, - peers: None, - } - }) - .skip(offset as usize) - .take(limit as usize) - .collect(); - - Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) + Result::<_, warp::reject::Rejection>::Ok(reply::json(&ListItem::new_vec( + &get_torrents(tracker.clone(), offset, limit).await, + ))) }); // GET /api/stats diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index da7d24ce5..00cdfe136 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -13,6 +13,14 @@ pub struct Info { pub peers: Option>, } +#[derive(Debug, PartialEq, Clone)] +pub struct BasicInfo { + pub info_hash: InfoHash, + pub seeders: u64, + pub completed: u64, + pub leechers: u64, +} + pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Option { let db = tracker.get_torrents().await; @@ -40,39 +48,60 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op }) } +pub async fn get_torrents(tracker: Arc, offset: u32, limit: u32) -> Vec { + let db = tracker.get_torrents().await; + + db.iter() + .map(|(info_hash, torrent_entry)| { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + BasicInfo { + info_hash: *info_hash, + seeders: u64::from(seeders), + completed: u64::from(completed), + leechers: u64::from(leechers), + } + }) + .skip(offset as usize) + .take(limit as usize) + .collect() +} + #[cfg(test)] mod tests { 
+ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + + use crate::protocol::clock::DurationSinceUnixEpoch; + use crate::tracker::peer; + + fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + } + } mod getting_a_torrent_info { - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::str::FromStr; use std::sync::Arc; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::config::{ephemeral_configuration, Configuration}; - use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; - use crate::tracker::peer; use crate::tracker::services::common::tracker_factory; + use crate::tracker::services::torrent::tests::sample_peer; use crate::tracker::services::torrent::{get_torrent_info, Info}; pub fn tracker_configuration() -> Arc { Arc::new(ephemeral_configuration()) } - fn sample_peer() -> peer::Peer { - peer::Peer { - peer_id: peer::Id(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - } - } - #[tokio::test] async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { let tracker = Arc::new(tracker_factory(&tracker_configuration())); @@ -92,14 +121,11 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - tracker .update_torrent_with_peer_and_get_stats(&info_hash, &sample_peer()) .await; - let torrent_info = 
get_torrent_info(tracker.clone(), &InfoHash::from_str(&hash).unwrap()) - .await - .unwrap(); + let torrent_info = get_torrent_info(tracker.clone(), &info_hash).await.unwrap(); assert_eq!( torrent_info, @@ -113,4 +139,154 @@ mod tests { ); } } + + mod searching_for_torrents { + + use std::str::FromStr; + use std::sync::Arc; + + use crate::config::{ephemeral_configuration, Configuration}; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::services::common::tracker_factory; + use crate::tracker::services::torrent::tests::sample_peer; + use crate::tracker::services::torrent::{get_torrents, BasicInfo}; + + pub fn tracker_configuration() -> Arc { + Arc::new(ephemeral_configuration()) + } + + #[tokio::test] + async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let offset = 0; + let limit = 4000; + + let torrents = get_torrents(tracker.clone(), offset, limit).await; + + assert_eq!(torrents, vec![]); + } + + #[tokio::test] + async fn should_return_a_summarized_info_for_all_torrents() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let offset = 0; + let limit = 4000; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + + tracker + .update_torrent_with_peer_and_get_stats(&info_hash, &sample_peer()) + .await; + + let torrents = get_torrents(tracker.clone(), offset, limit).await; + + assert_eq!( + torrents, + vec![BasicInfo { + info_hash: InfoHash::from_str(&hash).unwrap(), + seeders: 1, + completed: 0, + leechers: 0, + }] + ); + } + + #[tokio::test] + async fn should_allow_limiting_the_number_of_torrents_in_the_result() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + + let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash1 = InfoHash::from_str(&hash1).unwrap(); + let hash2 = 
"03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); + let info_hash2 = InfoHash::from_str(&hash2).unwrap(); + + tracker + .update_torrent_with_peer_and_get_stats(&info_hash1, &sample_peer()) + .await; + tracker + .update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer()) + .await; + + let offset = 0; + let limit = 1; + + let torrents = get_torrents(tracker.clone(), offset, limit).await; + + assert_eq!(torrents.len(), 1); + } + + #[tokio::test] + async fn should_allow_using_pagination_in_the_result() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + + let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash1 = InfoHash::from_str(&hash1).unwrap(); + let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); + let info_hash2 = InfoHash::from_str(&hash2).unwrap(); + + tracker + .update_torrent_with_peer_and_get_stats(&info_hash1, &sample_peer()) + .await; + tracker + .update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer()) + .await; + + let offset = 1; + let limit = 4000; + + let torrents = get_torrents(tracker.clone(), offset, limit).await; + + assert_eq!(torrents.len(), 1); + assert_eq!( + torrents, + vec![BasicInfo { + info_hash: InfoHash::from_str(&hash1).unwrap(), + seeders: 1, + completed: 0, + leechers: 0, + }] + ); + } + + #[tokio::test] + async fn should_return_torrents_ordered_by_info_hash() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + + let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash1 = InfoHash::from_str(&hash1).unwrap(); + tracker + .update_torrent_with_peer_and_get_stats(&info_hash1, &sample_peer()) + .await; + + let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); + let info_hash2 = InfoHash::from_str(&hash2).unwrap(); + tracker + .update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer()) + .await; + + let offset = 0; + let limit = 4000; + + let torrents = get_torrents(tracker.clone(), 
offset, limit).await; + + assert_eq!( + torrents, + vec![ + BasicInfo { + info_hash: InfoHash::from_str(&hash2).unwrap(), + seeders: 1, + completed: 0, + leechers: 0, + }, + BasicInfo { + info_hash: InfoHash::from_str(&hash1).unwrap(), + seeders: 1, + completed: 0, + leechers: 0, + } + ] + ); + } + } } From 1515753b3f2365aad5d646e3efeced1b608b15e9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 10 Jan 2023 15:37:02 +0000 Subject: [PATCH 236/435] feat(api): [#143] axum api. GET /api/torrents endpoint --- src/api/routes.rs | 4 +- src/apis/routes.rs | 46 ++++++++++++-- src/apis/server.rs | 11 ++-- src/tracker/services/torrent.rs | 73 ++++++++++++++++++----- tests/tracker_api.rs | 102 +++++++++++++++++++++++++++++++- 5 files changed, 207 insertions(+), 29 deletions(-) diff --git a/src/api/routes.rs b/src/api/routes.rs index bb459ee95..4280cdb35 100644 --- a/src/api/routes.rs +++ b/src/api/routes.rs @@ -13,7 +13,7 @@ use super::{ActionStatus, TorrentInfoQuery}; use crate::protocol::info_hash::InfoHash; use crate::tracker; use crate::tracker::services::statistics::get_metrics; -use crate::tracker::services::torrent::{get_torrent_info, get_torrents}; +use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; fn authenticate(tokens: HashMap) -> impl Filter + Clone { #[derive(Deserialize)] @@ -65,7 +65,7 @@ pub fn routes(tracker: &Arc) -> impl Filter::Ok(reply::json(&ListItem::new_vec( - &get_torrents(tracker.clone(), offset, limit).await, + &get_torrents(tracker.clone(), &Pagination::new(offset, limit)).await, ))) }); diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 9fedbc822..b86a468e2 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -1,25 +1,27 @@ +use std::fmt; use std::str::FromStr; use std::sync::Arc; -use axum::extract::{Path, State}; +use axum::extract::{Path, Query, State}; use axum::response::{IntoResponse, Json, Response}; +use serde::{de, Deserialize, Deserializer}; use serde_json::json; use 
crate::api::resource::stats::Stats; -use crate::api::resource::torrent::Torrent; +use crate::api::resource::torrent::{ListItem, Torrent}; use crate::protocol::info_hash::InfoHash; use crate::tracker::services::statistics::get_metrics; -use crate::tracker::services::torrent::get_torrent_info; +use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; use crate::tracker::Tracker; -pub async fn get_stats(State(tracker): State>) -> Json { +pub async fn get_stats_handler(State(tracker): State>) -> Json { Json(Stats::from(get_metrics(tracker.clone()).await)) } /// # Panics /// /// Will panic if it can't parse the infohash in the request -pub async fn get_torrent(State(tracker): State>, Path(info_hash): Path) -> Response { +pub async fn get_torrent_handler(State(tracker): State>, Path(info_hash): Path) -> Response { let optional_torrent_info = get_torrent_info(tracker.clone(), &InfoHash::from_str(&info_hash).unwrap()).await; match optional_torrent_info { @@ -27,3 +29,37 @@ pub async fn get_torrent(State(tracker): State>, Path(info_hash): P None => Json(json!("torrent not known")).into_response(), } } + +#[derive(Deserialize)] +pub struct PaginationParams { + #[serde(default, deserialize_with = "empty_string_as_none")] + pub offset: Option, + pub limit: Option, +} + +pub async fn get_torrents_handler( + State(tracker): State>, + pagination: Query, +) -> Json> { + Json(ListItem::new_vec( + &get_torrents( + tracker.clone(), + &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), + ) + .await, + )) +} + +/// Serde deserialization decorator to map empty Strings to None, +fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> +where + D: Deserializer<'de>, + T: FromStr, + T::Err: fmt::Display, +{ + let opt = Option::::deserialize(de)?; + match opt.as_deref() { + None | Some("") => Ok(None), + Some(s) => FromStr::from_str(s).map_err(de::Error::custom).map(Some), + } +} diff --git a/src/apis/server.rs b/src/apis/server.rs index 
dcd0924c1..879160136 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -10,13 +10,14 @@ use log::info; use warp::hyper; use super::middlewares::auth::auth; -use super::routes::{get_stats, get_torrent}; +use super::routes::{get_stats_handler, get_torrent_handler, get_torrents_handler}; use crate::tracker; pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { let app = Router::new() - .route("/stats", get(get_stats).with_state(tracker.clone())) - .route("/torrent/:info_hash", get(get_torrent).with_state(tracker.clone())) + .route("/stats", get(get_stats_handler).with_state(tracker.clone())) + .route("/torrent/:info_hash", get(get_torrent_handler).with_state(tracker.clone())) + .route("/torrents", get(get_torrents_handler).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -33,7 +34,9 @@ pub fn start_tls( tracker: &Arc, ) -> impl Future> { let app = Router::new() - .route("/stats", get(get_stats).with_state(tracker.clone())) + .route("/stats", get(get_stats_handler).with_state(tracker.clone())) + .route("/torrent/:info_hash", get(get_torrent_handler).with_state(tracker.clone())) + .route("/torrents", get(get_torrents_handler).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let handle = Handle::new(); diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index 00cdfe136..a08fd54d1 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -1,5 +1,7 @@ use std::sync::Arc; +use serde::Deserialize; + use crate::protocol::info_hash::InfoHash; use crate::tracker::peer::Peer; use crate::tracker::Tracker; @@ -21,6 +23,52 @@ pub struct BasicInfo { pub leechers: u64, } +#[derive(Deserialize)] +pub struct Pagination { + pub offset: u32, + pub limit: u32, +} + +impl Pagination { + #[must_use] + pub fn new(offset: u32, limit: 
u32) -> Self { + Self { offset, limit } + } + + #[must_use] + pub fn new_with_options(offset_option: Option, limit_option: Option) -> Self { + let offset = match offset_option { + Some(offset) => offset, + None => Pagination::default_offset(), + }; + let limit = match limit_option { + Some(offset) => offset, + None => Pagination::default_limit(), + }; + + Self { offset, limit } + } + + #[must_use] + pub fn default_offset() -> u32 { + 0 + } + + #[must_use] + pub fn default_limit() -> u32 { + 4000 + } +} + +impl Default for Pagination { + fn default() -> Self { + Self { + offset: Self::default_offset(), + limit: Self::default_limit(), + } + } +} + pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Option { let db = tracker.get_torrents().await; @@ -48,7 +96,7 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op }) } -pub async fn get_torrents(tracker: Arc, offset: u32, limit: u32) -> Vec { +pub async fn get_torrents(tracker: Arc, pagination: &Pagination) -> Vec { let db = tracker.get_torrents().await; db.iter() @@ -61,8 +109,8 @@ pub async fn get_torrents(tracker: Arc, offset: u32, limit: u32) -> Vec leechers: u64::from(leechers), } }) - .skip(offset as usize) - .take(limit as usize) + .skip(pagination.offset as usize) + .take(pagination.limit as usize) .collect() } @@ -149,7 +197,7 @@ mod tests { use crate::protocol::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; use crate::tracker::services::torrent::tests::sample_peer; - use crate::tracker::services::torrent::{get_torrents, BasicInfo}; + use crate::tracker::services::torrent::{get_torrents, BasicInfo, Pagination}; pub fn tracker_configuration() -> Arc { Arc::new(ephemeral_configuration()) @@ -158,10 +206,8 @@ mod tests { #[tokio::test] async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let tracker = Arc::new(tracker_factory(&tracker_configuration())); - let offset = 0; - let limit = 4000; - let torrents = 
get_torrents(tracker.clone(), offset, limit).await; + let torrents = get_torrents(tracker.clone(), &Pagination::default()).await; assert_eq!(torrents, vec![]); } @@ -169,8 +215,6 @@ mod tests { #[tokio::test] async fn should_return_a_summarized_info_for_all_torrents() { let tracker = Arc::new(tracker_factory(&tracker_configuration())); - let offset = 0; - let limit = 4000; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -179,7 +223,7 @@ mod tests { .update_torrent_with_peer_and_get_stats(&info_hash, &sample_peer()) .await; - let torrents = get_torrents(tracker.clone(), offset, limit).await; + let torrents = get_torrents(tracker.clone(), &Pagination::default()).await; assert_eq!( torrents, @@ -211,7 +255,7 @@ mod tests { let offset = 0; let limit = 1; - let torrents = get_torrents(tracker.clone(), offset, limit).await; + let torrents = get_torrents(tracker.clone(), &Pagination::new(offset, limit)).await; assert_eq!(torrents.len(), 1); } @@ -235,7 +279,7 @@ mod tests { let offset = 1; let limit = 4000; - let torrents = get_torrents(tracker.clone(), offset, limit).await; + let torrents = get_torrents(tracker.clone(), &Pagination::new(offset, limit)).await; assert_eq!(torrents.len(), 1); assert_eq!( @@ -265,10 +309,7 @@ mod tests { .update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer()) .await; - let offset = 0; - let limit = 4000; - - let torrents = get_torrents(tracker.clone(), offset, limit).await; + let torrents = get_torrents(tracker.clone(), &Pagination::default()).await; assert_eq!( torrents, diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index bc5271c21..e8d1e71eb 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -662,17 +662,115 @@ mod tracker_apis { mod for_torrent_resources { use std::str::FromStr; - use torrust_tracker::api::resource; use torrust_tracker::api::resource::torrent::Torrent; + use torrust_tracker::api::resource::{self, torrent}; use 
torrust_tracker::protocol::info_hash::InfoHash; use crate::api::asserts::{assert_token_not_valid, assert_torrent_not_known, assert_unauthorized}; - use crate::api::client::Client; + use crate::api::client::{Client, Query, QueryParam}; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; use crate::api::server::start_default_api; use crate::api::Version; + #[tokio::test] + async fn should_allow_getting_torrents() { + let api_server = start_default_api(&Version::Axum).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + api_server.add_torrent(&info_hash, &sample_peer()).await; + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .get_torrents(Query::empty()) + .await; + + assert_eq!(response.status(), 200); + assert_eq!( + response.json::>().await.unwrap(), + vec![torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None // Torrent list does not include the peer list for each torrent + }] + ); + } + + #[tokio::test] + async fn should_allow_limiting_the_torrents_in_the_result() { + let api_server = start_default_api(&Version::Axum).await; + + // torrents are ordered alphabetically by infohashes + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); + + api_server.add_torrent(&info_hash_1, &sample_peer()).await; + api_server.add_torrent(&info_hash_2, &sample_peer()).await; + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) + .await; + + assert_eq!(response.status(), 200); + assert_eq!( + response.json::>().await.unwrap(), + vec![torrent::ListItem { + info_hash: 
"0b3aea4adc213ce32295be85d3883a63bca25446".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None // Torrent list does not include the peer list for each torrent + }] + ); + } + + #[tokio::test] + async fn should_allow_the_torrents_result_pagination() { + let api_server = start_default_api(&Version::Axum).await; + + // torrents are ordered alphabetically by infohashes + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); + + api_server.add_torrent(&info_hash_1, &sample_peer()).await; + api_server.add_torrent(&info_hash_2, &sample_peer()).await; + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) + .await; + + assert_eq!(response.status(), 200); + assert_eq!( + response.json::>().await.unwrap(), + vec![torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None // Torrent list does not include the peer list for each torrent + }] + ); + } + + #[tokio::test] + async fn should_not_allow_getting_torrents_for_unauthenticated_users() { + let api_server = start_default_api(&Version::Axum).await; + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + .get_torrents(Query::empty()) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + .get_torrents(Query::default()) + .await; + + assert_unauthorized(response).await; + } + #[tokio::test] async fn should_allow_getting_a_torrent_info() { let api_server = start_default_api(&Version::Axum).await; From e1ed929f1cf0abf450e246413536530accc4d904 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 10 Jan 2023 18:01:15 +0000 Subject: [PATCH 
237/435] test(api): [#143] add tests for database failure --- src/databases/mod.rs | 5 ++ src/databases/mysql.rs | 24 ++++++++ src/databases/sqlite.rs | 22 ++++++++ src/tracker/mod.rs | 6 +- tests/api/asserts.rs | 49 +++++++++++----- tests/api/mod.rs | 11 ++++ tests/tracker_api.rs | 121 ++++++++++++++++++++++++++++++++++++++-- 7 files changed, 218 insertions(+), 20 deletions(-) diff --git a/src/databases/mod.rs b/src/databases/mod.rs index c1d265b56..873dd70eb 100644 --- a/src/databases/mod.rs +++ b/src/databases/mod.rs @@ -39,6 +39,11 @@ pub trait Database: Sync + Send { /// Will return `Error` if unable to create own tables. fn create_database_tables(&self) -> Result<(), Error>; + /// # Errors + /// + /// Will return `Err` if unable to drop tables. + fn drop_database_tables(&self) -> Result<(), Error>; + async fn load_persistent_torrents(&self) -> Result, Error>; async fn load_keys(&self) -> Result, Error>; diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 8322b2273..71b06378c 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -74,6 +74,30 @@ impl Database for Mysql { Ok(()) } + fn drop_database_tables(&self) -> Result<(), Error> { + let drop_whitelist_table = " + DROP TABLE `whitelist`;" + .to_string(); + + let drop_torrents_table = " + DROP TABLE `torrents`;" + .to_string(); + + let drop_keys_table = " + DROP TABLE `keys`;" + .to_string(); + + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + + conn.query_drop(&drop_whitelist_table) + .expect("Could not drop `whitelist` table."); + conn.query_drop(&drop_torrents_table) + .expect("Could not drop `torrents` table."); + conn.query_drop(&drop_keys_table).expect("Could not drop `keys` table."); + + Ok(()) + } + async fn load_persistent_torrents(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index c5401aacf..1d7caf052 100644 --- a/src/databases/sqlite.rs 
+++ b/src/databases/sqlite.rs @@ -60,6 +60,28 @@ impl Database for Sqlite { .map(|_| ()) } + fn drop_database_tables(&self) -> Result<(), Error> { + let drop_whitelist_table = " + DROP TABLE whitelist;" + .to_string(); + + let drop_torrents_table = " + DROP TABLE torrents;" + .to_string(); + + let drop_keys_table = " + DROP TABLE keys;" + .to_string(); + + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + + conn.execute(&drop_whitelist_table, []) + .and_then(|_| conn.execute(&drop_torrents_table, [])) + .and_then(|_| conn.execute(&drop_keys_table, [])) + .map_err(|_| Error::InvalidQuery) + .map(|_| ()) + } + async fn load_persistent_torrents(&self) -> Result, Error> { let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index f33628355..50d006a3f 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -26,7 +26,7 @@ pub struct Tracker { torrents: RwLock>, stats_event_sender: Option>, stats_repository: statistics::Repo, - database: Box, + pub database: Box, } #[derive(Debug, PartialEq, Default)] @@ -130,7 +130,9 @@ impl Tracker { /// It adds a torrent to the whitelist if it has not been whitelisted previously async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - if self.database.is_info_hash_whitelisted(info_hash).await.unwrap() { + let is_whitelisted = self.database.is_info_hash_whitelisted(info_hash).await?; + + if is_whitelisted { return Ok(()); } diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index 5e03c2573..6bf493bc6 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -1,25 +1,48 @@ use reqwest::Response; +pub async fn assert_torrent_not_known(response: Response) { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.text().await.unwrap(), "\"torrent not known\""); +} + pub async fn 
assert_token_not_valid(response: Response) { - assert_eq!(response.status(), 500); - assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); - assert_eq!( - response.text().await.unwrap(), - "Unhandled rejection: Err { reason: \"token not valid\" }" - ); + assert_unhandled_rejection(response, "token not valid").await; } pub async fn assert_unauthorized(response: Response) { + assert_unhandled_rejection(response, "unauthorized").await; +} + +pub async fn assert_failed_to_remove_torrent_from_whitelist(response: Response) { + assert_unhandled_rejection(response, "failed to remove torrent from whitelist").await; +} + +pub async fn assert_failed_to_whitelist_torrent(response: Response) { + assert_unhandled_rejection(response, "failed to whitelist torrent").await; +} + +pub async fn assert_failed_to_generate_key(response: Response) { + assert_unhandled_rejection(response, "failed to generate key").await; +} + +pub async fn assert_failed_to_delete_key(response: Response) { + assert_unhandled_rejection(response, "failed to delete key").await; +} + +pub async fn assert_failed_to_reload_whitelist(response: Response) { + assert_unhandled_rejection(response, "failed to reload whitelist").await; +} + +pub async fn assert_failed_to_reload_keys(response: Response) { + assert_unhandled_rejection(response, "failed to reload keys").await; +} + +async fn assert_unhandled_rejection(response: Response, reason: &str) { assert_eq!(response.status(), 500); assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); assert_eq!( response.text().await.unwrap(), - "Unhandled rejection: Err { reason: \"unauthorized\" }" + format!("Unhandled rejection: Err {{ reason: \"{reason}\" }}") ); } - -pub async fn assert_torrent_not_known(response: Response) { - assert_eq!(response.status(), 200); - assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); - assert_eq!(response.text().await.unwrap(), "\"torrent not 
known\""); -} diff --git a/tests/api/mod.rs b/tests/api/mod.rs index 52980581f..1311a2356 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -1,3 +1,7 @@ +use std::sync::Arc; + +use torrust_tracker::tracker::Tracker; + pub mod asserts; pub mod client; pub mod connection_info; @@ -8,3 +12,10 @@ pub enum Version { Warp, Axum, } + +/// It forces a database error by dropping all tables. +/// That makes any query fail. +/// code-review: alternatively we could inject a database mock in the future. +pub fn force_database_error(tracker: &Arc) { + tracker.database.drop_database_tables().unwrap(); +} diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index e8d1e71eb..dc667a896 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -37,9 +37,11 @@ mod tracker_api { Keys: POST /api/key/:seconds_valid - GET /api/keys/reload DELETE /api/key/:key + Key command: + GET /api/keys/reload + */ mod for_stats_resources { @@ -291,11 +293,14 @@ mod tracker_api { use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::asserts::{ + assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, + assert_failed_to_whitelist_torrent, assert_token_not_valid, assert_unauthorized, + }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::server::start_default_api; - use crate::api::Version; + use crate::api::{force_database_error, Version}; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { @@ -350,6 +355,38 @@ mod tracker_api { assert_unauthorized(response).await; } + #[tokio::test] + async fn should_return_an_error_when_the_torrent_cannot_be_whitelisted() { + let api_server = start_default_api(&Version::Warp).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + force_database_error(&api_server.tracker); + + let response = 
Client::new(api_server.get_connection_info(), &Version::Warp) + .whitelist_a_torrent(&info_hash) + .await; + + assert_failed_to_whitelist_torrent(response).await; + } + + #[tokio::test] + async fn should_return_an_error_when_the_torrent_cannot_be_removed_from_the_whitelist() { + let api_server = start_default_api(&Version::Warp).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + force_database_error(&api_server.tracker); + + let response = Client::new(api_server.get_connection_info(), &Version::Warp) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_failed_to_remove_torrent_from_whitelist(response).await; + } + #[tokio::test] async fn should_allow_removing_a_torrent_from_the_whitelist() { let api_server = start_default_api(&Version::Warp).await; @@ -412,6 +449,23 @@ mod tracker_api { ); */ } + + #[tokio::test] + async fn should_return_an_error_when_the_whitelist_cannot_be_reloaded_from_the_database() { + let api_server = start_default_api(&Version::Warp).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + force_database_error(&api_server.tracker); + + let response = Client::new(api_server.get_connection_info(), &Version::Warp) + .reload_whitelist() + .await; + + assert_failed_to_reload_whitelist(response).await; + } } mod for_key_resources { @@ -420,11 +474,14 @@ mod tracker_api { use torrust_tracker::api::resource::auth_key::AuthKey; use torrust_tracker::tracker::auth::Key; - use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::asserts::{ + assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_token_not_valid, + assert_unauthorized, + }; use 
crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::server::start_default_api; - use crate::api::Version; + use crate::api::{force_database_error, Version}; #[tokio::test] async fn should_allow_generating_a_new_auth_key() { @@ -463,6 +520,20 @@ mod tracker_api { assert_unauthorized(response).await; } + #[tokio::test] + async fn should_return_an_error_when_the_auth_key_cannot_be_generated() { + let api_server = start_default_api(&Version::Warp).await; + + force_database_error(&api_server.tracker); + + let seconds_valid = 60; + let response = Client::new(api_server.get_connection_info(), &Version::Warp) + .generate_auth_key(seconds_valid) + .await; + + assert_failed_to_generate_key(response).await; + } + #[tokio::test] async fn should_allow_deleting_an_auth_key() { let api_server = start_default_api(&Version::Warp).await; @@ -482,6 +553,26 @@ mod tracker_api { assert_eq!(response.text().await.unwrap(), "{\"status\":\"ok\"}"); } + #[tokio::test] + async fn should_return_an_error_when_the_auth_key_cannot_be_deleted() { + let api_server = start_default_api(&Version::Warp).await; + + let seconds_valid = 60; + let auth_key = api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + force_database_error(&api_server.tracker); + + let response = Client::new(api_server.get_connection_info(), &Version::Warp) + .delete_auth_key(&auth_key.key) + .await; + + assert_failed_to_delete_key(response).await; + } + #[tokio::test] async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { let api_server = start_default_api(&Version::Warp).await; @@ -533,6 +624,26 @@ mod tracker_api { assert_eq!(response.status(), 200); } + #[tokio::test] + async fn should_return_an_error_when_keys_cannot_be_reloaded() { + let api_server = start_default_api(&Version::Warp).await; + + let seconds_valid = 60; + api_server + .tracker + 
.generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + force_database_error(&api_server.tracker); + + let response = Client::new(api_server.get_connection_info(), &Version::Warp) + .reload_keys() + .await; + + assert_failed_to_reload_keys(response).await; + } + #[tokio::test] async fn should_not_allow_reloading_keys_for_unauthenticated_users() { let api_server = start_default_api(&Version::Warp).await; From 5c5fcbd26f7e5f03d02d80105b85eaf6eb0d4b4d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Jan 2023 12:51:23 +0000 Subject: [PATCH 238/435] feat(api): [#143] axum api. POST /api/whitelist/:info_hash endpoint --- src/apis/routes.rs | 41 ++++++++++++++++++- src/apis/server.rs | 18 ++++++++- tests/tracker_api.rs | 94 ++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 143 insertions(+), 10 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index b86a468e2..1315c181a 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -3,8 +3,9 @@ use std::str::FromStr; use std::sync::Arc; use axum::extract::{Path, Query, State}; +use axum::http::{header, StatusCode}; use axum::response::{IntoResponse, Json, Response}; -use serde::{de, Deserialize, Deserializer}; +use serde::{de, Deserialize, Deserializer, Serialize}; use serde_json::json; use crate::api::resource::stats::Stats; @@ -14,6 +15,31 @@ use crate::tracker::services::statistics::get_metrics; use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; use crate::tracker::Tracker; +#[derive(Serialize, Debug)] +#[serde(tag = "status", rename_all = "snake_case")] +pub enum ActionStatus<'a> { + Ok, + Err { reason: std::borrow::Cow<'a, str> }, +} + +fn response_ok() -> Response { + ( + StatusCode::OK, + [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], + format!("{:?}", ActionStatus::Ok), + ) + .into_response() +} + +fn response_err(reason: String) -> Response { + ( + StatusCode::INTERNAL_SERVER_ERROR, + [(header::CONTENT_TYPE, 
"text/plain; charset=utf-8")], + format!("Unhandled rejection: {:?}", ActionStatus::Err { reason: reason.into() }), + ) + .into_response() +} + pub async fn get_stats_handler(State(tracker): State>) -> Json { Json(Stats::from(get_metrics(tracker.clone()).await)) } @@ -50,6 +76,19 @@ pub async fn get_torrents_handler( )) } +/// # Panics +/// +/// Will panic if it can't parse the infohash in the request +pub async fn add_torrent_to_whitelist_handler(State(tracker): State>, Path(info_hash): Path) -> Response { + match tracker + .add_torrent_to_whitelist(&InfoHash::from_str(&info_hash).unwrap()) + .await + { + Ok(..) => response_ok(), + Err(..) => response_err("failed to whitelist torrent".to_string()), + } +} + /// Serde deserialization decorator to map empty Strings to None, fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> where diff --git a/src/apis/server.rs b/src/apis/server.rs index 879160136..1c296cf56 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -1,7 +1,7 @@ use std::net::SocketAddr; use std::sync::Arc; -use axum::routing::get; +use axum::routing::{get, post}; use axum::{middleware, Router}; use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; @@ -10,14 +10,21 @@ use log::info; use warp::hyper; use super::middlewares::auth::auth; -use super::routes::{get_stats_handler, get_torrent_handler, get_torrents_handler}; +use super::routes::{add_torrent_to_whitelist_handler, get_stats_handler, get_torrent_handler, get_torrents_handler}; use crate::tracker; pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { let app = Router::new() + // Stats .route("/stats", get(get_stats_handler).with_state(tracker.clone())) + // Torrents .route("/torrent/:info_hash", get(get_torrent_handler).with_state(tracker.clone())) .route("/torrents", get(get_torrents_handler).with_state(tracker.clone())) + // Whitelisted torrents + .route( + "/whitelist/:info_hash", + 
post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), + ) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -34,9 +41,16 @@ pub fn start_tls( tracker: &Arc, ) -> impl Future> { let app = Router::new() + // Stats .route("/stats", get(get_stats_handler).with_state(tracker.clone())) + // Torrents .route("/torrent/:info_hash", get(get_torrent_handler).with_state(tracker.clone())) .route("/torrents", get(get_torrents_handler).with_state(tracker.clone())) + // Whitelisted torrents + .route( + "/whitelist/:info_hash", + post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), + ) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let handle = Handle::new(); diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index dc667a896..c85068521 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -371,36 +371,36 @@ mod tracker_api { } #[tokio::test] - async fn should_return_an_error_when_the_torrent_cannot_be_removed_from_the_whitelist() { + async fn should_allow_removing_a_torrent_from_the_whitelist() { let api_server = start_default_api(&Version::Warp).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) .remove_torrent_from_whitelist(&hash) .await; - assert_failed_to_remove_torrent_from_whitelist(response).await; + assert_eq!(response.status(), 200); + assert!(!api_server.tracker.is_info_hash_whitelisted(&info_hash).await); } #[tokio::test] - async fn should_allow_removing_a_torrent_from_the_whitelist() { + async fn should_return_an_error_when_the_torrent_cannot_be_removed_from_the_whitelist() { let api_server = start_default_api(&Version::Warp).await; let 
hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + force_database_error(&api_server.tracker); + let response = Client::new(api_server.get_connection_info(), &Version::Warp) .remove_torrent_from_whitelist(&hash) .await; - assert_eq!(response.status(), 200); - assert!(!api_server.tracker.is_info_hash_whitelisted(&info_hash).await); + assert_failed_to_remove_torrent_from_whitelist(response).await; } #[tokio::test] @@ -943,4 +943,84 @@ mod tracker_apis { assert_unauthorized(response).await; } } + + mod for_whitelisted_torrent_resources { + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + + use crate::api::asserts::{assert_failed_to_whitelist_torrent, assert_token_not_valid, assert_unauthorized}; + use crate::api::client::Client; + use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; + use crate::api::server::start_default_api; + use crate::api::{force_database_error, Version}; + + #[tokio::test] + async fn should_allow_whitelisting_a_torrent() { + let api_server = start_default_api(&Version::Axum).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let res = Client::new(api_server.get_connection_info(), &Version::Axum) + .whitelist_a_torrent(&info_hash) + .await; + + assert_eq!(res.status(), 200); + assert!( + api_server + .tracker + .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + .await + ); + } + + #[tokio::test] + async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { + let api_server = start_default_api(&Version::Axum).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let api_client = Client::new(api_server.get_connection_info(), &Version::Axum); + + let res = api_client.whitelist_a_torrent(&info_hash).await; + assert_eq!(res.status(), 200); + 
+ let res = api_client.whitelist_a_torrent(&info_hash).await; + assert_eq!(res.status(), 200); + } + + #[tokio::test] + async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { + let api_server = start_default_api(&Version::Axum).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + .whitelist_a_torrent(&info_hash) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + .whitelist_a_torrent(&info_hash) + .await; + + assert_unauthorized(response).await; + } + + #[tokio::test] + async fn should_return_an_error_when_the_torrent_cannot_be_whitelisted() { + let api_server = start_default_api(&Version::Axum).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + force_database_error(&api_server.tracker); + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .whitelist_a_torrent(&info_hash) + .await; + + assert_failed_to_whitelist_torrent(response).await; + } + } } From 2ddf2684b439d28ba9fd4743e13607472e688ee1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Jan 2023 13:32:58 +0000 Subject: [PATCH 239/435] feat(api): [#143] axum api. 
DELETE /api/whitelist/:info_hash endpoint --- src/apis/routes.rs | 16 ++++++++++++ src/apis/server.rs | 15 +++++++++-- tests/tracker_api.rs | 60 +++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 88 insertions(+), 3 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 1315c181a..9b909abed 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -89,6 +89,22 @@ pub async fn add_torrent_to_whitelist_handler(State(tracker): State } } +/// # Panics +/// +/// Will panic if it can't parse the infohash in the request +pub async fn delete_torrent_from_whitelist_handler( + State(tracker): State>, + Path(info_hash): Path, +) -> Response { + match tracker + .remove_torrent_from_whitelist(&InfoHash::from_str(&info_hash).unwrap()) + .await + { + Ok(..) => response_ok(), + Err(..) => response_err("failed to remove torrent from whitelist".to_string()), + } +} + /// Serde deserialization decorator to map empty Strings to None, fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> where diff --git a/src/apis/server.rs b/src/apis/server.rs index 1c296cf56..e03eae55a 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -1,7 +1,7 @@ use std::net::SocketAddr; use std::sync::Arc; -use axum::routing::{get, post}; +use axum::routing::{delete, get, post}; use axum::{middleware, Router}; use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; @@ -10,7 +10,10 @@ use log::info; use warp::hyper; use super::middlewares::auth::auth; -use super::routes::{add_torrent_to_whitelist_handler, get_stats_handler, get_torrent_handler, get_torrents_handler}; +use super::routes::{ + add_torrent_to_whitelist_handler, delete_torrent_from_whitelist_handler, get_stats_handler, get_torrent_handler, + get_torrents_handler, +}; use crate::tracker; pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { @@ -25,6 +28,10 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl F "/whitelist/:info_hash", 
post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), ) + .route( + "/whitelist/:info_hash", + delete(delete_torrent_from_whitelist_handler).with_state(tracker.clone()), + ) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -51,6 +58,10 @@ pub fn start_tls( "/whitelist/:info_hash", post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), ) + .route( + "/whitelist/:info_hash", + delete(delete_torrent_from_whitelist_handler).with_state(tracker.clone()), + ) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let handle = Handle::new(); diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index c85068521..0acf7e428 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -949,7 +949,10 @@ mod tracker_apis { use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::asserts::{assert_failed_to_whitelist_torrent, assert_token_not_valid, assert_unauthorized}; + use crate::api::asserts::{ + assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, assert_token_not_valid, + assert_unauthorized, + }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::server::start_default_api; @@ -1022,5 +1025,60 @@ mod tracker_apis { assert_failed_to_whitelist_torrent(response).await; } + + #[tokio::test] + async fn should_allow_removing_a_torrent_from_the_whitelist() { + let api_server = start_default_api(&Version::Axum).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_eq!(response.status(), 200); + 
assert!(!api_server.tracker.is_info_hash_whitelisted(&info_hash).await); + } + + #[tokio::test] + async fn should_return_an_error_when_the_torrent_cannot_be_removed_from_the_whitelist() { + let api_server = start_default_api(&Version::Axum).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + force_database_error(&api_server.tracker); + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_failed_to_remove_torrent_from_whitelist(response).await; + } + + #[tokio::test] + async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { + let api_server = start_default_api(&Version::Axum).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_token_not_valid(response).await; + + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_unauthorized(response).await; + } } } From a58d8310fd36a99e60b05ce61ccff85f5632cc71 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Jan 2023 14:01:42 +0000 Subject: [PATCH 240/435] feat(api): [#143] axum api. 
GET /api/whitelist/reload endpoint --- src/apis/routes.rs | 7 +++++++ src/apis/server.rs | 14 +++++++++++++- tests/api/asserts.rs | 2 ++ tests/tracker_api.rs | 46 ++++++++++++++++++++++++++++++++++++++++++-- 4 files changed, 66 insertions(+), 3 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 9b909abed..93209c285 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -105,6 +105,13 @@ pub async fn delete_torrent_from_whitelist_handler( } } +pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { + match tracker.load_whitelist().await { + Ok(..) => response_ok(), + Err(..) => response_err("failed to reload whitelist".to_string()), + } +} + /// Serde deserialization decorator to map empty Strings to None, fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> where diff --git a/src/apis/server.rs b/src/apis/server.rs index e03eae55a..fb0e4b376 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -12,11 +12,12 @@ use warp::hyper; use super::middlewares::auth::auth; use super::routes::{ add_torrent_to_whitelist_handler, delete_torrent_from_whitelist_handler, get_stats_handler, get_torrent_handler, - get_torrents_handler, + get_torrents_handler, reload_whitelist_handler, }; use crate::tracker; pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { + // todo: duplicate routes definition. See `start_tls` function. 
let app = Router::new() // Stats .route("/stats", get(get_stats_handler).with_state(tracker.clone())) @@ -32,6 +33,11 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl F "/whitelist/:info_hash", delete(delete_torrent_from_whitelist_handler).with_state(tracker.clone()), ) + // Whitelist command + .route( + "/whitelist/:info_hash", + get(reload_whitelist_handler).with_state(tracker.clone()), + ) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -47,6 +53,7 @@ pub fn start_tls( ssl_config: RustlsConfig, tracker: &Arc, ) -> impl Future> { + // todo: duplicate routes definition. See `start` function. let app = Router::new() // Stats .route("/stats", get(get_stats_handler).with_state(tracker.clone())) @@ -62,6 +69,11 @@ pub fn start_tls( "/whitelist/:info_hash", delete(delete_torrent_from_whitelist_handler).with_state(tracker.clone()), ) + // Whitelist command + .route( + "/whitelist/:info_hash", + get(reload_whitelist_handler).with_state(tracker.clone()), + ) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let handle = Handle::new(); diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index 6bf493bc6..0a2b3fad6 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -1,3 +1,5 @@ +// code-review: should we use macros to return the exact line where the assert fails? 
+ use reqwest::Response; pub async fn assert_torrent_not_known(response: Response) { diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 0acf7e428..37a6033c7 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -950,8 +950,8 @@ mod tracker_apis { use torrust_tracker::protocol::info_hash::InfoHash; use crate::api::asserts::{ - assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, assert_token_not_valid, - assert_unauthorized, + assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, + assert_failed_to_whitelist_torrent, assert_token_not_valid, assert_unauthorized, }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -1080,5 +1080,47 @@ mod tracker_apis { assert_unauthorized(response).await; } + + #[tokio::test] + async fn should_allow_reload_the_whitelist_from_the_database() { + let api_server = start_default_api(&Version::Axum).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .reload_whitelist() + .await; + + assert_eq!(response.status(), 200); + /* This assert fails because the whitelist has not been reloaded yet. + We could add a new endpoint GET /api/whitelist/:info_hash to check if a torrent + is whitelisted and use that endpoint to check if the torrent is still there after reloading. 
+ assert!( + !(api_server + .tracker + .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + .await) + ); + */ + } + + #[tokio::test] + async fn should_return_an_error_when_the_whitelist_cannot_be_reloaded_from_the_database() { + let api_server = start_default_api(&Version::Axum).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + force_database_error(&api_server.tracker); + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .reload_whitelist() + .await; + + assert_failed_to_reload_whitelist(response).await; + } } } From 0282e33931718e71ab4f2b05b9948cc50ccc91b1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Jan 2023 14:31:23 +0000 Subject: [PATCH 241/435] feat(api): [#143] axum api. POST /api/key/:seconds_valid endpoint --- src/apis/routes.rs | 18 ++++++++++++ src/apis/server.rs | 14 +++++++-- tests/tracker_api.rs | 68 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 98 insertions(+), 2 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 93209c285..d9b9c2691 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -1,6 +1,7 @@ use std::fmt; use std::str::FromStr; use std::sync::Arc; +use std::time::Duration; use axum::extract::{Path, Query, State}; use axum::http::{header, StatusCode}; @@ -8,6 +9,7 @@ use axum::response::{IntoResponse, Json, Response}; use serde::{de, Deserialize, Deserializer, Serialize}; use serde_json::json; +use crate::api::resource::auth_key::AuthKey; use crate::api::resource::stats::Stats; use crate::api::resource::torrent::{ListItem, Torrent}; use crate::protocol::info_hash::InfoHash; @@ -40,6 +42,15 @@ fn response_err(reason: String) -> Response { .into_response() } +fn response_auth_key(auth_key: &AuthKey) -> Response { + ( + StatusCode::OK, + [(header::CONTENT_TYPE, "application/json; 
charset=utf-8")], + serde_json::to_string(auth_key).unwrap(), + ) + .into_response() +} + pub async fn get_stats_handler(State(tracker): State>) -> Json { Json(Stats::from(get_metrics(tracker.clone()).await)) } @@ -112,6 +123,13 @@ pub async fn reload_whitelist_handler(State(tracker): State>) -> Re } } +pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid): Path) -> Response { + match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { + Ok(auth_key) => response_auth_key(&AuthKey::from(auth_key)), + Err(_) => response_err("failed to generate key".to_string()), + } +} + /// Serde deserialization decorator to map empty Strings to None, fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> where diff --git a/src/apis/server.rs b/src/apis/server.rs index fb0e4b376..ecf2a54cc 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -11,8 +11,8 @@ use warp::hyper; use super::middlewares::auth::auth; use super::routes::{ - add_torrent_to_whitelist_handler, delete_torrent_from_whitelist_handler, get_stats_handler, get_torrent_handler, - get_torrents_handler, reload_whitelist_handler, + add_torrent_to_whitelist_handler, delete_torrent_from_whitelist_handler, generate_auth_key_handler, get_stats_handler, + get_torrent_handler, get_torrents_handler, reload_whitelist_handler, }; use crate::tracker; @@ -38,6 +38,11 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl F "/whitelist/:info_hash", get(reload_whitelist_handler).with_state(tracker.clone()), ) + // Keys + .route( + "/key/:seconds_valid", + post(generate_auth_key_handler).with_state(tracker.clone()), + ) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -74,6 +79,11 @@ pub fn start_tls( "/whitelist/:info_hash", get(reload_whitelist_handler).with_state(tracker.clone()), ) + // Keys + .route( + "/key/:seconds_valid", + 
post(generate_auth_key_handler).with_state(tracker.clone()), + ) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let handle = Handle::new(); diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 37a6033c7..7ec789b22 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -1123,4 +1123,72 @@ mod tracker_apis { assert_failed_to_reload_whitelist(response).await; } } + + mod for_key_resources { + //use std::time::Duration; + + use torrust_tracker::api::resource::auth_key::AuthKey; + use torrust_tracker::tracker::auth::Key; + + use crate::api::asserts::{assert_failed_to_generate_key, assert_token_not_valid, assert_unauthorized}; + /*use crate::api::asserts::{ + assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_token_not_valid, + assert_unauthorized, + };*/ + use crate::api::client::Client; + use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; + use crate::api::server::start_default_api; + use crate::api::{force_database_error, Version}; + + #[tokio::test] + async fn should_allow_generating_a_new_auth_key() { + let api_server = start_default_api(&Version::Axum).await; + + let seconds_valid = 60; + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .generate_auth_key(seconds_valid) + .await; + + // Verify the key with the tracker + assert!(api_server + .tracker + .verify_auth_key(&Key::from(response.json::().await.unwrap())) + .await + .is_ok()); + } + + #[tokio::test] + async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { + let api_server = start_default_api(&Version::Axum).await; + + let seconds_valid = 60; + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + .generate_auth_key(seconds_valid) + .await; + + assert_token_not_valid(response).await; + + let response = 
Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + .generate_auth_key(seconds_valid) + .await; + + assert_unauthorized(response).await; + } + + #[tokio::test] + async fn should_return_an_error_when_the_auth_key_cannot_be_generated() { + let api_server = start_default_api(&Version::Axum).await; + + force_database_error(&api_server.tracker); + + let seconds_valid = 60; + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .generate_auth_key(seconds_valid) + .await; + + assert_failed_to_generate_key(response).await; + } + } } From 6b2e3bcfb55b75de20af4505f0f45d680b64da25 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Jan 2023 16:10:10 +0000 Subject: [PATCH 242/435] feat(api): [#143] axum api. DELETE /api/key/:key endpoint --- src/apis/routes.rs | 11 ++++-- src/apis/server.rs | 18 ++++++---- tests/api/asserts.rs | 6 ++++ tests/tracker_api.rs | 81 ++++++++++++++++++++++++++++++++++++++++---- 4 files changed, 102 insertions(+), 14 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index d9b9c2691..305ecefcc 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -123,13 +123,20 @@ pub async fn reload_whitelist_handler(State(tracker): State>) -> Re } } -pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid): Path) -> Response { - match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { +pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { + match tracker.generate_auth_key(Duration::from_secs(seconds_valid_or_key)).await { Ok(auth_key) => response_auth_key(&AuthKey::from(auth_key)), Err(_) => response_err("failed to generate key".to_string()), } } +pub async fn delete_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { + match tracker.remove_auth_key(&seconds_valid_or_key).await { + Ok(_) => response_ok(), + Err(_) => response_err("failed to 
delete key".to_string()), + } +} + /// Serde deserialization decorator to map empty Strings to None, fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> where diff --git a/src/apis/server.rs b/src/apis/server.rs index ecf2a54cc..1184908de 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -11,8 +11,8 @@ use warp::hyper; use super::middlewares::auth::auth; use super::routes::{ - add_torrent_to_whitelist_handler, delete_torrent_from_whitelist_handler, generate_auth_key_handler, get_stats_handler, - get_torrent_handler, get_torrents_handler, reload_whitelist_handler, + add_torrent_to_whitelist_handler, delete_auth_key_handler, delete_torrent_from_whitelist_handler, generate_auth_key_handler, + get_stats_handler, get_torrent_handler, get_torrents_handler, reload_whitelist_handler, }; use crate::tracker; @@ -40,8 +40,11 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl F ) // Keys .route( - "/key/:seconds_valid", - post(generate_auth_key_handler).with_state(tracker.clone()), + "/key/:seconds_valid_or_key", + post(generate_auth_key_handler) + .with_state(tracker.clone()) + .delete(delete_auth_key_handler) + .with_state(tracker.clone()), ) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); @@ -81,8 +84,11 @@ pub fn start_tls( ) // Keys .route( - "/key/:seconds_valid", - post(generate_auth_key_handler).with_state(tracker.clone()), + "/key/:seconds_valid_or_key", + post(generate_auth_key_handler) + .with_state(tracker.clone()) + .delete(delete_auth_key_handler) + .with_state(tracker.clone()), ) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index 0a2b3fad6..e502292f3 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -2,6 +2,12 @@ use reqwest::Response; +pub async fn assert_ok(response: Response) { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); + 
assert_eq!(response.text().await.unwrap(), "Ok"); +} + pub async fn assert_torrent_not_known(response: Response) { assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 7ec789b22..30c4fa9db 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -1125,16 +1125,14 @@ mod tracker_apis { } mod for_key_resources { - //use std::time::Duration; + use std::time::Duration; use torrust_tracker::api::resource::auth_key::AuthKey; use torrust_tracker::tracker::auth::Key; - use crate::api::asserts::{assert_failed_to_generate_key, assert_token_not_valid, assert_unauthorized}; - /*use crate::api::asserts::{ - assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_token_not_valid, - assert_unauthorized, - };*/ + use crate::api::asserts::{ + assert_failed_to_delete_key, assert_failed_to_generate_key, assert_ok, assert_token_not_valid, assert_unauthorized, + }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::server::start_default_api; @@ -1190,5 +1188,76 @@ mod tracker_apis { assert_failed_to_generate_key(response).await; } + + #[tokio::test] + async fn should_allow_deleting_an_auth_key() { + let api_server = start_default_api(&Version::Axum).await; + + let seconds_valid = 60; + let auth_key = api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .delete_auth_key(&auth_key.key) + .await; + + assert_ok(response).await; + } + + #[tokio::test] + async fn should_return_an_error_when_the_auth_key_cannot_be_deleted() { + let api_server = start_default_api(&Version::Warp).await; + + let seconds_valid = 60; + let auth_key = api_server + .tracker + 
.generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + force_database_error(&api_server.tracker); + + let response = Client::new(api_server.get_connection_info(), &Version::Warp) + .delete_auth_key(&auth_key.key) + .await; + + assert_failed_to_delete_key(response).await; + } + + #[tokio::test] + async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { + let api_server = start_default_api(&Version::Warp).await; + + let seconds_valid = 60; + + // Generate new auth key + let auth_key = api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) + .delete_auth_key(&auth_key.key) + .await; + + assert_token_not_valid(response).await; + + // Generate new auth key + let auth_key = api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) + .delete_auth_key(&auth_key.key) + .await; + + assert_unauthorized(response).await; + } } } From 03ba166bdc8ebed49305613a175525cab324aea3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Jan 2023 16:18:31 +0000 Subject: [PATCH 243/435] feat(api): [#143] axum api. GET /api/keys/reload endpoint --- src/apis/routes.rs | 7 +++++ src/apis/server.rs | 6 +++- tests/tracker_api.rs | 75 ++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 81 insertions(+), 7 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 305ecefcc..b9d0603b5 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -137,6 +137,13 @@ pub async fn delete_auth_key_handler(State(tracker): State>, Path(s } } +pub async fn reload_keys_handler(State(tracker): State>) -> Response { + match tracker.load_keys().await { + Ok(..) => response_ok(), + Err(..) 
=> response_err("failed to reload keys".to_string()), + } +} + /// Serde deserialization decorator to map empty Strings to None, fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> where diff --git a/src/apis/server.rs b/src/apis/server.rs index 1184908de..d046f1714 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -12,7 +12,7 @@ use warp::hyper; use super::middlewares::auth::auth; use super::routes::{ add_torrent_to_whitelist_handler, delete_auth_key_handler, delete_torrent_from_whitelist_handler, generate_auth_key_handler, - get_stats_handler, get_torrent_handler, get_torrents_handler, reload_whitelist_handler, + get_stats_handler, get_torrent_handler, get_torrents_handler, reload_keys_handler, reload_whitelist_handler, }; use crate::tracker; @@ -46,6 +46,8 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl F .delete(delete_auth_key_handler) .with_state(tracker.clone()), ) + // Key command + .route("/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -90,6 +92,8 @@ pub fn start_tls( .delete(delete_auth_key_handler) .with_state(tracker.clone()), ) + // Key command + .route("/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let handle = Handle::new(); diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 30c4fa9db..46a11b482 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -1131,7 +1131,8 @@ mod tracker_apis { use torrust_tracker::tracker::auth::Key; use crate::api::asserts::{ - assert_failed_to_delete_key, assert_failed_to_generate_key, assert_ok, assert_token_not_valid, assert_unauthorized, + assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_ok, + assert_token_not_valid, assert_unauthorized, }; use 
crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -1209,7 +1210,7 @@ mod tracker_apis { #[tokio::test] async fn should_return_an_error_when_the_auth_key_cannot_be_deleted() { - let api_server = start_default_api(&Version::Warp).await; + let api_server = start_default_api(&Version::Axum).await; let seconds_valid = 60; let auth_key = api_server @@ -1220,7 +1221,7 @@ mod tracker_apis { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info(), &Version::Axum) .delete_auth_key(&auth_key.key) .await; @@ -1229,7 +1230,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Warp).await; + let api_server = start_default_api(&Version::Axum).await; let seconds_valid = 60; @@ -1240,7 +1241,7 @@ mod tracker_apis { .await .unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) .delete_auth_key(&auth_key.key) .await; @@ -1253,11 +1254,73 @@ mod tracker_apis { .await .unwrap(); - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) .delete_auth_key(&auth_key.key) .await; assert_unauthorized(response).await; } + + #[tokio::test] + async fn should_allow_reloading_keys() { + let api_server = start_default_api(&Version::Axum).await; + + let seconds_valid = 60; + api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .reload_keys() + .await; 
+ + assert_eq!(response.status(), 200); + } + + #[tokio::test] + async fn should_return_an_error_when_keys_cannot_be_reloaded() { + let api_server = start_default_api(&Version::Axum).await; + + let seconds_valid = 60; + api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + force_database_error(&api_server.tracker); + + let response = Client::new(api_server.get_connection_info(), &Version::Axum) + .reload_keys() + .await; + + assert_failed_to_reload_keys(response).await; + } + + #[tokio::test] + async fn should_not_allow_reloading_keys_for_unauthenticated_users() { + let api_server = start_default_api(&Version::Axum).await; + + let seconds_valid = 60; + api_server + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + .reload_keys() + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + .reload_keys() + .await; + + assert_unauthorized(response).await; + } } } From 5d9dd9d90164e59915a200d66b5f2a293cbd38d1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Jan 2023 17:50:18 +0000 Subject: [PATCH 244/435] refactor(api): extract asserts in tests --- src/apis/routes.rs | 4 +- tests/api/asserts.rs | 46 ++++++++++- tests/tracker_api.rs | 193 ++++++++++++++++++++++--------------------- 3 files changed, 145 insertions(+), 98 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index b9d0603b5..7b1dc53f9 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -27,8 +27,8 @@ pub enum ActionStatus<'a> { fn response_ok() -> Response { ( StatusCode::OK, - [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], - format!("{:?}", ActionStatus::Ok), + [(header::CONTENT_TYPE, "application/json")], + serde_json::to_string(&ActionStatus::Ok).unwrap(), ) 
.into_response() } diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index e502292f3..1bc067490 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -1,13 +1,55 @@ // code-review: should we use macros to return the exact line where the assert fails? use reqwest::Response; +use torrust_tracker::api::resource::auth_key::AuthKey; +use torrust_tracker::api::resource::stats::Stats; +use torrust_tracker::api::resource::torrent::{ListItem, Torrent}; + +// Resource responses + +pub async fn assert_stats(response: Response, stats: Stats) { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.json::().await.unwrap(), stats); +} + +pub async fn assert_torrent_list(response: Response, torrents: Vec) { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.json::>().await.unwrap(), torrents); +} + +pub async fn assert_torrent_info(response: Response, torrent: Torrent) { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.json::().await.unwrap(), torrent); +} + +pub async fn assert_auth_key(response: Response) -> AuthKey { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + response.json::().await.unwrap() +} + +pub async fn assert_auth_key_utf8(response: Response) -> AuthKey { + assert_eq!(response.status(), 200); + assert_eq!( + response.headers().get("content-type").unwrap(), + "application/json; charset=utf-8" + ); + response.json::().await.unwrap() +} + +// OK response pub async fn assert_ok(response: Response) { assert_eq!(response.status(), 200); - assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); - assert_eq!(response.text().await.unwrap(), "Ok"); + 
assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.text().await.unwrap(), "{\"status\":\"ok\"}"); } +// Error responses + pub async fn assert_torrent_not_known(response: Response) { assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 46a11b482..ca1d2332d 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -50,7 +50,7 @@ mod tracker_api { use torrust_tracker::api::resource::stats::Stats; use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; @@ -72,9 +72,8 @@ mod tracker_api { .get_tracker_statistics() .await; - assert_eq!(response.status(), 200); - assert_eq!( - response.json::().await.unwrap(), + assert_stats( + response, Stats { torrents: 1, seeders: 1, @@ -92,8 +91,9 @@ mod tracker_api { udp6_connections_handled: 0, udp6_announces_handled: 0, udp6_scrapes_handled: 0, - } - ); + }, + ) + .await; } #[tokio::test] @@ -121,7 +121,9 @@ mod tracker_api { use torrust_tracker::api::resource::torrent::{self, Torrent}; use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::asserts::{assert_token_not_valid, assert_torrent_not_known, assert_unauthorized}; + use crate::api::asserts::{ + assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, + }; use crate::api::client::{Client, Query, QueryParam}; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; @@ -140,17 +142,17 @@ mod tracker_api { .get_torrents(Query::empty()) 
.await; - assert_eq!(response.status(), 200); - assert_eq!( - response.json::>().await.unwrap(), + assert_torrent_list( + response, vec![torrent::ListItem { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, completed: 0, leechers: 0, - peers: None // Torrent list does not include the peer list for each torrent - }] - ); + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; } #[tokio::test] @@ -168,17 +170,17 @@ mod tracker_api { .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) .await; - assert_eq!(response.status(), 200); - assert_eq!( - response.json::>().await.unwrap(), + assert_torrent_list( + response, vec![torrent::ListItem { info_hash: "0b3aea4adc213ce32295be85d3883a63bca25446".to_string(), seeders: 1, completed: 0, leechers: 0, - peers: None // Torrent list does not include the peer list for each torrent - }] - ); + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; } #[tokio::test] @@ -196,17 +198,17 @@ mod tracker_api { .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) .await; - assert_eq!(response.status(), 200); - assert_eq!( - response.json::>().await.unwrap(), + assert_torrent_list( + response, vec![torrent::ListItem { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, completed: 0, leechers: 0, - peers: None // Torrent list does not include the peer list for each torrent - }] - ); + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; } #[tokio::test] @@ -240,17 +242,17 @@ mod tracker_api { .get_torrent(&info_hash.to_string()) .await; - assert_eq!(response.status(), 200); - assert_eq!( - response.json::().await.unwrap(), + assert_torrent_info( + response, Torrent { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, completed: 0, leechers: 0, - peers: 
Some(vec![resource::peer::Peer::from(peer)]) - } - ); + peers: Some(vec![resource::peer::Peer::from(peer)]), + }, + ) + .await; } #[tokio::test] @@ -295,7 +297,7 @@ mod tracker_api { use crate::api::asserts::{ assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, - assert_failed_to_whitelist_torrent, assert_token_not_valid, assert_unauthorized, + assert_failed_to_whitelist_torrent, assert_ok, assert_token_not_valid, assert_unauthorized, }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -308,11 +310,11 @@ mod tracker_api { let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let res = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info(), &Version::Warp) .whitelist_a_torrent(&info_hash) .await; - assert_eq!(res.status(), 200); + assert_ok(response).await; assert!( api_server .tracker @@ -329,11 +331,11 @@ mod tracker_api { let api_client = Client::new(api_server.get_connection_info(), &Version::Warp); - let res = api_client.whitelist_a_torrent(&info_hash).await; - assert_eq!(res.status(), 200); + let response = api_client.whitelist_a_torrent(&info_hash).await; + assert_ok(response).await; - let res = api_client.whitelist_a_torrent(&info_hash).await; - assert_eq!(res.status(), 200); + let response = api_client.whitelist_a_torrent(&info_hash).await; + assert_ok(response).await; } #[tokio::test] @@ -382,7 +384,7 @@ mod tracker_api { .remove_torrent_from_whitelist(&hash) .await; - assert_eq!(response.status(), 200); + assert_ok(response).await; assert!(!api_server.tracker.is_info_hash_whitelisted(&info_hash).await); } @@ -437,8 +439,8 @@ mod tracker_api { .reload_whitelist() .await; - assert_eq!(response.status(), 200); - /* This assert fails because the whitelist has not been reloaded yet. 
+ assert_ok(response).await; + /* todo: this assert fails because the whitelist has not been reloaded yet. We could add a new endpoint GET /api/whitelist/:info_hash to check if a torrent is whitelisted and use that endpoint to check if the torrent is still there after reloading. assert!( @@ -471,12 +473,11 @@ mod tracker_api { mod for_key_resources { use std::time::Duration; - use torrust_tracker::api::resource::auth_key::AuthKey; use torrust_tracker::tracker::auth::Key; use crate::api::asserts::{ - assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_token_not_valid, - assert_unauthorized, + assert_auth_key, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_ok, + assert_token_not_valid, assert_unauthorized, }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -493,10 +494,12 @@ mod tracker_api { .generate_auth_key(seconds_valid) .await; + let auth_key_resource = assert_auth_key(response).await; + // Verify the key with the tracker assert!(api_server .tracker - .verify_auth_key(&Key::from(response.json::().await.unwrap())) + .verify_auth_key(&Key::from(auth_key_resource)) .await .is_ok()); } @@ -549,8 +552,7 @@ mod tracker_api { .delete_auth_key(&auth_key.key) .await; - assert_eq!(response.status(), 200); - assert_eq!(response.text().await.unwrap(), "{\"status\":\"ok\"}"); + assert_ok(response).await; } #[tokio::test] @@ -621,7 +623,7 @@ mod tracker_api { .reload_keys() .await; - assert_eq!(response.status(), 200); + assert_ok(response).await; } #[tokio::test] @@ -706,7 +708,7 @@ mod tracker_apis { use torrust_tracker::api::resource::stats::Stats; use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; use crate::api::client::Client; use 
crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; @@ -728,9 +730,8 @@ mod tracker_apis { .get_tracker_statistics() .await; - assert_eq!(response.status(), 200); - assert_eq!( - response.json::().await.unwrap(), + assert_stats( + response, Stats { torrents: 1, seeders: 1, @@ -748,8 +749,9 @@ mod tracker_apis { udp6_connections_handled: 0, udp6_announces_handled: 0, udp6_scrapes_handled: 0, - } - ); + }, + ) + .await; } #[tokio::test] @@ -777,7 +779,9 @@ mod tracker_apis { use torrust_tracker::api::resource::{self, torrent}; use torrust_tracker::protocol::info_hash::InfoHash; - use crate::api::asserts::{assert_token_not_valid, assert_torrent_not_known, assert_unauthorized}; + use crate::api::asserts::{ + assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, + }; use crate::api::client::{Client, Query, QueryParam}; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; @@ -796,17 +800,17 @@ mod tracker_apis { .get_torrents(Query::empty()) .await; - assert_eq!(response.status(), 200); - assert_eq!( - response.json::>().await.unwrap(), + assert_torrent_list( + response, vec![torrent::ListItem { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, completed: 0, leechers: 0, - peers: None // Torrent list does not include the peer list for each torrent - }] - ); + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; } #[tokio::test] @@ -824,17 +828,17 @@ mod tracker_apis { .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) .await; - assert_eq!(response.status(), 200); - assert_eq!( - response.json::>().await.unwrap(), + assert_torrent_list( + response, vec![torrent::ListItem { info_hash: "0b3aea4adc213ce32295be85d3883a63bca25446".to_string(), seeders: 1, completed: 0, 
leechers: 0, - peers: None // Torrent list does not include the peer list for each torrent - }] - ); + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; } #[tokio::test] @@ -852,17 +856,17 @@ mod tracker_apis { .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) .await; - assert_eq!(response.status(), 200); - assert_eq!( - response.json::>().await.unwrap(), + assert_torrent_list( + response, vec![torrent::ListItem { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, completed: 0, leechers: 0, - peers: None // Torrent list does not include the peer list for each torrent - }] - ); + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; } #[tokio::test] @@ -896,17 +900,17 @@ mod tracker_apis { .get_torrent(&info_hash.to_string()) .await; - assert_eq!(response.status(), 200); - assert_eq!( - response.json::().await.unwrap(), + assert_torrent_info( + response, Torrent { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), seeders: 1, completed: 0, leechers: 0, - peers: Some(vec![resource::peer::Peer::from(peer)]) - } - ); + peers: Some(vec![resource::peer::Peer::from(peer)]), + }, + ) + .await; } #[tokio::test] @@ -951,7 +955,7 @@ mod tracker_apis { use crate::api::asserts::{ assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, - assert_failed_to_whitelist_torrent, assert_token_not_valid, assert_unauthorized, + assert_failed_to_whitelist_torrent, assert_ok, assert_token_not_valid, assert_unauthorized, }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -964,11 +968,11 @@ mod tracker_apis { let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let res = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info(), &Version::Axum) 
.whitelist_a_torrent(&info_hash) .await; - assert_eq!(res.status(), 200); + assert_ok(response).await; assert!( api_server .tracker @@ -985,11 +989,11 @@ mod tracker_apis { let api_client = Client::new(api_server.get_connection_info(), &Version::Axum); - let res = api_client.whitelist_a_torrent(&info_hash).await; - assert_eq!(res.status(), 200); + let response = api_client.whitelist_a_torrent(&info_hash).await; + assert_ok(response).await; - let res = api_client.whitelist_a_torrent(&info_hash).await; - assert_eq!(res.status(), 200); + let response = api_client.whitelist_a_torrent(&info_hash).await; + assert_ok(response).await; } #[tokio::test] @@ -1038,7 +1042,7 @@ mod tracker_apis { .remove_torrent_from_whitelist(&hash) .await; - assert_eq!(response.status(), 200); + assert_ok(response).await; assert!(!api_server.tracker.is_info_hash_whitelisted(&info_hash).await); } @@ -1093,8 +1097,8 @@ mod tracker_apis { .reload_whitelist() .await; - assert_eq!(response.status(), 200); - /* This assert fails because the whitelist has not been reloaded yet. + assert_ok(response).await; + /* todo: this assert fails because the whitelist has not been reloaded yet. We could add a new endpoint GET /api/whitelist/:info_hash to check if a torrent is whitelisted and use that endpoint to check if the torrent is still there after reloading. 
assert!( @@ -1127,12 +1131,11 @@ mod tracker_apis { mod for_key_resources { use std::time::Duration; - use torrust_tracker::api::resource::auth_key::AuthKey; use torrust_tracker::tracker::auth::Key; use crate::api::asserts::{ - assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_ok, - assert_token_not_valid, assert_unauthorized, + assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, + assert_ok, assert_token_not_valid, assert_unauthorized, }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -1149,10 +1152,12 @@ mod tracker_apis { .generate_auth_key(seconds_valid) .await; + let auth_key_resource = assert_auth_key_utf8(response).await; + // Verify the key with the tracker assert!(api_server .tracker - .verify_auth_key(&Key::from(response.json::().await.unwrap())) + .verify_auth_key(&Key::from(auth_key_resource)) .await .is_ok()); } @@ -1276,7 +1281,7 @@ mod tracker_apis { .reload_keys() .await; - assert_eq!(response.status(), 200); + assert_ok(response).await; } #[tokio::test] From 504cb9e8d6ce24eed5e7e0e5dc24b673ced3541d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Jan 2023 18:09:16 +0000 Subject: [PATCH 245/435] feat(api): the new Axum api uses the URL prefix /api too. Initially we were using a ddifferent URl to avoid conflicts with the Warp implementation but sicne we are using different ports that is not a problem anymore. This change simplifies switching to the new Axum API, since we only have to start using the new implementation in the port set in the configuration (1212), instead of the temporarily created port for the Axum implementation (1313). 
--- src/apis/server.rs | 42 ++++++----- tests/api/client.rs | 8 +-- tests/tracker_api.rs | 164 +++++++++++++++++++------------------------ 3 files changed, 98 insertions(+), 116 deletions(-) diff --git a/src/apis/server.rs b/src/apis/server.rs index d046f1714..55f71f9cc 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -20,34 +20,37 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl F // todo: duplicate routes definition. See `start_tls` function. let app = Router::new() // Stats - .route("/stats", get(get_stats_handler).with_state(tracker.clone())) + .route("/api/stats", get(get_stats_handler).with_state(tracker.clone())) // Torrents - .route("/torrent/:info_hash", get(get_torrent_handler).with_state(tracker.clone())) - .route("/torrents", get(get_torrents_handler).with_state(tracker.clone())) + .route( + "/api/torrent/:info_hash", + get(get_torrent_handler).with_state(tracker.clone()), + ) + .route("/api/torrents", get(get_torrents_handler).with_state(tracker.clone())) // Whitelisted torrents .route( - "/whitelist/:info_hash", + "/api/whitelist/:info_hash", post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), ) .route( - "/whitelist/:info_hash", + "/api/whitelist/:info_hash", delete(delete_torrent_from_whitelist_handler).with_state(tracker.clone()), ) // Whitelist command .route( - "/whitelist/:info_hash", + "/api/whitelist/:info_hash", get(reload_whitelist_handler).with_state(tracker.clone()), ) // Keys .route( - "/key/:seconds_valid_or_key", + "/api/key/:seconds_valid_or_key", post(generate_auth_key_handler) .with_state(tracker.clone()) .delete(delete_auth_key_handler) .with_state(tracker.clone()), ) - // Key command - .route("/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) + // Keys command + .route("/api/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let server = 
axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -66,34 +69,37 @@ pub fn start_tls( // todo: duplicate routes definition. See `start` function. let app = Router::new() // Stats - .route("/stats", get(get_stats_handler).with_state(tracker.clone())) + .route("/api/stats", get(get_stats_handler).with_state(tracker.clone())) // Torrents - .route("/torrent/:info_hash", get(get_torrent_handler).with_state(tracker.clone())) - .route("/torrents", get(get_torrents_handler).with_state(tracker.clone())) + .route( + "/api/torrent/:info_hash", + get(get_torrent_handler).with_state(tracker.clone()), + ) + .route("/api/torrents", get(get_torrents_handler).with_state(tracker.clone())) // Whitelisted torrents .route( - "/whitelist/:info_hash", + "/api/whitelist/:info_hash", post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), ) .route( - "/whitelist/:info_hash", + "/api/whitelist/:info_hash", delete(delete_torrent_from_whitelist_handler).with_state(tracker.clone()), ) // Whitelist command .route( - "/whitelist/:info_hash", + "/api/whitelist/:info_hash", get(reload_whitelist_handler).with_state(tracker.clone()), ) // Keys .route( - "/key/:seconds_valid_or_key", + "/api/key/:seconds_valid_or_key", post(generate_auth_key_handler) .with_state(tracker.clone()) .delete(delete_auth_key_handler) .with_state(tracker.clone()), ) - // Key command - .route("/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) + // Keys command + .route("/api/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); let handle = Handle::new(); diff --git a/tests/api/client.rs b/tests/api/client.rs index e507d817f..b073adefd 100644 --- a/tests/api/client.rs +++ b/tests/api/client.rs @@ -1,7 +1,6 @@ use reqwest::Response; use super::connection_info::ConnectionInfo; -use super::Version; pub struct Client { connection_info: ConnectionInfo, @@ -68,13 +67,10 @@ impl From for 
ReqwestQueryParam { } impl Client { - pub fn new(connection_info: ConnectionInfo, version: &Version) -> Self { + pub fn new(connection_info: ConnectionInfo) -> Self { Self { connection_info, - base_path: match version { - Version::Warp => "/api/".to_string(), - Version::Axum => "/".to_string(), - }, + base_path: "/api/".to_string(), } } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index ca1d2332d..72311e71c 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -39,7 +39,7 @@ mod tracker_api { POST /api/key/:seconds_valid DELETE /api/key/:key - Key command: + Keys command: GET /api/keys/reload */ @@ -68,9 +68,7 @@ mod tracker_api { ) .await; - let response = Client::new(api_server.get_connection_info(), &Version::Warp) - .get_tracker_statistics() - .await; + let response = Client::new(api_server.get_connection_info()).get_tracker_statistics().await; assert_stats( response, @@ -100,13 +98,13 @@ mod tracker_api { async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { let api_server = start_default_api(&Version::Warp).await; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .get_tracker_statistics() .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .get_tracker_statistics() .await; @@ -138,7 +136,7 @@ mod tracker_api { api_server.add_torrent(&info_hash, &sample_peer()).await; - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::empty()) .await; @@ -166,7 +164,7 @@ mod tracker_api { api_server.add_torrent(&info_hash_1, &sample_peer()).await; api_server.add_torrent(&info_hash_2, 
&sample_peer()).await; - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) .await; @@ -194,7 +192,7 @@ mod tracker_api { api_server.add_torrent(&info_hash_1, &sample_peer()).await; api_server.add_torrent(&info_hash_2, &sample_peer()).await; - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) .await; @@ -215,13 +213,13 @@ mod tracker_api { async fn should_not_allow_getting_torrents_for_unauthenticated_users() { let api_server = start_default_api(&Version::Warp).await; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .get_torrents(Query::empty()) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .get_torrents(Query::default()) .await; @@ -238,7 +236,7 @@ mod tracker_api { api_server.add_torrent(&info_hash, &peer).await; - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .get_torrent(&info_hash.to_string()) .await; @@ -261,7 +259,7 @@ mod tracker_api { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .get_torrent(&info_hash.to_string()) .await; @@ -276,13 +274,13 @@ mod tracker_api { api_server.add_torrent(&info_hash, &sample_peer()).await; - let 
response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .get_torrent(&info_hash.to_string()) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .get_torrent(&info_hash.to_string()) .await; @@ -310,7 +308,7 @@ mod tracker_api { let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .whitelist_a_torrent(&info_hash) .await; @@ -329,7 +327,7 @@ mod tracker_api { let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let api_client = Client::new(api_server.get_connection_info(), &Version::Warp); + let api_client = Client::new(api_server.get_connection_info()); let response = api_client.whitelist_a_torrent(&info_hash).await; assert_ok(response).await; @@ -344,13 +342,13 @@ mod tracker_api { let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .whitelist_a_torrent(&info_hash) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .whitelist_a_torrent(&info_hash) .await; @@ -365,7 +363,7 @@ mod tracker_api { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = 
Client::new(api_server.get_connection_info()) .whitelist_a_torrent(&info_hash) .await; @@ -380,7 +378,7 @@ mod tracker_api { let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .remove_torrent_from_whitelist(&hash) .await; @@ -398,7 +396,7 @@ mod tracker_api { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .remove_torrent_from_whitelist(&hash) .await; @@ -413,14 +411,14 @@ mod tracker_api { let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .remove_torrent_from_whitelist(&hash) .await; assert_token_not_valid(response).await; api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .remove_torrent_from_whitelist(&hash) .await; @@ -435,9 +433,7 @@ mod tracker_api { let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) - .reload_whitelist() - .await; + let response = Client::new(api_server.get_connection_info()).reload_whitelist().await; assert_ok(response).await; /* todo: this assert fails because the whitelist has not been reloaded yet. 
@@ -462,9 +458,7 @@ mod tracker_api { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) - .reload_whitelist() - .await; + let response = Client::new(api_server.get_connection_info()).reload_whitelist().await; assert_failed_to_reload_whitelist(response).await; } @@ -490,7 +484,7 @@ mod tracker_api { let seconds_valid = 60; - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .generate_auth_key(seconds_valid) .await; @@ -510,13 +504,13 @@ mod tracker_api { let seconds_valid = 60; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .generate_auth_key(seconds_valid) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .generate_auth_key(seconds_valid) .await; @@ -530,7 +524,7 @@ mod tracker_api { force_database_error(&api_server.tracker); let seconds_valid = 60; - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .generate_auth_key(seconds_valid) .await; @@ -548,7 +542,7 @@ mod tracker_api { .await .unwrap(); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .delete_auth_key(&auth_key.key) .await; @@ -568,7 +562,7 @@ mod tracker_api { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) + let response = Client::new(api_server.get_connection_info()) .delete_auth_key(&auth_key.key) .await; @@ -588,7 +582,7 @@ mod tracker_api { .await 
.unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .delete_auth_key(&auth_key.key) .await; @@ -601,7 +595,7 @@ mod tracker_api { .await .unwrap(); - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .delete_auth_key(&auth_key.key) .await; @@ -619,9 +613,7 @@ mod tracker_api { .await .unwrap(); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) - .reload_keys() - .await; + let response = Client::new(api_server.get_connection_info()).reload_keys().await; assert_ok(response).await; } @@ -639,9 +631,7 @@ mod tracker_api { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Warp) - .reload_keys() - .await; + let response = Client::new(api_server.get_connection_info()).reload_keys().await; assert_failed_to_reload_keys(response).await; } @@ -657,13 +647,13 @@ mod tracker_api { .await .unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .reload_keys() .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Warp) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .reload_keys() .await; @@ -697,7 +687,7 @@ mod tracker_apis { - [ ] POST /api/key/:seconds_valid - [ ] DELETE /api/key/:key - Key commands + Keys commands - [ ] GET /api/keys/reload */ @@ -726,9 +716,7 @@ mod tracker_apis { ) .await; - let response = Client::new(api_server.get_connection_info(), &Version::Axum) - .get_tracker_statistics() - .await; + let 
response = Client::new(api_server.get_connection_info()).get_tracker_statistics().await; assert_stats( response, @@ -758,13 +746,13 @@ mod tracker_apis { async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { let api_server = start_default_api(&Version::Axum).await; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .get_tracker_statistics() .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .get_tracker_statistics() .await; @@ -796,7 +784,7 @@ mod tracker_apis { api_server.add_torrent(&info_hash, &sample_peer()).await; - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::empty()) .await; @@ -824,7 +812,7 @@ mod tracker_apis { api_server.add_torrent(&info_hash_1, &sample_peer()).await; api_server.add_torrent(&info_hash_2, &sample_peer()).await; - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) .await; @@ -852,7 +840,7 @@ mod tracker_apis { api_server.add_torrent(&info_hash_1, &sample_peer()).await; api_server.add_torrent(&info_hash_2, &sample_peer()).await; - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) .await; @@ -873,13 +861,13 @@ mod tracker_apis { async fn should_not_allow_getting_torrents_for_unauthenticated_users() { let api_server = 
start_default_api(&Version::Axum).await; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .get_torrents(Query::empty()) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .get_torrents(Query::default()) .await; @@ -896,7 +884,7 @@ mod tracker_apis { api_server.add_torrent(&info_hash, &peer).await; - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .get_torrent(&info_hash.to_string()) .await; @@ -919,7 +907,7 @@ mod tracker_apis { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .get_torrent(&info_hash.to_string()) .await; @@ -934,13 +922,13 @@ mod tracker_apis { api_server.add_torrent(&info_hash, &sample_peer()).await; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .get_torrent(&info_hash.to_string()) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .get_torrent(&info_hash.to_string()) .await; @@ -968,7 +956,7 @@ mod tracker_apis { let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = 
Client::new(api_server.get_connection_info()) .whitelist_a_torrent(&info_hash) .await; @@ -987,7 +975,7 @@ mod tracker_apis { let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let api_client = Client::new(api_server.get_connection_info(), &Version::Axum); + let api_client = Client::new(api_server.get_connection_info()); let response = api_client.whitelist_a_torrent(&info_hash).await; assert_ok(response).await; @@ -1002,13 +990,13 @@ mod tracker_apis { let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .whitelist_a_torrent(&info_hash) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .whitelist_a_torrent(&info_hash) .await; @@ -1023,7 +1011,7 @@ mod tracker_apis { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .whitelist_a_torrent(&info_hash) .await; @@ -1038,7 +1026,7 @@ mod tracker_apis { let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .remove_torrent_from_whitelist(&hash) .await; @@ -1056,7 +1044,7 @@ mod tracker_apis { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .remove_torrent_from_whitelist(&hash) .await; @@ -1071,14 +1059,14 @@ mod tracker_apis { 
let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .remove_torrent_from_whitelist(&hash) .await; assert_token_not_valid(response).await; api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .remove_torrent_from_whitelist(&hash) .await; @@ -1093,9 +1081,7 @@ mod tracker_apis { let info_hash = InfoHash::from_str(&hash).unwrap(); api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) - .reload_whitelist() - .await; + let response = Client::new(api_server.get_connection_info()).reload_whitelist().await; assert_ok(response).await; /* todo: this assert fails because the whitelist has not been reloaded yet. 
@@ -1120,9 +1106,7 @@ mod tracker_apis { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) - .reload_whitelist() - .await; + let response = Client::new(api_server.get_connection_info()).reload_whitelist().await; assert_failed_to_reload_whitelist(response).await; } @@ -1148,7 +1132,7 @@ mod tracker_apis { let seconds_valid = 60; - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .generate_auth_key(seconds_valid) .await; @@ -1168,13 +1152,13 @@ mod tracker_apis { let seconds_valid = 60; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .generate_auth_key(seconds_valid) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .generate_auth_key(seconds_valid) .await; @@ -1188,7 +1172,7 @@ mod tracker_apis { force_database_error(&api_server.tracker); let seconds_valid = 60; - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .generate_auth_key(seconds_valid) .await; @@ -1206,7 +1190,7 @@ mod tracker_apis { .await .unwrap(); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .delete_auth_key(&auth_key.key) .await; @@ -1226,7 +1210,7 @@ mod tracker_apis { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) + let response = Client::new(api_server.get_connection_info()) .delete_auth_key(&auth_key.key) .await; @@ -1246,7 +1230,7 @@ mod 
tracker_apis { .await .unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .delete_auth_key(&auth_key.key) .await; @@ -1259,7 +1243,7 @@ mod tracker_apis { .await .unwrap(); - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .delete_auth_key(&auth_key.key) .await; @@ -1277,9 +1261,7 @@ mod tracker_apis { .await .unwrap(); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) - .reload_keys() - .await; + let response = Client::new(api_server.get_connection_info()).reload_keys().await; assert_ok(response).await; } @@ -1297,9 +1279,7 @@ mod tracker_apis { force_database_error(&api_server.tracker); - let response = Client::new(api_server.get_connection_info(), &Version::Axum) - .reload_keys() - .await; + let response = Client::new(api_server.get_connection_info()).reload_keys().await; assert_failed_to_reload_keys(response).await; } @@ -1315,13 +1295,13 @@ mod tracker_apis { .await .unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .reload_keys() .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address()), &Version::Axum) + let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) .reload_keys() .await; From c502c1d939ef9d51c0457c5b3ebb73d56a04d9b0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Jan 2023 18:32:15 +0000 Subject: [PATCH 246/435] refactor(api): [#143] remove duplicate definition of axum router --- src/apis/routes.rs | 44 ++++++++++++++++++++++++ src/apis/server.rs | 86 
++++------------------------------------------ 2 files changed, 50 insertions(+), 80 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 7b1dc53f9..e46e3653d 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -6,9 +6,12 @@ use std::time::Duration; use axum::extract::{Path, Query, State}; use axum::http::{header, StatusCode}; use axum::response::{IntoResponse, Json, Response}; +use axum::routing::{delete, get, post}; +use axum::{middleware, Router}; use serde::{de, Deserialize, Deserializer, Serialize}; use serde_json::json; +use super::middlewares::auth::auth; use crate::api::resource::auth_key::AuthKey; use crate::api::resource::stats::Stats; use crate::api::resource::torrent::{ListItem, Torrent}; @@ -17,6 +20,47 @@ use crate::tracker::services::statistics::get_metrics; use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; use crate::tracker::Tracker; +pub fn router(tracker: &Arc) -> Router { + Router::new() + // Stats + .route("/api/stats", get(get_stats_handler).with_state(tracker.clone())) + // Torrents + .route( + "/api/torrent/:info_hash", + get(get_torrent_handler).with_state(tracker.clone()), + ) + .route("/api/torrents", get(get_torrents_handler).with_state(tracker.clone())) + // Whitelisted torrents + .route( + "/api/whitelist/:info_hash", + post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), + ) + .route( + "/api/whitelist/:info_hash", + delete(delete_torrent_from_whitelist_handler).with_state(tracker.clone()), + ) + // Whitelist command + .route( + "/api/whitelist/:info_hash", + get(reload_whitelist_handler).with_state(tracker.clone()), + ) + // Keys + .route( + // code-review: Axum does not allow two routes with the same path but different path variable name. 
+ // In the new major API version, `seconds_valid` should be a POST form field so that we will have two paths: + // POST /api/key + // DELETE /api/key/:key + "/api/key/:seconds_valid_or_key", + post(generate_auth_key_handler) + .with_state(tracker.clone()) + .delete(delete_auth_key_handler) + .with_state(tracker.clone()), + ) + // Keys command + .route("/api/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) + .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)) +} + #[derive(Serialize, Debug)] #[serde(tag = "status", rename_all = "snake_case")] pub enum ActionStatus<'a> { diff --git a/src/apis/server.rs b/src/apis/server.rs index 55f71f9cc..bbb3e5852 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -1,57 +1,17 @@ use std::net::SocketAddr; use std::sync::Arc; -use axum::routing::{delete, get, post}; -use axum::{middleware, Router}; use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; use futures::Future; use log::info; use warp::hyper; -use super::middlewares::auth::auth; -use super::routes::{ - add_torrent_to_whitelist_handler, delete_auth_key_handler, delete_torrent_from_whitelist_handler, generate_auth_key_handler, - get_stats_handler, get_torrent_handler, get_torrents_handler, reload_keys_handler, reload_whitelist_handler, -}; -use crate::tracker; +use super::routes::router; +use crate::tracker::Tracker; -pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { - // todo: duplicate routes definition. See `start_tls` function. 
- let app = Router::new() - // Stats - .route("/api/stats", get(get_stats_handler).with_state(tracker.clone())) - // Torrents - .route( - "/api/torrent/:info_hash", - get(get_torrent_handler).with_state(tracker.clone()), - ) - .route("/api/torrents", get(get_torrents_handler).with_state(tracker.clone())) - // Whitelisted torrents - .route( - "/api/whitelist/:info_hash", - post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), - ) - .route( - "/api/whitelist/:info_hash", - delete(delete_torrent_from_whitelist_handler).with_state(tracker.clone()), - ) - // Whitelist command - .route( - "/api/whitelist/:info_hash", - get(reload_whitelist_handler).with_state(tracker.clone()), - ) - // Keys - .route( - "/api/key/:seconds_valid_or_key", - post(generate_auth_key_handler) - .with_state(tracker.clone()) - .delete(delete_auth_key_handler) - .with_state(tracker.clone()), - ) - // Keys command - .route("/api/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) - .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); +pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { + let app = router(tracker); let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -64,43 +24,9 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl F pub fn start_tls( socket_addr: SocketAddr, ssl_config: RustlsConfig, - tracker: &Arc, + tracker: &Arc, ) -> impl Future> { - // todo: duplicate routes definition. See `start` function. 
- let app = Router::new() - // Stats - .route("/api/stats", get(get_stats_handler).with_state(tracker.clone())) - // Torrents - .route( - "/api/torrent/:info_hash", - get(get_torrent_handler).with_state(tracker.clone()), - ) - .route("/api/torrents", get(get_torrents_handler).with_state(tracker.clone())) - // Whitelisted torrents - .route( - "/api/whitelist/:info_hash", - post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), - ) - .route( - "/api/whitelist/:info_hash", - delete(delete_torrent_from_whitelist_handler).with_state(tracker.clone()), - ) - // Whitelist command - .route( - "/api/whitelist/:info_hash", - get(reload_whitelist_handler).with_state(tracker.clone()), - ) - // Keys - .route( - "/api/key/:seconds_valid_or_key", - post(generate_auth_key_handler) - .with_state(tracker.clone()) - .delete(delete_auth_key_handler) - .with_state(tracker.clone()), - ) - // Keys command - .route("/api/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) - .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)); + let app = router(tracker); let handle = Handle::new(); let shutdown_handle = handle.clone(); From 517ffde147ebbc4040967803af61f387a7c85722 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 12 Jan 2023 12:21:58 +0000 Subject: [PATCH 247/435] fix(api): [#143] fix new Axum API endpoint when URL params are invalid --- src/apis/routes.rs | 127 ++++++++++++++++++++++++++------------- src/tracker/auth.rs | 31 ++++++++++ src/tracker/mod.rs | 1 + tests/api/asserts.rs | 12 ++++ tests/tracker_api.rs | 138 +++++++++++++++++++++++++++++++++++++++++-- 5 files changed, 261 insertions(+), 48 deletions(-) diff --git a/src/apis/routes.rs index e46e3653d..60f5f9da0 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -16,6 +16,7 @@ use crate::api::resource::auth_key::AuthKey; use crate::api::resource::stats::Stats; use crate::api::resource::torrent::{ListItem, Torrent}; use 
crate::protocol::info_hash::InfoHash; +use crate::tracker::auth::KeyId; use crate::tracker::services::statistics::get_metrics; use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; use crate::tracker::Tracker; @@ -37,7 +38,7 @@ pub fn router(tracker: &Arc) -> Router { ) .route( "/api/whitelist/:info_hash", - delete(delete_torrent_from_whitelist_handler).with_state(tracker.clone()), + delete(remove_torrent_from_whitelist_handler).with_state(tracker.clone()), ) // Whitelist command .route( @@ -68,6 +69,19 @@ pub enum ActionStatus<'a> { Err { reason: std::borrow::Cow<'a, str> }, } +// Resource responses + +fn response_auth_key(auth_key: &AuthKey) -> Response { + ( + StatusCode::OK, + [(header::CONTENT_TYPE, "application/json; charset=utf-8")], + serde_json::to_string(auth_key).unwrap(), + ) + .into_response() +} + +// OK response + fn response_ok() -> Response { ( StatusCode::OK, @@ -77,20 +91,29 @@ fn response_ok() -> Response { .into_response() } -fn response_err(reason: String) -> Response { +// Error responses + +fn response_invalid_info_hash_param(info_hash: &str) -> Response { + response_bad_request(&format!( + "Invalid URL: invalid infohash param: string \"{}\", expected expected a 40 character long string", + info_hash + )) +} + +fn response_bad_request(body: &str) -> Response { ( - StatusCode::INTERNAL_SERVER_ERROR, + StatusCode::BAD_REQUEST, [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], - format!("Unhandled rejection: {:?}", ActionStatus::Err { reason: reason.into() }), + body.to_owned(), ) .into_response() } -fn response_auth_key(auth_key: &AuthKey) -> Response { +fn response_err(reason: String) -> Response { ( - StatusCode::OK, - [(header::CONTENT_TYPE, "application/json; charset=utf-8")], - serde_json::to_string(auth_key).unwrap(), + StatusCode::INTERNAL_SERVER_ERROR, + [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], + format!("Unhandled rejection: {:?}", ActionStatus::Err { reason: reason.into() }), ) 
.into_response() } @@ -99,15 +122,22 @@ pub async fn get_stats_handler(State(tracker): State>) -> Json>, Path(info_hash): Path) -> Response { - let optional_torrent_info = get_torrent_info(tracker.clone(), &InfoHash::from_str(&info_hash).unwrap()).await; +#[derive(Deserialize)] +pub struct InfoHashParam(String); + +pub async fn get_torrent_handler(State(tracker): State>, Path(info_hash): Path) -> Response { + let parsing_info_hash_result = InfoHash::from_str(&info_hash.0); - match optional_torrent_info { - Some(info) => Json(Torrent::from(info)).into_response(), - None => Json(json!("torrent not known")).into_response(), + match parsing_info_hash_result { + Err(_) => response_invalid_info_hash_param(&info_hash.0), + Ok(info_hash) => { + let optional_torrent_info = get_torrent_info(tracker.clone(), &info_hash).await; + + match optional_torrent_info { + Some(info) => Json(Torrent::from(info)).into_response(), + None => Json(json!("torrent not known")).into_response(), + } + } } } @@ -131,32 +161,33 @@ pub async fn get_torrents_handler( )) } -/// # Panics -/// -/// Will panic if it can't parse the infohash in the request -pub async fn add_torrent_to_whitelist_handler(State(tracker): State>, Path(info_hash): Path) -> Response { - match tracker - .add_torrent_to_whitelist(&InfoHash::from_str(&info_hash).unwrap()) - .await - { - Ok(..) => response_ok(), - Err(..) => response_err("failed to whitelist torrent".to_string()), +pub async fn add_torrent_to_whitelist_handler( + State(tracker): State>, + Path(info_hash): Path, +) -> Response { + let parsing_info_hash_result = InfoHash::from_str(&info_hash.0); + + match parsing_info_hash_result { + Err(_) => response_invalid_info_hash_param(&info_hash.0), + Ok(info_hash) => match tracker.add_torrent_to_whitelist(&info_hash).await { + Ok(..) => response_ok(), + Err(..) 
=> response_err("failed to whitelist torrent".to_string()), + }, } } -/// # Panics -/// -/// Will panic if it can't parse the infohash in the request -pub async fn delete_torrent_from_whitelist_handler( +pub async fn remove_torrent_from_whitelist_handler( State(tracker): State>, - Path(info_hash): Path, + Path(info_hash): Path, ) -> Response { - match tracker - .remove_torrent_from_whitelist(&InfoHash::from_str(&info_hash).unwrap()) - .await - { - Ok(..) => response_ok(), - Err(..) => response_err("failed to remove torrent from whitelist".to_string()), + let parsing_info_hash_result = InfoHash::from_str(&info_hash.0); + + match parsing_info_hash_result { + Err(_) => response_invalid_info_hash_param(&info_hash.0), + Ok(info_hash) => match tracker.remove_torrent_from_whitelist(&info_hash).await { + Ok(..) => response_ok(), + Err(..) => response_err("failed to remove torrent from whitelist".to_string()), + }, } } @@ -168,16 +199,28 @@ pub async fn reload_whitelist_handler(State(tracker): State>) -> Re } pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { - match tracker.generate_auth_key(Duration::from_secs(seconds_valid_or_key)).await { + let seconds_valid = seconds_valid_or_key; + match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { Ok(auth_key) => response_auth_key(&AuthKey::from(auth_key)), Err(_) => response_err("failed to generate key".to_string()), } } -pub async fn delete_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { - match tracker.remove_auth_key(&seconds_valid_or_key).await { - Ok(_) => response_ok(), - Err(_) => response_err("failed to delete key".to_string()), +#[derive(Deserialize)] +pub struct KeyIdParam(String); + +pub async fn delete_auth_key_handler( + State(tracker): State>, + Path(seconds_valid_or_key): Path, +) -> Response { + let key_id = KeyId::from_str(&seconds_valid_or_key.0); + + match key_id { + Err(_) => 
response_bad_request(&format!("Invalid auth key id param \"{}\"", seconds_valid_or_key.0)), + Ok(key_id) => match tracker.remove_auth_key(&key_id.to_string()).await { + Ok(_) => response_ok(), + Err(_) => response_err("failed to delete key".to_string()), + }, } } diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 406ef7033..c4062bc68 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -1,3 +1,4 @@ +use std::str::FromStr; use std::time::Duration; use derive_more::{Display, Error}; @@ -50,6 +51,8 @@ pub fn verify(auth_key: &Key) -> Result<(), Error> { #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub struct Key { + // todo: replace key field definition with: + // pub key: KeyId, pub key: String, pub valid_until: Option, } @@ -77,6 +80,24 @@ impl Key { } } +#[derive(Debug, Display, PartialEq, Clone)] +pub struct KeyId(String); + +#[derive(Debug, PartialEq, Eq)] +pub struct ParseKeyIdError; + +impl FromStr for KeyId { + type Err = ParseKeyIdError; + + fn from_str(s: &str) -> Result { + if s.len() != AUTH_KEY_LENGTH { + return Err(ParseKeyIdError); + } + + Ok(Self(s.to_string())) + } +} + #[derive(Debug, Display, PartialEq, Eq, Error)] #[allow(dead_code)] pub enum Error { @@ -97,6 +118,7 @@ impl From for Error { #[cfg(test)] mod tests { + use std::str::FromStr; use std::time::Duration; use crate::protocol::clock::{Current, StoppedTime}; @@ -122,6 +144,15 @@ mod tests { assert_eq!(auth_key.unwrap().key, key_string); } + #[test] + fn auth_key_id_from_string() { + let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; + let auth_key_id = auth::KeyId::from_str(key_string); + + assert!(auth_key_id.is_ok()); + assert_eq!(auth_key_id.unwrap().to_string(), key_string); + } + #[test] fn generate_valid_auth_key() { let auth_key = auth::generate(Duration::new(9999, 0)); diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 50d006a3f..bbf49e237 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -95,6 +95,7 @@ impl Tracker { /// /// 
Will return a `key::Error` if unable to get any `auth_key`. pub async fn verify_auth_key(&self, auth_key: &auth::Key) -> Result<(), auth::Error> { + // todo: use auth::KeyId for the function argument `auth_key` match self.keys.read().await.get(&auth_key.key) { None => Err(auth::Error::KeyInvalid), Some(key) => auth::verify(key), diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index 1bc067490..d708df58e 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -50,6 +50,18 @@ pub async fn assert_ok(response: Response) { // Error responses +pub async fn assert_bad_request(response: Response, body: &str) { + assert_eq!(response.status(), 400); + assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); + assert_eq!(response.text().await.unwrap(), body); +} + +pub async fn assert_method_not_allowed(response: Response) { + assert_eq!(response.status(), 405); + assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); + assert_eq!(response.text().await.unwrap(), "HTTP method not allowed"); +} + pub async fn assert_torrent_not_known(response: Response) { assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 72311e71c..f43db6255 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -120,7 +120,8 @@ mod tracker_api { use torrust_tracker::protocol::info_hash::InfoHash; use crate::api::asserts::{ - assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, + assert_bad_request, assert_method_not_allowed, assert_token_not_valid, assert_torrent_info, assert_torrent_list, + assert_torrent_not_known, assert_unauthorized, }; use crate::api::client::{Client, Query, QueryParam}; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -209,6 +210,27 @@ mod tracker_api { .await; } + 
#[tokio::test] + async fn should_fail_getting_torrents_when_query_parameters_cannot_be_parsed() { + let api_server = start_default_api(&Version::Warp).await; + + let invalid_offset = "INVALID OFFSET"; + + let response = Client::new(api_server.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) + .await; + + assert_bad_request(response, "Invalid query string").await; + + let invalid_limit = "INVALID LIMIT"; + + let response = Client::new(api_server.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) + .await; + + assert_bad_request(response, "Invalid query string").await; + } + #[tokio::test] async fn should_not_allow_getting_torrents_for_unauthenticated_users() { let api_server = start_default_api(&Version::Warp).await; @@ -266,6 +288,19 @@ mod tracker_api { assert_torrent_not_known(response).await; } + #[tokio::test] + async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_cannot_be_parsed() { + let api_server = start_default_api(&Version::Warp).await; + + let invalid_infohash = "INVALID INFOHASH"; + + let response = Client::new(api_server.get_connection_info()) + .get_torrent(invalid_infohash) + .await; + + assert_method_not_allowed(response).await; + } + #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { let api_server = start_default_api(&Version::Warp).await; @@ -470,8 +505,8 @@ mod tracker_api { use torrust_tracker::tracker::auth::Key; use crate::api::asserts::{ - assert_auth_key, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_ok, - assert_token_not_valid, assert_unauthorized, + assert_auth_key, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, + assert_method_not_allowed, assert_ok, assert_token_not_valid, assert_unauthorized, }; use crate::api::client::Client; use 
crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -517,6 +552,19 @@ mod tracker_api { assert_unauthorized(response).await; } + #[tokio::test] + async fn should_fail_generating_a_new_auth_key_when_the_key_duration_cannot_be_parsed() { + let api_server = start_default_api(&Version::Warp).await; + + let invalid_key_duration = -1; + + let response = Client::new(api_server.get_connection_info()) + .generate_auth_key(invalid_key_duration) + .await; + + assert_method_not_allowed(response).await; + } + #[tokio::test] async fn should_return_an_error_when_the_auth_key_cannot_be_generated() { let api_server = start_default_api(&Version::Warp).await; @@ -549,6 +597,19 @@ mod tracker_api { assert_ok(response).await; } + #[tokio::test] + async fn should_fail_deleting_an_auth_key_when_the_key_id_cannot_be_parsed() { + let api_server = start_default_api(&Version::Warp).await; + + let invalid_auth_key_id = "INVALID AUTH KEY ID"; + + let response = Client::new(api_server.get_connection_info()) + .delete_auth_key(invalid_auth_key_id) + .await; + + assert_failed_to_delete_key(response).await; + } + #[tokio::test] async fn should_return_an_error_when_the_auth_key_cannot_be_deleted() { let api_server = start_default_api(&Version::Warp).await; @@ -768,7 +829,8 @@ mod tracker_apis { use torrust_tracker::protocol::info_hash::InfoHash; use crate::api::asserts::{ - assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, + assert_bad_request, assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, + assert_unauthorized, }; use crate::api::client::{Client, Query, QueryParam}; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -857,6 +919,27 @@ mod tracker_apis { .await; } + #[tokio::test] + async fn should_fail_getting_torrents_when_query_parameters_cannot_be_parsed() { + let api_server = 
start_default_api(&Version::Axum).await; + + let invalid_offset = "INVALID OFFSET"; + + let response = Client::new(api_server.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) + .await; + + assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; + + let invalid_limit = "INVALID LIMIT"; + + let response = Client::new(api_server.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) + .await; + + assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; + } + #[tokio::test] async fn should_not_allow_getting_torrents_for_unauthenticated_users() { let api_server = start_default_api(&Version::Axum).await; @@ -914,6 +997,23 @@ mod tracker_apis { assert_torrent_not_known(response).await; } + #[tokio::test] + async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_cannot_be_parsed() { + let api_server = start_default_api(&Version::Axum).await; + + let invalid_infohash = "INVALID INFOHASH"; + + let response = Client::new(api_server.get_connection_info()) + .get_torrent(invalid_infohash) + .await; + + assert_bad_request( + response, + "Invalid URL: invalid infohash param: string \"INVALID INFOHASH\", expected expected a 40 character long string", + ) + .await; + } + #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { let api_server = start_default_api(&Version::Axum).await; @@ -1118,8 +1218,8 @@ mod tracker_apis { use torrust_tracker::tracker::auth::Key; use crate::api::asserts::{ - assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, - assert_ok, assert_token_not_valid, assert_unauthorized, + assert_auth_key_utf8, assert_bad_request, assert_failed_to_delete_key, assert_failed_to_generate_key, + assert_failed_to_reload_keys, assert_ok, assert_token_not_valid, 
assert_unauthorized, }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -1165,6 +1265,19 @@ mod tracker_apis { assert_unauthorized(response).await; } + #[tokio::test] + async fn should_fail_generating_a_new_auth_key_when_the_key_duration_cannot_be_parsed() { + let api_server = start_default_api(&Version::Axum).await; + + let invalid_key_duration = -1; + + let response = Client::new(api_server.get_connection_info()) + .generate_auth_key(invalid_key_duration) + .await; + + assert_bad_request(response, "Invalid URL: Cannot parse `\"-1\"` to a `u64`").await; + } + #[tokio::test] async fn should_return_an_error_when_the_auth_key_cannot_be_generated() { let api_server = start_default_api(&Version::Axum).await; @@ -1197,6 +1310,19 @@ mod tracker_apis { assert_ok(response).await; } + #[tokio::test] + async fn should_fail_deleting_an_auth_key_when_the_key_id_cannot_be_parsed() { + let api_server = start_default_api(&Version::Axum).await; + + let invalid_auth_key_id = "INVALID AUTH KEY ID"; + + let response = Client::new(api_server.get_connection_info()) + .delete_auth_key(invalid_auth_key_id) + .await; + + assert_bad_request(response, "Invalid auth key id param \"INVALID AUTH KEY ID\"").await; + } + #[tokio::test] async fn should_return_an_error_when_the_auth_key_cannot_be_deleted() { let api_server = start_default_api(&Version::Axum).await; From 2da0719966e4c8e42ce277698c9daad95a159423 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 12 Jan 2023 13:37:04 +0000 Subject: [PATCH 248/435] test(api): [#143] add tests for authentication --- tests/api/client.rs | 30 +++++++++++++----- tests/tracker_api.rs | 74 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 96 insertions(+), 8 deletions(-) diff --git a/tests/api/client.rs index b073adefd..5b2072cec 100644 --- a/tests/api/client.rs +++ b/tests/api/client.rs @@ -117,14 +117,7 @@ impl Client {
query.add_param(QueryParam::new("token", token)); }; - reqwest::Client::builder() - .build() - .unwrap() - .get(self.base_url(path)) - .query(&ReqwestQuery::from(query)) - .send() - .await - .unwrap() + self.get_request_with_query(path, query).await } async fn post(&self, path: &str) -> Response { @@ -149,6 +142,27 @@ impl Client { format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) } + pub async fn get_request_with_query(&self, path: &str, params: Query) -> Response { + reqwest::Client::builder() + .build() + .unwrap() + .get(self.base_url(path)) + .query(&ReqwestQuery::from(params)) + .send() + .await + .unwrap() + } + + pub async fn get_request(&self, path: &str) -> Response { + reqwest::Client::builder() + .build() + .unwrap() + .get(self.base_url(path)) + .send() + .await + .unwrap() + } + fn query_with_token(&self) -> Query { match &self.connection_info.api_token { Some(token) => Query::with_token(token), diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index f43db6255..56dcdb240 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -753,6 +753,80 @@ mod tracker_apis { */ + mod authentication { + use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::api::client::{Client, Query, QueryParam}; + use crate::api::server::start_default_api; + use crate::api::Version; + + #[tokio::test] + async fn should_authenticate_requests_by_using_a_token_query_param() { + let api_server = start_default_api(&Version::Axum).await; + + let token = api_server.get_connection_info().api_token.unwrap(); + + let response = Client::new(api_server.get_connection_info()) + .get_request_with_query("stats", Query::params([QueryParam::new("token", &token)].to_vec())) + .await; + + assert_eq!(response.status(), 200); + } + + #[tokio::test] + async fn should_not_authenticate_requests_when_the_token_is_missing() { + let api_server = start_default_api(&Version::Axum).await; + + let response = 
Client::new(api_server.get_connection_info()) + .get_request_with_query("stats", Query::default()) + .await; + + assert_unauthorized(response).await; + } + + #[tokio::test] + async fn should_not_authenticate_requests_when_the_token_is_empty() { + let api_server = start_default_api(&Version::Axum).await; + + let response = Client::new(api_server.get_connection_info()) + .get_request_with_query("stats", Query::params([QueryParam::new("token", "")].to_vec())) + .await; + + assert_token_not_valid(response).await; + } + + #[tokio::test] + async fn should_not_authenticate_requests_when_the_token_is_invalid() { + let api_server = start_default_api(&Version::Axum).await; + + let response = Client::new(api_server.get_connection_info()) + .get_request_with_query("stats", Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec())) + .await; + + assert_token_not_valid(response).await; + } + + #[tokio::test] + async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_query() { + let api_server = start_default_api(&Version::Axum).await; + + let token = api_server.get_connection_info().api_token.unwrap(); + + // At the beginning of the query component + let response = Client::new(api_server.get_connection_info()) + .get_request(&format!("torrents?token={}&limit=1", &token)) + .await; + + assert_eq!(response.status(), 200); + + // At the end of the query component + let response = Client::new(api_server.get_connection_info()) + .get_request(&format!("torrents?limit=1&token={}", &token)) + .await; + + assert_eq!(response.status(), 200); + } + } + mod for_stats_resources { use std::str::FromStr; From 3bcbbc9a1c784276f6e6dad8dc3b27ca9f7adee7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 12 Jan 2023 13:43:30 +0000 Subject: [PATCH 249/435] refactor(api): [#143] normalize test names for errors --- tests/tracker_api.rs | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/tracker_api.rs 
b/tests/tracker_api.rs index 56dcdb240..ec4d1f2eb 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -391,7 +391,7 @@ mod tracker_api { } #[tokio::test] - async fn should_return_an_error_when_the_torrent_cannot_be_whitelisted() { + async fn should_fail_when_the_torrent_cannot_be_whitelisted() { let api_server = start_default_api(&Version::Warp).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -422,7 +422,7 @@ mod tracker_api { } #[tokio::test] - async fn should_return_an_error_when_the_torrent_cannot_be_removed_from_the_whitelist() { + async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { let api_server = start_default_api(&Version::Warp).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -484,7 +484,7 @@ mod tracker_api { } #[tokio::test] - async fn should_return_an_error_when_the_whitelist_cannot_be_reloaded_from_the_database() { + async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { let api_server = start_default_api(&Version::Warp).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -566,7 +566,7 @@ mod tracker_api { } #[tokio::test] - async fn should_return_an_error_when_the_auth_key_cannot_be_generated() { + async fn should_fail_when_the_auth_key_cannot_be_generated() { let api_server = start_default_api(&Version::Warp).await; force_database_error(&api_server.tracker); @@ -611,7 +611,7 @@ mod tracker_api { } #[tokio::test] - async fn should_return_an_error_when_the_auth_key_cannot_be_deleted() { + async fn should_fail_when_the_auth_key_cannot_be_deleted() { let api_server = start_default_api(&Version::Warp).await; let seconds_valid = 60; @@ -680,7 +680,7 @@ mod tracker_api { } #[tokio::test] - async fn should_return_an_error_when_keys_cannot_be_reloaded() { + async fn should_fail_when_keys_cannot_be_reloaded() { let api_server = start_default_api(&Version::Warp).await; let seconds_valid = 60; @@ -1178,7 +1178,7 @@ 
mod tracker_apis { } #[tokio::test] - async fn should_return_an_error_when_the_torrent_cannot_be_whitelisted() { + async fn should_fail_when_the_torrent_cannot_be_whitelisted() { let api_server = start_default_api(&Version::Axum).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -1209,7 +1209,7 @@ mod tracker_apis { } #[tokio::test] - async fn should_return_an_error_when_the_torrent_cannot_be_removed_from_the_whitelist() { + async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { let api_server = start_default_api(&Version::Axum).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -1271,7 +1271,7 @@ mod tracker_apis { } #[tokio::test] - async fn should_return_an_error_when_the_whitelist_cannot_be_reloaded_from_the_database() { + async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { let api_server = start_default_api(&Version::Axum).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -1353,7 +1353,7 @@ mod tracker_apis { } #[tokio::test] - async fn should_return_an_error_when_the_auth_key_cannot_be_generated() { + async fn should_fail_when_the_auth_key_cannot_be_generated() { let api_server = start_default_api(&Version::Axum).await; force_database_error(&api_server.tracker); @@ -1398,7 +1398,7 @@ mod tracker_apis { } #[tokio::test] - async fn should_return_an_error_when_the_auth_key_cannot_be_deleted() { + async fn should_fail_when_the_auth_key_cannot_be_deleted() { let api_server = start_default_api(&Version::Axum).await; let seconds_valid = 60; @@ -1467,7 +1467,7 @@ mod tracker_apis { } #[tokio::test] - async fn should_return_an_error_when_keys_cannot_be_reloaded() { + async fn should_fail_when_keys_cannot_be_reloaded() { let api_server = start_default_api(&Version::Axum).await; let seconds_valid = 60; From aa2a2ef4346044aabaf2c8bf0f7966b2b3f94bf8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 12 Jan 2023 16:06:20 +0000 Subject: 
[PATCH 250/435] fix(api): [#143] do not fail trying to remove a whitelisted torrent twice Previous behavior: When you try to remove a non existing whitelisted torrent the response is 500 error. New behavior: The endpoint checks if the torrent is included in the whitelist. If it is not, then it ignores the request returning a 200 code. It should return a 204 or 404 but the current API only uses these codes: 200, 400, 405, 500. In the new API version we are planning to refactor all endpoints. --- src/tracker/mod.rs | 21 ++++++++++++++++++++- tests/tracker_api.rs | 26 ++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index bbf49e237..4f1dab49b 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -152,11 +152,30 @@ impl Tracker { /// /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + self.remove_torrent_from_database_whitelist(info_hash).await?; + self.remove_torrent_from_memory_whitelist(info_hash).await; + Ok(()) + } + + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. 
+ pub async fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + let is_whitelisted = self.database.is_info_hash_whitelisted(info_hash).await?; + + if !is_whitelisted { + return Ok(()); + } + self.database.remove_info_hash_from_whitelist(*info_hash).await?; - self.whitelist.write().await.remove(info_hash); + Ok(()) } + pub async fn remove_torrent_from_memory_whitelist(&self, info_hash: &InfoHash) -> bool { + self.whitelist.write().await.remove(info_hash) + } + pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { self.whitelist.read().await.contains(info_hash) } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index ec4d1f2eb..f959f67b9 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -421,6 +421,19 @@ mod tracker_api { assert!(!api_server.tracker.is_info_hash_whitelisted(&info_hash).await); } + #[tokio::test] + async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { + let api_server = start_default_api(&Version::Warp).await; + + let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let response = Client::new(api_server.get_connection_info()) + .remove_torrent_from_whitelist(&non_whitelisted_torrent_hash) + .await; + + assert_ok(response).await; + } + #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { let api_server = start_default_api(&Version::Warp).await; @@ -1208,6 +1221,19 @@ mod tracker_apis { assert!(!api_server.tracker.is_info_hash_whitelisted(&info_hash).await); } + #[tokio::test] + async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { + let api_server = start_default_api(&Version::Axum).await; + + let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let response = Client::new(api_server.get_connection_info()) + 
.remove_torrent_from_whitelist(&non_whitelisted_torrent_hash) + .await; + + assert_ok(response).await; + } + #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { let api_server = start_default_api(&Version::Axum).await; From 39c15c6bcf4690f16617cb92a3fd1094a9e71192 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 12 Jan 2023 19:36:05 +0000 Subject: [PATCH 251/435] test(api): [#143] add test for invalid infohash URL path param --- src/apis/routes.rs | 4 +- src/protocol/info_hash.rs | 4 +- tests/api/asserts.rs | 18 ++++++ tests/tracker_api.rs | 126 ++++++++++++++++++++++++++++++-------- 4 files changed, 124 insertions(+), 28 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 60f5f9da0..378aca929 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -42,7 +42,7 @@ pub fn router(tracker: &Arc) -> Router { ) // Whitelist command .route( - "/api/whitelist/:info_hash", + "/api/whitelist/reload", get(reload_whitelist_handler).with_state(tracker.clone()), ) // Keys @@ -95,7 +95,7 @@ fn response_ok() -> Response { fn response_invalid_info_hash_param(info_hash: &str) -> Response { response_bad_request(&format!( - "Invalid URL: invalid infohash param: string \"{}\", expected expected a 40 character long string", + "Invalid URL: invalid infohash param: string \"{}\", expected a 40 character long string", info_hash )) } diff --git a/src/protocol/info_hash.rs b/src/protocol/info_hash.rs index 9a0900063..3d2fad1a5 100644 --- a/src/protocol/info_hash.rs +++ b/src/protocol/info_hash.rs @@ -77,7 +77,7 @@ impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { if v.len() != 40 { return Err(serde::de::Error::invalid_value( serde::de::Unexpected::Str(v), - &"expected a 40 character long string", + &"a 40 character long string", )); } @@ -86,7 +86,7 @@ impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { return Err(serde::de::Error::invalid_value( 
serde::de::Unexpected::Str(v), - &"expected a hexadecimal string", + &"a hexadecimal string", )); }; Ok(res) diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index d708df58e..23d76f159 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -56,6 +56,13 @@ pub async fn assert_bad_request(response: Response, body: &str) { assert_eq!(response.text().await.unwrap(), body); } +pub async fn assert_not_found(response: Response) { + assert_eq!(response.status(), 404); + // todo: missing header + //assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); + assert_eq!(response.text().await.unwrap(), ""); +} + pub async fn assert_method_not_allowed(response: Response) { assert_eq!(response.status(), 405); assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); @@ -68,6 +75,17 @@ pub async fn assert_torrent_not_known(response: Response) { assert_eq!(response.text().await.unwrap(), "\"torrent not known\""); } +pub async fn assert_invalid_infohash(response: Response, invalid_infohash: &str) { + assert_bad_request( + response, + &format!( + "Invalid URL: invalid infohash param: string \"{}\", expected a 40 character long string", + invalid_infohash + ), + ) + .await; +} + pub async fn assert_token_not_valid(response: Response) { assert_unhandled_rejection(response, "token not valid").await; } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index f959f67b9..a5cb0cc4d 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -766,6 +766,26 @@ mod tracker_apis { */ + // When these infohashes are used in URL path params + // the response is a custom response returned in the handler + fn invalid_infohashes_returning_bad_request() -> Vec { + [ + "0".to_string(), + "-1".to_string(), + "1.1".to_string(), + "INVALID INFOHASH".to_string(), + "9c38422213e30bff212b30c360d26f9a0213642".to_string(), // 39-char length instead of 40 + "9c38422213e30bff212b30c360d26f9a0213642&".to_string(), // 
Invalid char + ] + .to_vec() + } + + // When these infohashes are used in URL path params + // the response is an Axum response returned in the handler + fn invalid_infohashes_returning_not_found() -> Vec { + [String::new(), " ".to_string()].to_vec() + } + mod authentication { use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; use crate::api::client::{Client, Query, QueryParam}; @@ -915,9 +935,10 @@ mod tracker_apis { use torrust_tracker::api::resource::{self, torrent}; use torrust_tracker::protocol::info_hash::InfoHash; + use super::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; use crate::api::asserts::{ - assert_bad_request, assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, - assert_unauthorized, + assert_bad_request, assert_invalid_infohash, assert_not_found, assert_token_not_valid, assert_torrent_info, + assert_torrent_list, assert_torrent_not_known, assert_unauthorized, }; use crate::api::client::{Client, Query, QueryParam}; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -1007,24 +1028,33 @@ mod tracker_apis { } #[tokio::test] - async fn should_fail_getting_torrents_when_query_parameters_cannot_be_parsed() { + async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed() { let api_server = start_default_api(&Version::Axum).await; - let invalid_offset = "INVALID OFFSET"; + let invalid_offsets = [" ", "-1", "1.1", "INVALID OFFSET"]; - let response = Client::new(api_server.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) - .await; + for invalid_offset in &invalid_offsets { + let response = Client::new(api_server.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) + .await; - assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; 
+ assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; + } + } - let invalid_limit = "INVALID LIMIT"; + #[tokio::test] + async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_parsed() { + let api_server = start_default_api(&Version::Axum).await; - let response = Client::new(api_server.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) - .await; + let invalid_limits = [" ", "-1", "1.1", "INVALID LIMIT"]; + + for invalid_limit in &invalid_limits { + let response = Client::new(api_server.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) + .await; - assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; + assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; + } } #[tokio::test] @@ -1085,20 +1115,24 @@ mod tracker_apis { } #[tokio::test] - async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_cannot_be_parsed() { + async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invalid() { let api_server = start_default_api(&Version::Axum).await; - let invalid_infohash = "INVALID INFOHASH"; + for invalid_infohash in &invalid_infohashes_returning_bad_request() { + let response = Client::new(api_server.get_connection_info()) + .get_torrent(invalid_infohash) + .await; - let response = Client::new(api_server.get_connection_info()) - .get_torrent(invalid_infohash) - .await; + assert_invalid_infohash(response, invalid_infohash).await; + } - assert_bad_request( - response, - "Invalid URL: invalid infohash param: string \"INVALID INFOHASH\", expected expected a 40 character long string", - ) - .await; + for invalid_infohash in &invalid_infohashes_returning_not_found() { + let response = Client::new(api_server.get_connection_info()) + .get_torrent(invalid_infohash) + 
.await; + + assert_not_found(response).await; + } } #[tokio::test] @@ -1128,9 +1162,11 @@ mod tracker_apis { use torrust_tracker::protocol::info_hash::InfoHash; + use super::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; use crate::api::asserts::{ assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, - assert_failed_to_whitelist_torrent, assert_ok, assert_token_not_valid, assert_unauthorized, + assert_failed_to_whitelist_torrent, assert_invalid_infohash, assert_not_found, assert_ok, assert_token_not_valid, + assert_unauthorized, }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -1205,6 +1241,27 @@ mod tracker_apis { assert_failed_to_whitelist_torrent(response).await; } + #[tokio::test] + async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invalid() { + let api_server = start_default_api(&Version::Axum).await; + + for invalid_infohash in &invalid_infohashes_returning_bad_request() { + let response = Client::new(api_server.get_connection_info()) + .whitelist_a_torrent(invalid_infohash) + .await; + + assert_invalid_infohash(response, invalid_infohash).await; + } + + for invalid_infohash in &invalid_infohashes_returning_not_found() { + let response = Client::new(api_server.get_connection_info()) + .whitelist_a_torrent(invalid_infohash) + .await; + + assert_not_found(response).await; + } + } + #[tokio::test] async fn should_allow_removing_a_torrent_from_the_whitelist() { let api_server = start_default_api(&Version::Axum).await; @@ -1234,6 +1291,27 @@ mod tracker_apis { assert_ok(response).await; } + #[tokio::test] + async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_infohash_is_invalid() { + let api_server = start_default_api(&Version::Axum).await; + + for invalid_infohash in &invalid_infohashes_returning_bad_request() { + let response = 
Client::new(api_server.get_connection_info()) + .remove_torrent_from_whitelist(invalid_infohash) + .await; + + assert_invalid_infohash(response, invalid_infohash).await; + } + + for invalid_infohash in &invalid_infohashes_returning_not_found() { + let response = Client::new(api_server.get_connection_info()) + .remove_torrent_from_whitelist(invalid_infohash) + .await; + + assert_not_found(response).await; + } + } + #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { let api_server = start_default_api(&Version::Axum).await; From 2c222ee906e7e52dd01a41d75f105a67233172c9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 14:17:07 +0000 Subject: [PATCH 252/435] test(api): [#143] add test for invalid key duration URL path param --- src/apis/routes.rs | 20 ++++++++++++++++++++ tests/api/client.rs | 4 +++- tests/tracker_api.rs | 22 ++++++++++++++++------ 3 files changed, 39 insertions(+), 7 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 378aca929..5b4fb7e26 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -21,6 +21,26 @@ use crate::tracker::services::statistics::get_metrics; use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; use crate::tracker::Tracker; +/* code-review: + When Axum cannot parse a path or query param it shows a message like this: + + For the "seconds_valid_or_key" path param: + + "Invalid URL: Cannot parse "-1" to a `u64`" + + That message is not an informative message, specially if you have more than one param. + We should show a message similar to the one we use when we parse the value in the handler. + For example: + + "Invalid URL: invalid infohash param: string \"INVALID VALUE\", expected a 40 character long string" + + We can customize the error message by using a custom type with custom serde deserialization. + The same we are using for the "InfoHashVisitor". 
+ + Input data from HTTP requests should use struts with primitive types (first level of validation). + We can put the second level of validation in the application and domain services. +*/ + pub fn router(tracker: &Arc) -> Router { Router::new() // Stats diff --git a/tests/api/client.rs b/tests/api/client.rs index 5b2072cec..b0b864ff5 100644 --- a/tests/api/client.rs +++ b/tests/api/client.rs @@ -120,7 +120,7 @@ impl Client { self.get_request_with_query(path, query).await } - async fn post(&self, path: &str) -> Response { + pub async fn post(&self, path: &str) -> Response { reqwest::Client::new() .post(self.base_url(path).clone()) .query(&ReqwestQuery::from(self.query_with_token())) @@ -142,6 +142,7 @@ impl Client { format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) } + // Unauthenticated GET request with query component pub async fn get_request_with_query(&self, path: &str, params: Query) -> Response { reqwest::Client::builder() .build() @@ -153,6 +154,7 @@ impl Client { .unwrap() } + // Unauthenticated GET request pub async fn get_request(&self, path: &str) -> Response { reqwest::Client::builder() .build() diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index a5cb0cc4d..6c7de84f0 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -1444,16 +1444,26 @@ mod tracker_apis { } #[tokio::test] - async fn should_fail_generating_a_new_auth_key_when_the_key_duration_cannot_be_parsed() { + async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { let api_server = start_default_api(&Version::Axum).await; - let invalid_key_duration = -1; + let invalid_key_durations = [ + // "", it returns 404 + // " ", it returns 404 + "-1", "text", + ]; - let response = Client::new(api_server.get_connection_info()) - .generate_auth_key(invalid_key_duration) - .await; + for invalid_key_duration in &invalid_key_durations { + let response = Client::new(api_server.get_connection_info()) + .post(&format!("key/{}", 
&invalid_key_duration)) + .await; - assert_bad_request(response, "Invalid URL: Cannot parse `\"-1\"` to a `u64`").await; + assert_bad_request( + response, + &format!("Invalid URL: Cannot parse `\"{invalid_key_duration}\"` to a `u64`"), + ) + .await; + } } #[tokio::test] From 072f3d7d42350ae157817843779499d7bf09587a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 14:25:59 +0000 Subject: [PATCH 253/435] test(api): [#143] add more tests for invalid key id URL path param --- tests/tracker_api.rs | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 6c7de84f0..695c9dc9e 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -1499,16 +1499,26 @@ mod tracker_apis { } #[tokio::test] - async fn should_fail_deleting_an_auth_key_when_the_key_id_cannot_be_parsed() { + async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { let api_server = start_default_api(&Version::Axum).await; - let invalid_auth_key_id = "INVALID AUTH KEY ID"; + let invalid_auth_key_ids = [ + // "", it returns a 404 + // " ", it returns a 404 + "0", + "-1", + "INVALID AUTH KEY ID", + "IrweYtVuQPGbG9Jzx1DihcPmJGGpVy8", // 32 char key cspell:disable-line + "IrweYtVuQPGbG9Jzx1DihcPmJGGpVy8zs", // 34 char key cspell:disable-line + ]; - let response = Client::new(api_server.get_connection_info()) - .delete_auth_key(invalid_auth_key_id) - .await; + for invalid_auth_key_id in &invalid_auth_key_ids { + let response = Client::new(api_server.get_connection_info()) + .delete_auth_key(invalid_auth_key_id) + .await; - assert_bad_request(response, "Invalid auth key id param \"INVALID AUTH KEY ID\"").await; + assert_bad_request(response, &format!("Invalid auth key id param \"{}\"", &invalid_auth_key_id)).await; + } } #[tokio::test] From 8d32628e6e80bd5429917ea12e6e815577b0f203 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 16:27:33 +0000 Subject: [PATCH 254/435] 
refactor(api): [#143] extract and rename functions --- src/apis/routes.rs | 98 +++++++++++++++++++++++++++++--------------- tests/api/asserts.rs | 22 +++++++--- tests/tracker_api.rs | 25 +++++------ 3 files changed, 93 insertions(+), 52 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 5b4fb7e26..f0585b225 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -17,8 +17,8 @@ use crate::api::resource::stats::Stats; use crate::api::resource::torrent::{ListItem, Torrent}; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth::KeyId; -use crate::tracker::services::statistics::get_metrics; -use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; +use crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; +use crate::tracker::services::torrent::{get_torrent_info, get_torrents, BasicInfo, Info, Pagination}; use crate::tracker::Tracker; /* code-review: @@ -91,6 +91,18 @@ pub enum ActionStatus<'a> { // Resource responses +fn response_stats(tracker_metrics: TrackerMetrics) -> Json { + Json(Stats::from(tracker_metrics)) +} + +fn response_torrent_list(basic_infos: &[BasicInfo]) -> Json> { + Json(ListItem::new_vec(basic_infos)) +} + +fn response_torrent_info(info: Info) -> Response { + Json(Torrent::from(info)).into_response() +} + fn response_auth_key(auth_key: &AuthKey) -> Response { ( StatusCode::OK, @@ -120,6 +132,10 @@ fn response_invalid_info_hash_param(info_hash: &str) -> Response { )) } +fn response_invalid_auth_key_param(invalid_key: &str) -> Response { + response_bad_request(&format!("Invalid auth key id param \"{invalid_key}\"")) +} + fn response_bad_request(body: &str) -> Response { ( StatusCode::BAD_REQUEST, @@ -129,7 +145,35 @@ fn response_bad_request(body: &str) -> Response { .into_response() } -fn response_err(reason: String) -> Response { +fn response_torrent_not_known() -> Response { + Json(json!("torrent not known")).into_response() +} + +fn 
response_failed_to_remove_torrent_from_whitelist() -> Response { + response_unhandled_rejection("failed to remove torrent from whitelist".to_string()) +} + +fn response_failed_to_whitelist_torrent() -> Response { + response_unhandled_rejection("failed to whitelist torrent".to_string()) +} + +fn response_failed_to_reload_whitelist() -> Response { + response_unhandled_rejection("failed to reload whitelist".to_string()) +} + +fn response_failed_to_generate_key() -> Response { + response_unhandled_rejection("failed to generate key".to_string()) +} + +fn response_failed_to_delete_key() -> Response { + response_unhandled_rejection("failed to delete key".to_string()) +} + +fn response_failed_to_reload_keys() -> Response { + response_unhandled_rejection("failed to reload keys".to_string()) +} + +fn response_unhandled_rejection(reason: String) -> Response { ( StatusCode::INTERNAL_SERVER_ERROR, [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], @@ -139,25 +183,19 @@ fn response_err(reason: String) -> Response { } pub async fn get_stats_handler(State(tracker): State>) -> Json { - Json(Stats::from(get_metrics(tracker.clone()).await)) + response_stats(get_metrics(tracker.clone()).await) } #[derive(Deserialize)] pub struct InfoHashParam(String); pub async fn get_torrent_handler(State(tracker): State>, Path(info_hash): Path) -> Response { - let parsing_info_hash_result = InfoHash::from_str(&info_hash.0); - - match parsing_info_hash_result { + match InfoHash::from_str(&info_hash.0) { Err(_) => response_invalid_info_hash_param(&info_hash.0), - Ok(info_hash) => { - let optional_torrent_info = get_torrent_info(tracker.clone(), &info_hash).await; - - match optional_torrent_info { - Some(info) => Json(Torrent::from(info)).into_response(), - None => Json(json!("torrent not known")).into_response(), - } - } + Ok(info_hash) => match get_torrent_info(tracker.clone(), &info_hash).await { + Some(info) => response_torrent_info(info), + None => response_torrent_not_known(), + }, } } @@ 
-172,26 +210,24 @@ pub async fn get_torrents_handler( State(tracker): State>, pagination: Query, ) -> Json> { - Json(ListItem::new_vec( + response_torrent_list( &get_torrents( tracker.clone(), &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), ) .await, - )) + ) } pub async fn add_torrent_to_whitelist_handler( State(tracker): State>, Path(info_hash): Path, ) -> Response { - let parsing_info_hash_result = InfoHash::from_str(&info_hash.0); - - match parsing_info_hash_result { + match InfoHash::from_str(&info_hash.0) { Err(_) => response_invalid_info_hash_param(&info_hash.0), Ok(info_hash) => match tracker.add_torrent_to_whitelist(&info_hash).await { Ok(..) => response_ok(), - Err(..) => response_err("failed to whitelist torrent".to_string()), + Err(..) => response_failed_to_whitelist_torrent(), }, } } @@ -200,13 +236,11 @@ pub async fn remove_torrent_from_whitelist_handler( State(tracker): State>, Path(info_hash): Path, ) -> Response { - let parsing_info_hash_result = InfoHash::from_str(&info_hash.0); - - match parsing_info_hash_result { + match InfoHash::from_str(&info_hash.0) { Err(_) => response_invalid_info_hash_param(&info_hash.0), Ok(info_hash) => match tracker.remove_torrent_from_whitelist(&info_hash).await { Ok(..) => response_ok(), - Err(..) => response_err("failed to remove torrent from whitelist".to_string()), + Err(..) => response_failed_to_remove_torrent_from_whitelist(), }, } } @@ -214,7 +248,7 @@ pub async fn remove_torrent_from_whitelist_handler( pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { match tracker.load_whitelist().await { Ok(..) => response_ok(), - Err(..) => response_err("failed to reload whitelist".to_string()), + Err(..) 
=> response_failed_to_reload_whitelist(), } } @@ -222,7 +256,7 @@ pub async fn generate_auth_key_handler(State(tracker): State>, Path let seconds_valid = seconds_valid_or_key; match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { Ok(auth_key) => response_auth_key(&AuthKey::from(auth_key)), - Err(_) => response_err("failed to generate key".to_string()), + Err(_) => response_failed_to_generate_key(), } } @@ -233,13 +267,11 @@ pub async fn delete_auth_key_handler( State(tracker): State>, Path(seconds_valid_or_key): Path, ) -> Response { - let key_id = KeyId::from_str(&seconds_valid_or_key.0); - - match key_id { - Err(_) => response_bad_request(&format!("Invalid auth key id param \"{}\"", seconds_valid_or_key.0)), + match KeyId::from_str(&seconds_valid_or_key.0) { + Err(_) => response_invalid_auth_key_param(&seconds_valid_or_key.0), Ok(key_id) => match tracker.remove_auth_key(&key_id.to_string()).await { Ok(_) => response_ok(), - Err(_) => response_err("failed to delete key".to_string()), + Err(_) => response_failed_to_delete_key(), }, } } @@ -247,7 +279,7 @@ pub async fn delete_auth_key_handler( pub async fn reload_keys_handler(State(tracker): State>) -> Response { match tracker.load_keys().await { Ok(..) => response_ok(), - Err(..) => response_err("failed to reload keys".to_string()), + Err(..) 
=> response_failed_to_reload_keys(), } } diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index 23d76f159..d1730fd9b 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -75,7 +75,7 @@ pub async fn assert_torrent_not_known(response: Response) { assert_eq!(response.text().await.unwrap(), "\"torrent not known\""); } -pub async fn assert_invalid_infohash(response: Response, invalid_infohash: &str) { +pub async fn assert_invalid_infohash_param(response: Response, invalid_infohash: &str) { assert_bad_request( response, &format!( @@ -86,6 +86,18 @@ pub async fn assert_invalid_infohash(response: Response, invalid_infohash: &str) .await; } +pub async fn assert_invalid_auth_key_param(response: Response, invalid_auth_key: &str) { + assert_bad_request(response, &format!("Invalid auth key id param \"{}\"", &invalid_auth_key)).await; +} + +pub async fn assert_invalid_key_duration_param(response: Response, invalid_key_duration: &str) { + assert_bad_request( + response, + &format!("Invalid URL: Cannot parse `\"{invalid_key_duration}\"` to a `u64`"), + ) + .await; +} + pub async fn assert_token_not_valid(response: Response) { assert_unhandled_rejection(response, "token not valid").await; } @@ -102,6 +114,10 @@ pub async fn assert_failed_to_whitelist_torrent(response: Response) { assert_unhandled_rejection(response, "failed to whitelist torrent").await; } +pub async fn assert_failed_to_reload_whitelist(response: Response) { + assert_unhandled_rejection(response, "failed to reload whitelist").await; +} + pub async fn assert_failed_to_generate_key(response: Response) { assert_unhandled_rejection(response, "failed to generate key").await; } @@ -110,10 +126,6 @@ pub async fn assert_failed_to_delete_key(response: Response) { assert_unhandled_rejection(response, "failed to delete key").await; } -pub async fn assert_failed_to_reload_whitelist(response: Response) { - assert_unhandled_rejection(response, "failed to reload whitelist").await; -} - pub async fn 
assert_failed_to_reload_keys(response: Response) { assert_unhandled_rejection(response, "failed to reload keys").await; } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 695c9dc9e..b41eb5c29 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -937,7 +937,7 @@ mod tracker_apis { use super::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; use crate::api::asserts::{ - assert_bad_request, assert_invalid_infohash, assert_not_found, assert_token_not_valid, assert_torrent_info, + assert_bad_request, assert_invalid_infohash_param, assert_not_found, assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, }; use crate::api::client::{Client, Query, QueryParam}; @@ -1123,7 +1123,7 @@ mod tracker_apis { .get_torrent(invalid_infohash) .await; - assert_invalid_infohash(response, invalid_infohash).await; + assert_invalid_infohash_param(response, invalid_infohash).await; } for invalid_infohash in &invalid_infohashes_returning_not_found() { @@ -1165,8 +1165,8 @@ mod tracker_apis { use super::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; use crate::api::asserts::{ assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, - assert_failed_to_whitelist_torrent, assert_invalid_infohash, assert_not_found, assert_ok, assert_token_not_valid, - assert_unauthorized, + assert_failed_to_whitelist_torrent, assert_invalid_infohash_param, assert_not_found, assert_ok, + assert_token_not_valid, assert_unauthorized, }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -1250,7 +1250,7 @@ mod tracker_apis { .whitelist_a_torrent(invalid_infohash) .await; - assert_invalid_infohash(response, invalid_infohash).await; + assert_invalid_infohash_param(response, invalid_infohash).await; } for invalid_infohash in &invalid_infohashes_returning_not_found() { 
@@ -1300,7 +1300,7 @@ mod tracker_apis { .remove_torrent_from_whitelist(invalid_infohash) .await; - assert_invalid_infohash(response, invalid_infohash).await; + assert_invalid_infohash_param(response, invalid_infohash).await; } for invalid_infohash in &invalid_infohashes_returning_not_found() { @@ -1396,8 +1396,9 @@ mod tracker_apis { use torrust_tracker::tracker::auth::Key; use crate::api::asserts::{ - assert_auth_key_utf8, assert_bad_request, assert_failed_to_delete_key, assert_failed_to_generate_key, - assert_failed_to_reload_keys, assert_ok, assert_token_not_valid, assert_unauthorized, + assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, + assert_invalid_auth_key_param, assert_invalid_key_duration_param, assert_ok, assert_token_not_valid, + assert_unauthorized, }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; @@ -1458,11 +1459,7 @@ mod tracker_apis { .post(&format!("key/{}", &invalid_key_duration)) .await; - assert_bad_request( - response, - &format!("Invalid URL: Cannot parse `\"{invalid_key_duration}\"` to a `u64`"), - ) - .await; + assert_invalid_key_duration_param(response, invalid_key_duration).await; } } @@ -1517,7 +1514,7 @@ mod tracker_apis { .delete_auth_key(invalid_auth_key_id) .await; - assert_bad_request(response, &format!("Invalid auth key id param \"{}\"", &invalid_auth_key_id)).await; + assert_invalid_auth_key_param(response, invalid_auth_key_id).await; } } From 337e12eef5a205bb772dbda24b0f5bb4c42df45a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 16:36:15 +0000 Subject: [PATCH 255/435] feat(api): [#143] replace Warp API with Axum implementation --- src/setup.rs | 22 +++------------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/src/setup.rs b/src/setup.rs index daee7eea8..e7535e67d 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -1,11 +1,10 @@ -use 
std::net::SocketAddr; use std::sync::Arc; use log::warn; use tokio::task::JoinHandle; use crate::config::Configuration; -use crate::jobs::{http_tracker, torrent_cleanup, tracker_api, tracker_apis, udp_tracker}; +use crate::jobs::{http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; use crate::tracker; /// # Panics @@ -51,24 +50,9 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Ve jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone())); } - // Start HTTP API server + // Start HTTP API if config.http_api.enabled { - jobs.push(tracker_api::start_job(&config.http_api, tracker.clone()).await); - } - - // Start HTTP APIs server (multiple API versions) - if config.http_api.enabled { - // Temporarily running the new API in the 1313 port - let bind_address = config.http_api.bind_address.clone(); - let mut bind_socket: SocketAddr = bind_address - .parse() - .expect("bind address should be a valid socket address, for example 127.0.0.1:8080"); - bind_socket.set_port(1313); - - let mut http_apis_config = config.http_api.clone(); - http_apis_config.bind_address = bind_socket.to_string(); - - jobs.push(tracker_apis::start_job(&http_apis_config, tracker.clone()).await); + jobs.push(tracker_apis::start_job(&config.http_api, tracker.clone()).await); } // Remove torrents without peers, every interval From 77ec52184d4ad2eaaf22dfc0838e1568013b40ee Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 16:55:09 +0000 Subject: [PATCH 256/435] refactor(api): [#143] remove Warp API implementation --- src/api/mod.rs | 19 - src/api/routes.rs | 230 ----------- src/api/server.rs | 32 -- src/jobs/mod.rs | 1 - src/jobs/tracker_api.rs | 50 --- tests/api/asserts.rs | 14 +- tests/api/mod.rs | 5 - tests/api/server.rs | 29 +- tests/tracker_api.rs | 817 +++------------------------------------- 9 files changed, 47 insertions(+), 1150 deletions(-) delete mode 100644 src/api/routes.rs delete mode 100644 src/api/server.rs delete mode 100644 
src/jobs/tracker_api.rs diff --git a/src/api/mod.rs b/src/api/mod.rs index d254c91ac..c6bee0532 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1,20 +1 @@ pub mod resource; -pub mod routes; -pub mod server; - -use serde::{Deserialize, Serialize}; - -#[derive(Deserialize, Debug)] -pub struct TorrentInfoQuery { - offset: Option, - limit: Option, -} - -#[derive(Serialize, Debug)] -#[serde(tag = "status", rename_all = "snake_case")] -enum ActionStatus<'a> { - Ok, - Err { reason: std::borrow::Cow<'a, str> }, -} - -impl warp::reject::Reject for ActionStatus<'static> {} diff --git a/src/api/routes.rs b/src/api/routes.rs deleted file mode 100644 index 4280cdb35..000000000 --- a/src/api/routes.rs +++ /dev/null @@ -1,230 +0,0 @@ -use std::cmp::min; -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; -use std::time::Duration; - -use serde::Deserialize; -use warp::{filters, reply, Filter}; - -use super::resource::auth_key::AuthKey; -use super::resource::stats::Stats; -use super::resource::torrent::{ListItem, Torrent}; -use super::{ActionStatus, TorrentInfoQuery}; -use crate::protocol::info_hash::InfoHash; -use crate::tracker; -use crate::tracker::services::statistics::get_metrics; -use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; - -fn authenticate(tokens: HashMap) -> impl Filter + Clone { - #[derive(Deserialize)] - struct AuthToken { - token: Option, - } - - let tokens: HashSet = tokens.into_values().collect(); - - let tokens = Arc::new(tokens); - warp::filters::any::any() - .map(move || tokens.clone()) - .and(filters::query::query::()) - .and_then(|tokens: Arc>, token: AuthToken| async move { - match token.token { - Some(token) => { - if !tokens.contains(&token) { - return Err(warp::reject::custom(ActionStatus::Err { - reason: "token not valid".into(), - })); - } - - Ok(()) - } - None => Err(warp::reject::custom(ActionStatus::Err { - reason: "unauthorized".into(), - })), - } - }) - .untuple_one() -} - 
-#[allow(clippy::too_many_lines)] -#[must_use] -pub fn routes(tracker: &Arc) -> impl Filter + Clone { - // GET /api/torrents?offset=:u32&limit=:u32 - // View torrent list - let api_torrents = tracker.clone(); - let view_torrent_list = filters::method::get() - .and(filters::path::path("torrents")) - .and(filters::path::end()) - .and(filters::query::query()) - .map(move |limits| { - let tracker = api_torrents.clone(); - (limits, tracker) - }) - .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| async move { - let offset = limits.offset.unwrap_or(0); - let limit = min(limits.limit.unwrap_or(1000), 4000); - - Result::<_, warp::reject::Rejection>::Ok(reply::json(&ListItem::new_vec( - &get_torrents(tracker.clone(), &Pagination::new(offset, limit)).await, - ))) - }); - - // GET /api/stats - // View tracker status - let api_stats = tracker.clone(); - let view_stats_list = filters::method::get() - .and(filters::path::path("stats")) - .and(filters::path::end()) - .map(move || api_stats.clone()) - .and_then(|tracker: Arc| async move { - Result::<_, warp::reject::Rejection>::Ok(reply::json(&Stats::from(get_metrics(tracker.clone()).await))) - }); - - // GET /api/torrent/:info_hash - // View torrent info - let t2 = tracker.clone(); - let view_torrent_info = filters::method::get() - .and(filters::path::path("torrent")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { - let tracker = t2.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { - let optional_torrent_info = get_torrent_info(tracker.clone(), &info_hash).await; - - match optional_torrent_info { - Some(info) => Ok(reply::json(&Torrent::from(info))), - None => Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")), - } - }); - - // DELETE /api/whitelist/:info_hash - // Delete info hash from whitelist - let t3 = tracker.clone(); - let delete_torrent = filters::method::delete() - 
.and(filters::path::path("whitelist")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { - let tracker = t3.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { - match tracker.remove_torrent_from_whitelist(&info_hash).await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to remove torrent from whitelist".into(), - })), - } - }); - - // POST /api/whitelist/:info_hash - // Add info hash to whitelist - let t4 = tracker.clone(); - let add_torrent = filters::method::post() - .and(filters::path::path("whitelist")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { - let tracker = t4.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { - match tracker.add_torrent_to_whitelist(&info_hash).await { - Ok(..) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(..) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to whitelist torrent".into(), - })), - } - }); - - // POST /api/key/:seconds_valid - // Generate new key - let t5 = tracker.clone(); - let create_key = filters::method::post() - .and(filters::path::path("key")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |seconds_valid: u64| { - let tracker = t5.clone(); - (seconds_valid, tracker) - }) - .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { - match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { - Ok(auth_key) => Ok(warp::reply::json(&AuthKey::from(auth_key))), - Err(..) 
=> Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to generate key".into(), - })), - } - }); - - // DELETE /api/key/:key - // Delete key - let t6 = tracker.clone(); - let delete_key = filters::method::delete() - .and(filters::path::path("key")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |key: String| { - let tracker = t6.clone(); - (key, tracker) - }) - .and_then(|(key, tracker): (String, Arc)| async move { - match tracker.remove_auth_key(&key).await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to delete key".into(), - })), - } - }); - - // GET /api/whitelist/reload - // Reload whitelist - let t7 = tracker.clone(); - let reload_whitelist = filters::method::get() - .and(filters::path::path("whitelist")) - .and(filters::path::path("reload")) - .and(filters::path::end()) - .map(move || t7.clone()) - .and_then(|tracker: Arc| async move { - match tracker.load_whitelist().await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to reload whitelist".into(), - })), - } - }); - - // GET /api/keys/reload - // Reload whitelist - let t8 = tracker.clone(); - let reload_keys = filters::method::get() - .and(filters::path::path("keys")) - .and(filters::path::path("reload")) - .and(filters::path::end()) - .map(move || t8.clone()) - .and_then(|tracker: Arc| async move { - match tracker.load_keys().await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to reload keys".into(), - })), - } - }); - - let api_routes = filters::path::path("api").and( - view_torrent_list - .or(delete_torrent) - .or(view_torrent_info) - .or(view_stats_list) - .or(add_torrent) - .or(create_key) - .or(delete_key) - .or(reload_whitelist) - .or(reload_keys), - ); - - 
api_routes.and(authenticate(tracker.config.http_api.access_tokens.clone())) -} diff --git a/src/api/server.rs b/src/api/server.rs deleted file mode 100644 index 5d6a3cdfd..000000000 --- a/src/api/server.rs +++ /dev/null @@ -1,32 +0,0 @@ -use std::net::SocketAddr; -use std::sync::Arc; - -use warp::serve; - -use super::routes::routes; -use crate::tracker; - -pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl warp::Future { - let (_addr, api_server) = serve(routes(tracker)).bind_with_graceful_shutdown(socket_addr, async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - }); - - api_server -} - -pub fn start_tls( - socket_addr: SocketAddr, - ssl_cert_path: String, - ssl_key_path: String, - tracker: &Arc, -) -> impl warp::Future { - let (_addr, api_server) = serve(routes(tracker)) - .tls() - .cert_path(ssl_cert_path) - .key_path(ssl_key_path) - .bind_with_graceful_shutdown(socket_addr, async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - }); - - api_server -} diff --git a/src/jobs/mod.rs b/src/jobs/mod.rs index 6f9b12bac..ba44a56ad 100644 --- a/src/jobs/mod.rs +++ b/src/jobs/mod.rs @@ -1,5 +1,4 @@ pub mod http_tracker; pub mod torrent_cleanup; -pub mod tracker_api; pub mod tracker_apis; pub mod udp_tracker; diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs deleted file mode 100644 index 211174f35..000000000 --- a/src/jobs/tracker_api.rs +++ /dev/null @@ -1,50 +0,0 @@ -use std::sync::Arc; - -use log::info; -use tokio::sync::oneshot; -use tokio::task::JoinHandle; - -use crate::api::server; -use crate::config::HttpApi; -use crate::tracker; - -#[derive(Debug)] -pub struct ApiServerJobStarted(); - -/// # Panics -/// -/// It would panic if unable to send the `ApiServerJobStarted` notice. 
-pub async fn start_job(config: &HttpApi, tracker: Arc) -> JoinHandle<()> { - let bind_addr = config - .bind_address - .parse::() - .expect("Tracker API bind_address invalid."); - let ssl_enabled = config.ssl_enabled; - let ssl_cert_path = config.ssl_cert_path.clone(); - let ssl_key_path = config.ssl_key_path.clone(); - - let (tx, rx) = oneshot::channel::(); - - // Run the API server - let join_handle = tokio::spawn(async move { - if !ssl_enabled { - info!("Starting Torrust API server on: http://{}", bind_addr); - let handle = server::start(bind_addr, &tracker); - tx.send(ApiServerJobStarted()).expect("the start job dropped"); - handle.await; - } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { - info!("Starting Torrust API server on: https://{}", bind_addr); - let handle = server::start_tls(bind_addr, ssl_cert_path.unwrap(), ssl_key_path.unwrap(), &tracker); - tx.send(ApiServerJobStarted()).expect("the start job dropped"); - handle.await; - } - }); - - // Wait until the API server job is running - match rx.await { - Ok(_msg) => info!("Torrust API server started"), - Err(e) => panic!("the api server dropped: {e}"), - } - - join_handle -} diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index d1730fd9b..11aac64d1 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -25,12 +25,6 @@ pub async fn assert_torrent_info(response: Response, torrent: Torrent) { assert_eq!(response.json::().await.unwrap(), torrent); } -pub async fn assert_auth_key(response: Response) -> AuthKey { - assert_eq!(response.status(), 200); - assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); - response.json::().await.unwrap() -} - pub async fn assert_auth_key_utf8(response: Response) -> AuthKey { assert_eq!(response.status(), 200); assert_eq!( @@ -58,17 +52,11 @@ pub async fn assert_bad_request(response: Response, body: &str) { pub async fn assert_not_found(response: Response) { assert_eq!(response.status(), 404); - // todo: 
missing header + // todo: missing header in the response //assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); assert_eq!(response.text().await.unwrap(), ""); } -pub async fn assert_method_not_allowed(response: Response) { - assert_eq!(response.status(), 405); - assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); - assert_eq!(response.text().await.unwrap(), "HTTP method not allowed"); -} - pub async fn assert_torrent_not_known(response: Response) { assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); diff --git a/tests/api/mod.rs b/tests/api/mod.rs index 1311a2356..bc4187375 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -8,11 +8,6 @@ pub mod connection_info; pub mod fixtures; pub mod server; -pub enum Version { - Warp, - Axum, -} - /// It forces a database error by dropping all tables. /// That makes any query fail. /// code-review: alternatively we could inject a database mock in the future. 
diff --git a/tests/api/server.rs b/tests/api/server.rs index 9819a0847..c1cd0630a 100644 --- a/tests/api/server.rs +++ b/tests/api/server.rs @@ -2,47 +2,26 @@ use core::panic; use std::sync::Arc; use torrust_tracker::config::{ephemeral_configuration, Configuration}; -use torrust_tracker::jobs::{tracker_api, tracker_apis}; +use torrust_tracker::jobs::tracker_apis; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use super::connection_info::ConnectionInfo; -use super::Version; pub fn tracker_configuration() -> Arc { Arc::new(ephemeral_configuration()) } -pub async fn start_default_api(version: &Version) -> Server { +pub async fn start_default_api() -> Server { let configuration = tracker_configuration(); - start_custom_api(configuration.clone(), version).await + start_custom_api(configuration.clone()).await } -pub async fn start_custom_api(configuration: Arc, version: &Version) -> Server { - match &version { - Version::Warp => start_warp_api(configuration).await, - Version::Axum => start_axum_api(configuration).await, - } -} - -async fn start_warp_api(configuration: Arc) -> Server { +pub async fn start_custom_api(configuration: Arc) -> Server { let server = start(&configuration); - - // Start the HTTP API job - tracker_api::start_job(&configuration.http_api, server.tracker.clone()).await; - - server -} - -async fn start_axum_api(configuration: Arc) -> Server { - let server = start(&configuration); - - // Start HTTP APIs server (multiple API versions) - // Temporarily run the new API on a port number after the current API port tracker_apis::start_job(&configuration.http_api, server.tracker.clone()).await; - server } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index b41eb5c29..d07d2fe2d 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -1,742 +1,12 @@ /// Integration 
tests for the tracker API /// /// ```text -/// cargo test tracker_api -- --nocapture -/// ``` -/// -/// WIP. We are implementing a new API replacing Warp with Axum. -/// The new API runs in parallel until we finish all endpoints. -/// You can test the new API with: -/// -/// ```text /// cargo test tracker_apis -- --nocapture /// ``` extern crate rand; mod api; -mod tracker_api { - - /* - - Endpoints: - - Stats: - GET /api/stats - - Torrents: - GET /api/torrents?offset=:u32&limit=:u32 - GET /api/torrent/:info_hash - - Whitelisted torrents: - POST /api/whitelist/:info_hash - DELETE /api/whitelist/:info_hash - - Whitelist command: - GET /api/whitelist/reload - - Keys: - POST /api/key/:seconds_valid - DELETE /api/key/:key - - Keys command: - GET /api/keys/reload - - */ - - mod for_stats_resources { - use std::str::FromStr; - - use torrust_tracker::api::resource::stats::Stats; - use torrust_tracker::protocol::info_hash::InfoHash; - - use crate::api::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; - use crate::api::client::Client; - use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::fixtures::sample_peer; - use crate::api::server::start_default_api; - use crate::api::Version; - - #[tokio::test] - async fn should_allow_getting_tracker_statistics() { - let api_server = start_default_api(&Version::Warp).await; - - api_server - .add_torrent( - &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), - &sample_peer(), - ) - .await; - - let response = Client::new(api_server.get_connection_info()).get_tracker_statistics().await; - - assert_stats( - response, - Stats { - torrents: 1, - seeders: 1, - completed: 0, - leechers: 0, - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 
0, - udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0, - }, - ) - .await; - } - - #[tokio::test] - async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Warp).await; - - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .get_tracker_statistics() - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) - .get_tracker_statistics() - .await; - - assert_unauthorized(response).await; - } - } - - mod for_torrent_resources { - use std::str::FromStr; - - use torrust_tracker::api::resource; - use torrust_tracker::api::resource::torrent::{self, Torrent}; - use torrust_tracker::protocol::info_hash::InfoHash; - - use crate::api::asserts::{ - assert_bad_request, assert_method_not_allowed, assert_token_not_valid, assert_torrent_info, assert_torrent_list, - assert_torrent_not_known, assert_unauthorized, - }; - use crate::api::client::{Client, Query, QueryParam}; - use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::fixtures::sample_peer; - use crate::api::server::start_default_api; - use crate::api::Version; - - #[tokio::test] - async fn should_allow_getting_torrents() { - let api_server = start_default_api(&Version::Warp).await; - - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - api_server.add_torrent(&info_hash, &sample_peer()).await; - - let response = Client::new(api_server.get_connection_info()) - .get_torrents(Query::empty()) - .await; - - assert_torrent_list( - response, - vec![torrent::ListItem { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), - seeders: 1, - completed: 0, - leechers: 0, - peers: None, // Torrent list does not include the peer list for each torrent - }], - ) - .await; - } - - #[tokio::test] - async 
fn should_allow_limiting_the_torrents_in_the_result() { - let api_server = start_default_api(&Version::Warp).await; - - // torrents are ordered alphabetically by infohashes - let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - - api_server.add_torrent(&info_hash_1, &sample_peer()).await; - api_server.add_torrent(&info_hash_2, &sample_peer()).await; - - let response = Client::new(api_server.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) - .await; - - assert_torrent_list( - response, - vec![torrent::ListItem { - info_hash: "0b3aea4adc213ce32295be85d3883a63bca25446".to_string(), - seeders: 1, - completed: 0, - leechers: 0, - peers: None, // Torrent list does not include the peer list for each torrent - }], - ) - .await; - } - - #[tokio::test] - async fn should_allow_the_torrents_result_pagination() { - let api_server = start_default_api(&Version::Warp).await; - - // torrents are ordered alphabetically by infohashes - let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - - api_server.add_torrent(&info_hash_1, &sample_peer()).await; - api_server.add_torrent(&info_hash_2, &sample_peer()).await; - - let response = Client::new(api_server.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) - .await; - - assert_torrent_list( - response, - vec![torrent::ListItem { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), - seeders: 1, - completed: 0, - leechers: 0, - peers: None, // Torrent list does not include the peer list for each torrent - }], - ) - .await; - } - - #[tokio::test] - async fn should_fail_getting_torrents_when_query_parameters_cannot_be_parsed() { - let api_server = 
start_default_api(&Version::Warp).await; - - let invalid_offset = "INVALID OFFSET"; - - let response = Client::new(api_server.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) - .await; - - assert_bad_request(response, "Invalid query string").await; - - let invalid_limit = "INVALID LIMIT"; - - let response = Client::new(api_server.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) - .await; - - assert_bad_request(response, "Invalid query string").await; - } - - #[tokio::test] - async fn should_not_allow_getting_torrents_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Warp).await; - - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .get_torrents(Query::empty()) - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) - .get_torrents(Query::default()) - .await; - - assert_unauthorized(response).await; - } - - #[tokio::test] - async fn should_allow_getting_a_torrent_info() { - let api_server = start_default_api(&Version::Warp).await; - - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - let peer = sample_peer(); - - api_server.add_torrent(&info_hash, &peer).await; - - let response = Client::new(api_server.get_connection_info()) - .get_torrent(&info_hash.to_string()) - .await; - - assert_torrent_info( - response, - Torrent { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), - seeders: 1, - completed: 0, - leechers: 0, - peers: Some(vec![resource::peer::Peer::from(peer)]), - }, - ) - .await; - } - - #[tokio::test] - async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { - let api_server = start_default_api(&Version::Warp).await; - - let info_hash = 
InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - let response = Client::new(api_server.get_connection_info()) - .get_torrent(&info_hash.to_string()) - .await; - - assert_torrent_not_known(response).await; - } - - #[tokio::test] - async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_cannot_be_parsed() { - let api_server = start_default_api(&Version::Warp).await; - - let invalid_infohash = "INVALID INFOHASH"; - - let response = Client::new(api_server.get_connection_info()) - .get_torrent(invalid_infohash) - .await; - - assert_method_not_allowed(response).await; - } - - #[tokio::test] - async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Warp).await; - - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - api_server.add_torrent(&info_hash, &sample_peer()).await; - - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .get_torrent(&info_hash.to_string()) - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) - .get_torrent(&info_hash.to_string()) - .await; - - assert_unauthorized(response).await; - } - } - - mod for_whitelisted_torrent_resources { - use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - - use crate::api::asserts::{ - assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, - assert_failed_to_whitelist_torrent, assert_ok, assert_token_not_valid, assert_unauthorized, - }; - use crate::api::client::Client; - use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::server::start_default_api; - use crate::api::{force_database_error, Version}; - - #[tokio::test] - async fn should_allow_whitelisting_a_torrent() { - let api_server = start_default_api(&Version::Warp).await; 
- - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - let response = Client::new(api_server.get_connection_info()) - .whitelist_a_torrent(&info_hash) - .await; - - assert_ok(response).await; - assert!( - api_server - .tracker - .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) - .await - ); - } - - #[tokio::test] - async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let api_server = start_default_api(&Version::Warp).await; - - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - let api_client = Client::new(api_server.get_connection_info()); - - let response = api_client.whitelist_a_torrent(&info_hash).await; - assert_ok(response).await; - - let response = api_client.whitelist_a_torrent(&info_hash).await; - assert_ok(response).await; - } - - #[tokio::test] - async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Warp).await; - - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .whitelist_a_torrent(&info_hash) - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) - .whitelist_a_torrent(&info_hash) - .await; - - assert_unauthorized(response).await; - } - - #[tokio::test] - async fn should_fail_when_the_torrent_cannot_be_whitelisted() { - let api_server = start_default_api(&Version::Warp).await; - - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - force_database_error(&api_server.tracker); - - let response = Client::new(api_server.get_connection_info()) - .whitelist_a_torrent(&info_hash) - .await; - - assert_failed_to_whitelist_torrent(response).await; - } - - #[tokio::test] - async fn should_allow_removing_a_torrent_from_the_whitelist() { - let api_server = 
start_default_api(&Version::Warp).await; - - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let info_hash = InfoHash::from_str(&hash).unwrap(); - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - - let response = Client::new(api_server.get_connection_info()) - .remove_torrent_from_whitelist(&hash) - .await; - - assert_ok(response).await; - assert!(!api_server.tracker.is_info_hash_whitelisted(&info_hash).await); - } - - #[tokio::test] - async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { - let api_server = start_default_api(&Version::Warp).await; - - let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - let response = Client::new(api_server.get_connection_info()) - .remove_torrent_from_whitelist(&non_whitelisted_torrent_hash) - .await; - - assert_ok(response).await; - } - - #[tokio::test] - async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { - let api_server = start_default_api(&Version::Warp).await; - - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let info_hash = InfoHash::from_str(&hash).unwrap(); - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - - force_database_error(&api_server.tracker); - - let response = Client::new(api_server.get_connection_info()) - .remove_torrent_from_whitelist(&hash) - .await; - - assert_failed_to_remove_torrent_from_whitelist(response).await; - } - - #[tokio::test] - async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Warp).await; - - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let info_hash = InfoHash::from_str(&hash).unwrap(); - - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - 
.remove_torrent_from_whitelist(&hash) - .await; - - assert_token_not_valid(response).await; - - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) - .remove_torrent_from_whitelist(&hash) - .await; - - assert_unauthorized(response).await; - } - - #[tokio::test] - async fn should_allow_reload_the_whitelist_from_the_database() { - let api_server = start_default_api(&Version::Warp).await; - - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let info_hash = InfoHash::from_str(&hash).unwrap(); - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - - let response = Client::new(api_server.get_connection_info()).reload_whitelist().await; - - assert_ok(response).await; - /* todo: this assert fails because the whitelist has not been reloaded yet. - We could add a new endpoint GET /api/whitelist/:info_hash to check if a torrent - is whitelisted and use that endpoint to check if the torrent is still there after reloading. 
- assert!( - !(api_server - .tracker - .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) - .await) - ); - */ - } - - #[tokio::test] - async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { - let api_server = start_default_api(&Version::Warp).await; - - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let info_hash = InfoHash::from_str(&hash).unwrap(); - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - - force_database_error(&api_server.tracker); - - let response = Client::new(api_server.get_connection_info()).reload_whitelist().await; - - assert_failed_to_reload_whitelist(response).await; - } - } - - mod for_key_resources { - use std::time::Duration; - - use torrust_tracker::tracker::auth::Key; - - use crate::api::asserts::{ - assert_auth_key, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, - assert_method_not_allowed, assert_ok, assert_token_not_valid, assert_unauthorized, - }; - use crate::api::client::Client; - use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::server::start_default_api; - use crate::api::{force_database_error, Version}; - - #[tokio::test] - async fn should_allow_generating_a_new_auth_key() { - let api_server = start_default_api(&Version::Warp).await; - - let seconds_valid = 60; - - let response = Client::new(api_server.get_connection_info()) - .generate_auth_key(seconds_valid) - .await; - - let auth_key_resource = assert_auth_key(response).await; - - // Verify the key with the tracker - assert!(api_server - .tracker - .verify_auth_key(&Key::from(auth_key_resource)) - .await - .is_ok()); - } - - #[tokio::test] - async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Warp).await; - - let seconds_valid = 60; - - let response = 
Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .generate_auth_key(seconds_valid) - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) - .generate_auth_key(seconds_valid) - .await; - - assert_unauthorized(response).await; - } - - #[tokio::test] - async fn should_fail_generating_a_new_auth_key_when_the_key_duration_cannot_be_parsed() { - let api_server = start_default_api(&Version::Warp).await; - - let invalid_key_duration = -1; - - let response = Client::new(api_server.get_connection_info()) - .generate_auth_key(invalid_key_duration) - .await; - - assert_method_not_allowed(response).await; - } - - #[tokio::test] - async fn should_fail_when_the_auth_key_cannot_be_generated() { - let api_server = start_default_api(&Version::Warp).await; - - force_database_error(&api_server.tracker); - - let seconds_valid = 60; - let response = Client::new(api_server.get_connection_info()) - .generate_auth_key(seconds_valid) - .await; - - assert_failed_to_generate_key(response).await; - } - - #[tokio::test] - async fn should_allow_deleting_an_auth_key() { - let api_server = start_default_api(&Version::Warp).await; - - let seconds_valid = 60; - let auth_key = api_server - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - let response = Client::new(api_server.get_connection_info()) - .delete_auth_key(&auth_key.key) - .await; - - assert_ok(response).await; - } - - #[tokio::test] - async fn should_fail_deleting_an_auth_key_when_the_key_id_cannot_be_parsed() { - let api_server = start_default_api(&Version::Warp).await; - - let invalid_auth_key_id = "INVALID AUTH KEY ID"; - - let response = Client::new(api_server.get_connection_info()) - .delete_auth_key(invalid_auth_key_id) - .await; - - assert_failed_to_delete_key(response).await; - } - - #[tokio::test] - async fn should_fail_when_the_auth_key_cannot_be_deleted() { - let 
api_server = start_default_api(&Version::Warp).await; - - let seconds_valid = 60; - let auth_key = api_server - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - force_database_error(&api_server.tracker); - - let response = Client::new(api_server.get_connection_info()) - .delete_auth_key(&auth_key.key) - .await; - - assert_failed_to_delete_key(response).await; - } - - #[tokio::test] - async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Warp).await; - - let seconds_valid = 60; - - // Generate new auth key - let auth_key = api_server - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .delete_auth_key(&auth_key.key) - .await; - - assert_token_not_valid(response).await; - - // Generate new auth key - let auth_key = api_server - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) - .delete_auth_key(&auth_key.key) - .await; - - assert_unauthorized(response).await; - } - - #[tokio::test] - async fn should_allow_reloading_keys() { - let api_server = start_default_api(&Version::Warp).await; - - let seconds_valid = 60; - api_server - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - let response = Client::new(api_server.get_connection_info()).reload_keys().await; - - assert_ok(response).await; - } - - #[tokio::test] - async fn should_fail_when_keys_cannot_be_reloaded() { - let api_server = start_default_api(&Version::Warp).await; - - let seconds_valid = 60; - api_server - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - force_database_error(&api_server.tracker); - - let response = 
Client::new(api_server.get_connection_info()).reload_keys().await; - - assert_failed_to_reload_keys(response).await; - } - - #[tokio::test] - async fn should_not_allow_reloading_keys_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Warp).await; - - let seconds_valid = 60; - api_server - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .reload_keys() - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) - .reload_keys() - .await; - - assert_unauthorized(response).await; - } - } -} - -/// The new API implementation using Axum mod tracker_apis { /* @@ -790,11 +60,10 @@ mod tracker_apis { use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; use crate::api::client::{Client, Query, QueryParam}; use crate::api::server::start_default_api; - use crate::api::Version; #[tokio::test] async fn should_authenticate_requests_by_using_a_token_query_param() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let token = api_server.get_connection_info().api_token.unwrap(); @@ -807,7 +76,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_missing() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let response = Client::new(api_server.get_connection_info()) .get_request_with_query("stats", Query::default()) @@ -818,7 +87,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_empty() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let response = Client::new(api_server.get_connection_info()) .get_request_with_query("stats", 
Query::params([QueryParam::new("token", "")].to_vec())) @@ -829,7 +98,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_invalid() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let response = Client::new(api_server.get_connection_info()) .get_request_with_query("stats", Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec())) @@ -840,7 +109,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_query() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let token = api_server.get_connection_info().api_token.unwrap(); @@ -871,11 +140,10 @@ mod tracker_apis { use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; use crate::api::server::start_default_api; - use crate::api::Version; #[tokio::test] async fn should_allow_getting_tracker_statistics() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; api_server .add_torrent( @@ -912,7 +180,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .get_tracker_statistics() @@ -944,11 +212,10 @@ mod tracker_apis { use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; use crate::api::server::start_default_api; - use crate::api::Version; #[tokio::test] async fn should_allow_getting_torrents() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let info_hash = 
InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -973,7 +240,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_limiting_the_torrents_in_the_result() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -1001,7 +268,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_the_torrents_result_pagination() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -1029,7 +296,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let invalid_offsets = [" ", "-1", "1.1", "INVALID OFFSET"]; @@ -1044,7 +311,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_parsed() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let invalid_limits = [" ", "-1", "1.1", "INVALID LIMIT"]; @@ -1059,7 +326,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_getting_torrents_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .get_torrents(Query::empty()) @@ -1076,7 +343,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_getting_a_torrent_info() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let info_hash 
= InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -1103,7 +370,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -1116,7 +383,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invalid() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { let response = Client::new(api_server.get_connection_info()) @@ -1137,7 +404,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -1170,12 +437,12 @@ mod tracker_apis { }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; + use crate::api::force_database_error; use crate::api::server::start_default_api; - use crate::api::{force_database_error, Version}; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -1194,7 +461,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -1209,7 +476,7 @@ 
mod tracker_apis { #[tokio::test] async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -1228,7 +495,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_whitelisted() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -1243,7 +510,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invalid() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { let response = Client::new(api_server.get_connection_info()) @@ -1264,7 +531,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_removing_a_torrent_from_the_whitelist() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -1280,7 +547,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -1293,7 +560,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_infohash_is_invalid() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { let response = 
Client::new(api_server.get_connection_info()) @@ -1314,7 +581,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -1331,7 +598,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -1353,7 +620,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_reload_the_whitelist_from_the_database() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -1376,7 +643,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -1402,12 +669,12 @@ mod tracker_apis { }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; + use crate::api::force_database_error; use crate::api::server::start_default_api; - use crate::api::{force_database_error, Version}; #[tokio::test] async fn should_allow_generating_a_new_auth_key() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let seconds_valid = 60; @@ -1427,7 +694,7 
@@ mod tracker_apis { #[tokio::test] async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let seconds_valid = 60; @@ -1446,7 +713,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let invalid_key_durations = [ // "", it returns 404 @@ -1465,7 +732,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_generated() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; force_database_error(&api_server.tracker); @@ -1479,7 +746,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_deleting_an_auth_key() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let seconds_valid = 60; let auth_key = api_server @@ -1497,7 +764,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let invalid_auth_key_ids = [ // "", it returns a 404 @@ -1520,7 +787,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_deleted() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let seconds_valid = 60; let auth_key = api_server @@ -1540,7 +807,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let seconds_valid = 60; @@ -1573,7 +840,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_reloading_keys() { - let 
api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let seconds_valid = 60; api_server @@ -1589,7 +856,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_keys_cannot_be_reloaded() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let seconds_valid = 60; api_server @@ -1607,7 +874,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_reloading_keys_for_unauthenticated_users() { - let api_server = start_default_api(&Version::Axum).await; + let api_server = start_default_api().await; let seconds_valid = 60; api_server From 6dd3c482868dbee638e71b4bee86cce5c751d476 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 17:04:38 +0000 Subject: [PATCH 257/435] refactor(api): [#143] move API resources mod --- src/api/mod.rs | 1 - src/apis/mod.rs | 1 + src/{api/resource => apis/resources}/auth_key.rs | 0 src/{api/resource => apis/resources}/mod.rs | 0 src/{api/resource => apis/resources}/peer.rs | 0 src/{api/resource => apis/resources}/stats.rs | 0 src/{api/resource => apis/resources}/torrent.rs | 4 ++-- src/apis/routes.rs | 6 +++--- src/lib.rs | 1 - tests/api/asserts.rs | 6 +++--- tests/tracker_api.rs | 8 ++++---- 11 files changed, 13 insertions(+), 14 deletions(-) delete mode 100644 src/api/mod.rs rename src/{api/resource => apis/resources}/auth_key.rs (100%) rename src/{api/resource => apis/resources}/mod.rs (100%) rename src/{api/resource => apis/resources}/peer.rs (100%) rename src/{api/resource => apis/resources}/stats.rs (100%) rename src/{api/resource => apis/resources}/torrent.rs (97%) diff --git a/src/api/mod.rs b/src/api/mod.rs deleted file mode 100644 index c6bee0532..000000000 --- a/src/api/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod resource; diff --git a/src/apis/mod.rs b/src/apis/mod.rs index ea1615d6b..7ed3ecb76 100644 --- a/src/apis/mod.rs +++ b/src/apis/mod.rs @@ -1,3 +1,4 @@ pub mod middlewares; +pub mod resources; 
pub mod routes; pub mod server; diff --git a/src/api/resource/auth_key.rs b/src/apis/resources/auth_key.rs similarity index 100% rename from src/api/resource/auth_key.rs rename to src/apis/resources/auth_key.rs diff --git a/src/api/resource/mod.rs b/src/apis/resources/mod.rs similarity index 100% rename from src/api/resource/mod.rs rename to src/apis/resources/mod.rs diff --git a/src/api/resource/peer.rs b/src/apis/resources/peer.rs similarity index 100% rename from src/api/resource/peer.rs rename to src/apis/resources/peer.rs diff --git a/src/api/resource/stats.rs b/src/apis/resources/stats.rs similarity index 100% rename from src/api/resource/stats.rs rename to src/apis/resources/stats.rs diff --git a/src/api/resource/torrent.rs b/src/apis/resources/torrent.rs similarity index 97% rename from src/api/resource/torrent.rs rename to src/apis/resources/torrent.rs index 56fead37a..3d8b2f427 100644 --- a/src/api/resource/torrent.rs +++ b/src/apis/resources/torrent.rs @@ -74,8 +74,8 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::api::resource::peer::Peer; - use crate::api::resource::torrent::{ListItem, Torrent}; + use crate::apis::resources::peer::Peer; + use crate::apis::resources::torrent::{ListItem, Torrent}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; diff --git a/src/apis/routes.rs b/src/apis/routes.rs index f0585b225..e11e7d4c8 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -12,9 +12,9 @@ use serde::{de, Deserialize, Deserializer, Serialize}; use serde_json::json; use super::middlewares::auth::auth; -use crate::api::resource::auth_key::AuthKey; -use crate::api::resource::stats::Stats; -use crate::api::resource::torrent::{ListItem, Torrent}; +use crate::apis::resources::auth_key::AuthKey; +use crate::apis::resources::stats::Stats; +use crate::apis::resources::torrent::{ListItem, Torrent}; use crate::protocol::info_hash::InfoHash; use 
crate::tracker::auth::KeyId; use crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; diff --git a/src/lib.rs b/src/lib.rs index ebf589aa9..e8cf53045 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,3 @@ -pub mod api; pub mod apis; pub mod config; pub mod databases; diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index 11aac64d1..07383f795 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -1,9 +1,9 @@ // code-review: should we use macros to return the exact line where the assert fails? use reqwest::Response; -use torrust_tracker::api::resource::auth_key::AuthKey; -use torrust_tracker::api::resource::stats::Stats; -use torrust_tracker::api::resource::torrent::{ListItem, Torrent}; +use torrust_tracker::apis::resources::auth_key::AuthKey; +use torrust_tracker::apis::resources::stats::Stats; +use torrust_tracker::apis::resources::torrent::{ListItem, Torrent}; // Resource responses diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index d07d2fe2d..9609e80ab 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -132,7 +132,7 @@ mod tracker_apis { mod for_stats_resources { use std::str::FromStr; - use torrust_tracker::api::resource::stats::Stats; + use torrust_tracker::apis::resources::stats::Stats; use torrust_tracker::protocol::info_hash::InfoHash; use crate::api::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; @@ -199,8 +199,8 @@ mod tracker_apis { mod for_torrent_resources { use std::str::FromStr; - use torrust_tracker::api::resource::torrent::Torrent; - use torrust_tracker::api::resource::{self, torrent}; + use torrust_tracker::apis::resources::torrent::Torrent; + use torrust_tracker::apis::resources::{self, torrent}; use torrust_tracker::protocol::info_hash::InfoHash; use super::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; @@ -362,7 +362,7 @@ mod tracker_apis { seeders: 1, completed: 0, leechers: 0, - peers: 
Some(vec![resource::peer::Peer::from(peer)]), + peers: Some(vec![resources::peer::Peer::from(peer)]), }, ) .await; From 2a92b0a01a1f199dab23bfef3ffbd3ee0a9cb272 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 17:18:41 +0000 Subject: [PATCH 258/435] refactor(api): extract mod for API responses --- src/apis/mod.rs | 1 + src/apis/responses.rs | 130 ++++++++++++++++++++++++++++++++++++++++++ src/apis/routes.rs | 118 ++++---------------------------------- 3 files changed, 142 insertions(+), 107 deletions(-) create mode 100644 src/apis/responses.rs diff --git a/src/apis/mod.rs b/src/apis/mod.rs index 7ed3ecb76..5a2aca52a 100644 --- a/src/apis/mod.rs +++ b/src/apis/mod.rs @@ -1,4 +1,5 @@ pub mod middlewares; pub mod resources; +pub mod responses; pub mod routes; pub mod server; diff --git a/src/apis/responses.rs b/src/apis/responses.rs new file mode 100644 index 000000000..179c5f8d1 --- /dev/null +++ b/src/apis/responses.rs @@ -0,0 +1,130 @@ +use axum::http::{header, StatusCode}; +use axum::response::{IntoResponse, Json, Response}; +use serde::Serialize; +use serde_json::json; + +use crate::apis::resources::auth_key::AuthKey; +use crate::apis::resources::stats::Stats; +use crate::apis::resources::torrent::{ListItem, Torrent}; +use crate::tracker::services::statistics::TrackerMetrics; +use crate::tracker::services::torrent::{BasicInfo, Info}; + +#[derive(Serialize, Debug)] +#[serde(tag = "status", rename_all = "snake_case")] +pub enum ActionStatus<'a> { + Ok, + Err { reason: std::borrow::Cow<'a, str> }, +} + +// Resource responses + +#[must_use] +pub fn response_stats(tracker_metrics: TrackerMetrics) -> Json { + Json(Stats::from(tracker_metrics)) +} + +#[must_use] +pub fn response_torrent_list(basic_infos: &[BasicInfo]) -> Json> { + Json(ListItem::new_vec(basic_infos)) +} + +#[must_use] +pub fn response_torrent_info(info: Info) -> Response { + Json(Torrent::from(info)).into_response() +} + +/// # Panics +/// +/// Will panic if it can't convert the 
`AuthKey` resource to json +#[must_use] +pub fn response_auth_key(auth_key: &AuthKey) -> Response { + ( + StatusCode::OK, + [(header::CONTENT_TYPE, "application/json; charset=utf-8")], + serde_json::to_string(auth_key).unwrap(), + ) + .into_response() +} + +// OK response + +/// # Panics +/// +/// Will panic if it can't convert the `ActionStatus` to json +#[must_use] +pub fn response_ok() -> Response { + ( + StatusCode::OK, + [(header::CONTENT_TYPE, "application/json")], + serde_json::to_string(&ActionStatus::Ok).unwrap(), + ) + .into_response() +} + +// Error responses + +#[must_use] +pub fn response_invalid_info_hash_param(info_hash: &str) -> Response { + response_bad_request(&format!( + "Invalid URL: invalid infohash param: string \"{}\", expected a 40 character long string", + info_hash + )) +} + +#[must_use] +pub fn response_invalid_auth_key_param(invalid_key: &str) -> Response { + response_bad_request(&format!("Invalid auth key id param \"{invalid_key}\"")) +} + +fn response_bad_request(body: &str) -> Response { + ( + StatusCode::BAD_REQUEST, + [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], + body.to_owned(), + ) + .into_response() +} + +#[must_use] +pub fn response_torrent_not_known() -> Response { + Json(json!("torrent not known")).into_response() +} + +#[must_use] +pub fn response_failed_to_remove_torrent_from_whitelist() -> Response { + response_unhandled_rejection("failed to remove torrent from whitelist".to_string()) +} + +#[must_use] +pub fn response_failed_to_whitelist_torrent() -> Response { + response_unhandled_rejection("failed to whitelist torrent".to_string()) +} + +#[must_use] +pub fn response_failed_to_reload_whitelist() -> Response { + response_unhandled_rejection("failed to reload whitelist".to_string()) +} + +#[must_use] +pub fn response_failed_to_generate_key() -> Response { + response_unhandled_rejection("failed to generate key".to_string()) +} + +#[must_use] +pub fn response_failed_to_delete_key() -> Response { + 
response_unhandled_rejection("failed to delete key".to_string()) +} + +#[must_use] +pub fn response_failed_to_reload_keys() -> Response { + response_unhandled_rejection("failed to reload keys".to_string()) +} + +fn response_unhandled_rejection(reason: String) -> Response { + ( + StatusCode::INTERNAL_SERVER_ERROR, + [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], + format!("Unhandled rejection: {:?}", ActionStatus::Err { reason: reason.into() }), + ) + .into_response() +} diff --git a/src/apis/routes.rs b/src/apis/routes.rs index e11e7d4c8..2f96569e3 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -4,21 +4,25 @@ use std::sync::Arc; use std::time::Duration; use axum::extract::{Path, Query, State}; -use axum::http::{header, StatusCode}; -use axum::response::{IntoResponse, Json, Response}; +use axum::response::{Json, Response}; use axum::routing::{delete, get, post}; use axum::{middleware, Router}; -use serde::{de, Deserialize, Deserializer, Serialize}; -use serde_json::json; +use serde::{de, Deserialize, Deserializer}; use super::middlewares::auth::auth; +use super::responses::{ + response_auth_key, response_failed_to_delete_key, response_failed_to_generate_key, response_failed_to_reload_keys, + response_failed_to_reload_whitelist, response_failed_to_remove_torrent_from_whitelist, response_failed_to_whitelist_torrent, + response_invalid_auth_key_param, response_invalid_info_hash_param, response_ok, response_stats, response_torrent_info, + response_torrent_list, response_torrent_not_known, +}; use crate::apis::resources::auth_key::AuthKey; use crate::apis::resources::stats::Stats; -use crate::apis::resources::torrent::{ListItem, Torrent}; +use crate::apis::resources::torrent::ListItem; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth::KeyId; -use crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; -use crate::tracker::services::torrent::{get_torrent_info, get_torrents, BasicInfo, Info, Pagination}; +use 
crate::tracker::services::statistics::get_metrics; +use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; use crate::tracker::Tracker; /* code-review: @@ -82,106 +86,6 @@ pub fn router(tracker: &Arc) -> Router { .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)) } -#[derive(Serialize, Debug)] -#[serde(tag = "status", rename_all = "snake_case")] -pub enum ActionStatus<'a> { - Ok, - Err { reason: std::borrow::Cow<'a, str> }, -} - -// Resource responses - -fn response_stats(tracker_metrics: TrackerMetrics) -> Json { - Json(Stats::from(tracker_metrics)) -} - -fn response_torrent_list(basic_infos: &[BasicInfo]) -> Json> { - Json(ListItem::new_vec(basic_infos)) -} - -fn response_torrent_info(info: Info) -> Response { - Json(Torrent::from(info)).into_response() -} - -fn response_auth_key(auth_key: &AuthKey) -> Response { - ( - StatusCode::OK, - [(header::CONTENT_TYPE, "application/json; charset=utf-8")], - serde_json::to_string(auth_key).unwrap(), - ) - .into_response() -} - -// OK response - -fn response_ok() -> Response { - ( - StatusCode::OK, - [(header::CONTENT_TYPE, "application/json")], - serde_json::to_string(&ActionStatus::Ok).unwrap(), - ) - .into_response() -} - -// Error responses - -fn response_invalid_info_hash_param(info_hash: &str) -> Response { - response_bad_request(&format!( - "Invalid URL: invalid infohash param: string \"{}\", expected a 40 character long string", - info_hash - )) -} - -fn response_invalid_auth_key_param(invalid_key: &str) -> Response { - response_bad_request(&format!("Invalid auth key id param \"{invalid_key}\"")) -} - -fn response_bad_request(body: &str) -> Response { - ( - StatusCode::BAD_REQUEST, - [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], - body.to_owned(), - ) - .into_response() -} - -fn response_torrent_not_known() -> Response { - Json(json!("torrent not known")).into_response() -} - -fn response_failed_to_remove_torrent_from_whitelist() -> Response { - 
response_unhandled_rejection("failed to remove torrent from whitelist".to_string()) -} - -fn response_failed_to_whitelist_torrent() -> Response { - response_unhandled_rejection("failed to whitelist torrent".to_string()) -} - -fn response_failed_to_reload_whitelist() -> Response { - response_unhandled_rejection("failed to reload whitelist".to_string()) -} - -fn response_failed_to_generate_key() -> Response { - response_unhandled_rejection("failed to generate key".to_string()) -} - -fn response_failed_to_delete_key() -> Response { - response_unhandled_rejection("failed to delete key".to_string()) -} - -fn response_failed_to_reload_keys() -> Response { - response_unhandled_rejection("failed to reload keys".to_string()) -} - -fn response_unhandled_rejection(reason: String) -> Response { - ( - StatusCode::INTERNAL_SERVER_ERROR, - [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], - format!("Unhandled rejection: {:?}", ActionStatus::Err { reason: reason.into() }), - ) - .into_response() -} - pub async fn get_stats_handler(State(tracker): State>) -> Json { response_stats(get_metrics(tracker.clone()).await) } From 0940463449a314bd35eec17caf0d31f0ae9283a9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 17:32:50 +0000 Subject: [PATCH 259/435] refactor(api): [#143] extract api mods --- src/apis/handlers.rs | 138 +++++++++++++++++++++++++++++++++++++++++++ src/apis/mod.rs | 1 + src/apis/routes.rs | 138 ++----------------------------------------- 3 files changed, 143 insertions(+), 134 deletions(-) create mode 100644 src/apis/handlers.rs diff --git a/src/apis/handlers.rs b/src/apis/handlers.rs new file mode 100644 index 000000000..d625b761a --- /dev/null +++ b/src/apis/handlers.rs @@ -0,0 +1,138 @@ +use std::fmt; +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; + +use axum::extract::{Path, Query, State}; +use axum::response::{Json, Response}; +use serde::{de, Deserialize, Deserializer}; + +use super::responses::{ + response_auth_key, 
response_failed_to_delete_key, response_failed_to_generate_key, response_failed_to_reload_keys, + response_failed_to_reload_whitelist, response_failed_to_remove_torrent_from_whitelist, response_failed_to_whitelist_torrent, + response_invalid_auth_key_param, response_invalid_info_hash_param, response_ok, response_stats, response_torrent_info, + response_torrent_list, response_torrent_not_known, +}; +use crate::apis::resources::auth_key::AuthKey; +use crate::apis::resources::stats::Stats; +use crate::apis::resources::torrent::ListItem; +use crate::protocol::info_hash::InfoHash; +use crate::tracker::auth::KeyId; +use crate::tracker::services::statistics::get_metrics; +use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; +use crate::tracker::Tracker; + +pub async fn get_stats_handler(State(tracker): State>) -> Json { + response_stats(get_metrics(tracker.clone()).await) +} + +#[derive(Deserialize)] +pub struct InfoHashParam(String); + +pub async fn get_torrent_handler(State(tracker): State>, Path(info_hash): Path) -> Response { + match InfoHash::from_str(&info_hash.0) { + Err(_) => response_invalid_info_hash_param(&info_hash.0), + Ok(info_hash) => match get_torrent_info(tracker.clone(), &info_hash).await { + Some(info) => response_torrent_info(info), + None => response_torrent_not_known(), + }, + } +} + +#[derive(Deserialize)] +pub struct PaginationParams { + #[serde(default, deserialize_with = "empty_string_as_none")] + pub offset: Option, + pub limit: Option, +} + +pub async fn get_torrents_handler( + State(tracker): State>, + pagination: Query, +) -> Json> { + response_torrent_list( + &get_torrents( + tracker.clone(), + &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), + ) + .await, + ) +} + +pub async fn add_torrent_to_whitelist_handler( + State(tracker): State>, + Path(info_hash): Path, +) -> Response { + match InfoHash::from_str(&info_hash.0) { + Err(_) => response_invalid_info_hash_param(&info_hash.0), + 
Ok(info_hash) => match tracker.add_torrent_to_whitelist(&info_hash).await { + Ok(..) => response_ok(), + Err(..) => response_failed_to_whitelist_torrent(), + }, + } +} + +pub async fn remove_torrent_from_whitelist_handler( + State(tracker): State>, + Path(info_hash): Path, +) -> Response { + match InfoHash::from_str(&info_hash.0) { + Err(_) => response_invalid_info_hash_param(&info_hash.0), + Ok(info_hash) => match tracker.remove_torrent_from_whitelist(&info_hash).await { + Ok(..) => response_ok(), + Err(..) => response_failed_to_remove_torrent_from_whitelist(), + }, + } +} + +pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { + match tracker.load_whitelist().await { + Ok(..) => response_ok(), + Err(..) => response_failed_to_reload_whitelist(), + } +} + +pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { + let seconds_valid = seconds_valid_or_key; + match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { + Ok(auth_key) => response_auth_key(&AuthKey::from(auth_key)), + Err(_) => response_failed_to_generate_key(), + } +} + +#[derive(Deserialize)] +pub struct KeyIdParam(String); + +pub async fn delete_auth_key_handler( + State(tracker): State>, + Path(seconds_valid_or_key): Path, +) -> Response { + match KeyId::from_str(&seconds_valid_or_key.0) { + Err(_) => response_invalid_auth_key_param(&seconds_valid_or_key.0), + Ok(key_id) => match tracker.remove_auth_key(&key_id.to_string()).await { + Ok(_) => response_ok(), + Err(_) => response_failed_to_delete_key(), + }, + } +} + +pub async fn reload_keys_handler(State(tracker): State>) -> Response { + match tracker.load_keys().await { + Ok(..) => response_ok(), + Err(..) 
=> response_failed_to_reload_keys(), + } +} + +/// Serde deserialization decorator to map empty Strings to None, +fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> +where + D: Deserializer<'de>, + T: FromStr, + T::Err: fmt::Display, +{ + let opt = Option::::deserialize(de)?; + match opt.as_deref() { + None | Some("") => Ok(None), + Some(s) => FromStr::from_str(s).map_err(de::Error::custom).map(Some), + } +} diff --git a/src/apis/mod.rs b/src/apis/mod.rs index 5a2aca52a..a646d5543 100644 --- a/src/apis/mod.rs +++ b/src/apis/mod.rs @@ -1,3 +1,4 @@ +pub mod handlers; pub mod middlewares; pub mod resources; pub mod responses; diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 2f96569e3..a37d79b4d 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -1,28 +1,13 @@ -use std::fmt; -use std::str::FromStr; use std::sync::Arc; -use std::time::Duration; -use axum::extract::{Path, Query, State}; -use axum::response::{Json, Response}; use axum::routing::{delete, get, post}; use axum::{middleware, Router}; -use serde::{de, Deserialize, Deserializer}; -use super::middlewares::auth::auth; -use super::responses::{ - response_auth_key, response_failed_to_delete_key, response_failed_to_generate_key, response_failed_to_reload_keys, - response_failed_to_reload_whitelist, response_failed_to_remove_torrent_from_whitelist, response_failed_to_whitelist_torrent, - response_invalid_auth_key_param, response_invalid_info_hash_param, response_ok, response_stats, response_torrent_info, - response_torrent_list, response_torrent_not_known, +use super::handlers::{ + add_torrent_to_whitelist_handler, delete_auth_key_handler, generate_auth_key_handler, get_stats_handler, get_torrent_handler, + get_torrents_handler, reload_keys_handler, reload_whitelist_handler, remove_torrent_from_whitelist_handler, }; -use crate::apis::resources::auth_key::AuthKey; -use crate::apis::resources::stats::Stats; -use crate::apis::resources::torrent::ListItem; -use 
crate::protocol::info_hash::InfoHash; -use crate::tracker::auth::KeyId; -use crate::tracker::services::statistics::get_metrics; -use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; +use super::middlewares::auth::auth; use crate::tracker::Tracker; /* code-review: @@ -85,118 +70,3 @@ pub fn router(tracker: &Arc) -> Router { .route("/api/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)) } - -pub async fn get_stats_handler(State(tracker): State>) -> Json { - response_stats(get_metrics(tracker.clone()).await) -} - -#[derive(Deserialize)] -pub struct InfoHashParam(String); - -pub async fn get_torrent_handler(State(tracker): State>, Path(info_hash): Path) -> Response { - match InfoHash::from_str(&info_hash.0) { - Err(_) => response_invalid_info_hash_param(&info_hash.0), - Ok(info_hash) => match get_torrent_info(tracker.clone(), &info_hash).await { - Some(info) => response_torrent_info(info), - None => response_torrent_not_known(), - }, - } -} - -#[derive(Deserialize)] -pub struct PaginationParams { - #[serde(default, deserialize_with = "empty_string_as_none")] - pub offset: Option, - pub limit: Option, -} - -pub async fn get_torrents_handler( - State(tracker): State>, - pagination: Query, -) -> Json> { - response_torrent_list( - &get_torrents( - tracker.clone(), - &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), - ) - .await, - ) -} - -pub async fn add_torrent_to_whitelist_handler( - State(tracker): State>, - Path(info_hash): Path, -) -> Response { - match InfoHash::from_str(&info_hash.0) { - Err(_) => response_invalid_info_hash_param(&info_hash.0), - Ok(info_hash) => match tracker.add_torrent_to_whitelist(&info_hash).await { - Ok(..) => response_ok(), - Err(..) 
=> response_failed_to_whitelist_torrent(), - }, - } -} - -pub async fn remove_torrent_from_whitelist_handler( - State(tracker): State>, - Path(info_hash): Path, -) -> Response { - match InfoHash::from_str(&info_hash.0) { - Err(_) => response_invalid_info_hash_param(&info_hash.0), - Ok(info_hash) => match tracker.remove_torrent_from_whitelist(&info_hash).await { - Ok(..) => response_ok(), - Err(..) => response_failed_to_remove_torrent_from_whitelist(), - }, - } -} - -pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { - match tracker.load_whitelist().await { - Ok(..) => response_ok(), - Err(..) => response_failed_to_reload_whitelist(), - } -} - -pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { - let seconds_valid = seconds_valid_or_key; - match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { - Ok(auth_key) => response_auth_key(&AuthKey::from(auth_key)), - Err(_) => response_failed_to_generate_key(), - } -} - -#[derive(Deserialize)] -pub struct KeyIdParam(String); - -pub async fn delete_auth_key_handler( - State(tracker): State>, - Path(seconds_valid_or_key): Path, -) -> Response { - match KeyId::from_str(&seconds_valid_or_key.0) { - Err(_) => response_invalid_auth_key_param(&seconds_valid_or_key.0), - Ok(key_id) => match tracker.remove_auth_key(&key_id.to_string()).await { - Ok(_) => response_ok(), - Err(_) => response_failed_to_delete_key(), - }, - } -} - -pub async fn reload_keys_handler(State(tracker): State>) -> Response { - match tracker.load_keys().await { - Ok(..) => response_ok(), - Err(..) 
=> response_failed_to_reload_keys(), - } -} - -/// Serde deserialization decorator to map empty Strings to None, -fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> -where - D: Deserializer<'de>, - T: FromStr, - T::Err: fmt::Display, -{ - let opt = Option::::deserialize(de)?; - match opt.as_deref() { - None | Some("") => Ok(None), - Some(s) => FromStr::from_str(s).map_err(de::Error::custom).map(Some), - } -} From 6ddbdd95c8fe2f96e306e753d5b3fa3bb1461838 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 17:36:18 +0000 Subject: [PATCH 260/435] docs(api): [#143] move comment --- src/apis/responses.rs | 20 ++++++++++++++++++++ src/apis/routes.rs | 20 -------------------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/src/apis/responses.rs b/src/apis/responses.rs index 179c5f8d1..e5314d410 100644 --- a/src/apis/responses.rs +++ b/src/apis/responses.rs @@ -9,6 +9,26 @@ use crate::apis::resources::torrent::{ListItem, Torrent}; use crate::tracker::services::statistics::TrackerMetrics; use crate::tracker::services::torrent::{BasicInfo, Info}; +/* code-review: + When Axum cannot parse a path or query param it shows a message like this: + + For the "seconds_valid_or_key" path param: + + "Invalid URL: Cannot parse "-1" to a `u64`" + + That message is not an informative message, especially if you have more than one param. + We should show a message similar to the one we use when we parse the value in the handler. + For example: + + "Invalid URL: invalid infohash param: string \"INVALID VALUE\", expected a 40 character long string" + + We can customize the error message by using a custom type with custom serde deserialization. + The same as we are using for the "InfoHashVisitor". + + Input data from HTTP requests should use structs with primitive types (first level of validation). + We can put the second level of validation in the application and domain services. 
+*/ + #[derive(Serialize, Debug)] #[serde(tag = "status", rename_all = "snake_case")] pub enum ActionStatus<'a> { diff --git a/src/apis/routes.rs b/src/apis/routes.rs index a37d79b4d..281979aa5 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -10,26 +10,6 @@ use super::handlers::{ use super::middlewares::auth::auth; use crate::tracker::Tracker; -/* code-review: - When Axum cannot parse a path or query param it shows a message like this: - - For the "seconds_valid_or_key" path param: - - "Invalid URL: Cannot parse "-1" to a `u64`" - - That message is not an informative message, specially if you have more than one param. - We should show a message similar to the one we use when we parse the value in the handler. - For example: - - "Invalid URL: invalid infohash param: string \"INVALID VALUE\", expected a 40 character long string" - - We can customize the error message by using a custom type with custom serde deserialization. - The same we are using for the "InfoHashVisitor". - - Input data from HTTP requests should use struts with primitive types (first level of validation). - We can put the second level of validation in the application and domain services. 
-*/ - pub fn router(tracker: &Arc) -> Router { Router::new() // Stats From 6955666bf0d7be54d9bae8936237f7d9608ac8ba Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 18:10:54 +0000 Subject: [PATCH 261/435] docs(api): [#143] remove comment --- tests/tracker_api.rs | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 9609e80ab..e4fff7ca4 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -9,33 +9,6 @@ mod api; mod tracker_apis { - /* - - Endpoints: - - Stats: - - [ ] GET /api/stats - - Torrents: - - [ ] GET /api/torrents?offset=:u32&limit=:u32 - - [ ] GET /api/torrent/:info_hash - - Whitelisted torrents: - - [ ] POST /api/whitelist/:info_hash - - [ ] DELETE /api/whitelist/:info_hash - - Whitelist commands: - - [ ] GET /api/whitelist/reload - - Keys: - - [ ] POST /api/key/:seconds_valid - - [ ] DELETE /api/key/:key - - Keys commands - - [ ] GET /api/keys/reload - - */ - // When these infohashes are used in URL path params // the response is a custom response returned in the handler fn invalid_infohashes_returning_bad_request() -> Vec { From 02dfe3ee87acaa20b4ed4b4432dab2ca9a5e8b34 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 18:22:12 +0000 Subject: [PATCH 262/435] refactor(api): [#143] rename response functions --- src/apis/handlers.rs | 48 +++++++++++++++++++++---------------------- src/apis/responses.rs | 48 +++++++++++++++++++++---------------------- 2 files changed, 48 insertions(+), 48 deletions(-) diff --git a/src/apis/handlers.rs b/src/apis/handlers.rs index d625b761a..8a66b4d76 100644 --- a/src/apis/handlers.rs +++ b/src/apis/handlers.rs @@ -8,10 +8,10 @@ use axum::response::{Json, Response}; use serde::{de, Deserialize, Deserializer}; use super::responses::{ - response_auth_key, response_failed_to_delete_key, response_failed_to_generate_key, response_failed_to_reload_keys, - response_failed_to_reload_whitelist, 
response_failed_to_remove_torrent_from_whitelist, response_failed_to_whitelist_torrent, - response_invalid_auth_key_param, response_invalid_info_hash_param, response_ok, response_stats, response_torrent_info, - response_torrent_list, response_torrent_not_known, + auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, failed_to_reload_keys_response, + failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, + invalid_auth_key_param_response, invalid_info_hash_param_response, ok_response, stats_response, torrent_info_response, + torrent_list_response, torrent_not_known_response, }; use crate::apis::resources::auth_key::AuthKey; use crate::apis::resources::stats::Stats; @@ -23,7 +23,7 @@ use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Paginati use crate::tracker::Tracker; pub async fn get_stats_handler(State(tracker): State>) -> Json { - response_stats(get_metrics(tracker.clone()).await) + stats_response(get_metrics(tracker.clone()).await) } #[derive(Deserialize)] @@ -31,10 +31,10 @@ pub struct InfoHashParam(String); pub async fn get_torrent_handler(State(tracker): State>, Path(info_hash): Path) -> Response { match InfoHash::from_str(&info_hash.0) { - Err(_) => response_invalid_info_hash_param(&info_hash.0), + Err(_) => invalid_info_hash_param_response(&info_hash.0), Ok(info_hash) => match get_torrent_info(tracker.clone(), &info_hash).await { - Some(info) => response_torrent_info(info), - None => response_torrent_not_known(), + Some(info) => torrent_info_response(info), + None => torrent_not_known_response(), }, } } @@ -50,7 +50,7 @@ pub async fn get_torrents_handler( State(tracker): State>, pagination: Query, ) -> Json> { - response_torrent_list( + torrent_list_response( &get_torrents( tracker.clone(), &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), @@ -64,10 +64,10 @@ pub async fn add_torrent_to_whitelist_handler( 
Path(info_hash): Path, ) -> Response { match InfoHash::from_str(&info_hash.0) { - Err(_) => response_invalid_info_hash_param(&info_hash.0), + Err(_) => invalid_info_hash_param_response(&info_hash.0), Ok(info_hash) => match tracker.add_torrent_to_whitelist(&info_hash).await { - Ok(..) => response_ok(), - Err(..) => response_failed_to_whitelist_torrent(), + Ok(..) => ok_response(), + Err(..) => failed_to_whitelist_torrent_response(), }, } } @@ -77,26 +77,26 @@ pub async fn remove_torrent_from_whitelist_handler( Path(info_hash): Path, ) -> Response { match InfoHash::from_str(&info_hash.0) { - Err(_) => response_invalid_info_hash_param(&info_hash.0), + Err(_) => invalid_info_hash_param_response(&info_hash.0), Ok(info_hash) => match tracker.remove_torrent_from_whitelist(&info_hash).await { - Ok(..) => response_ok(), - Err(..) => response_failed_to_remove_torrent_from_whitelist(), + Ok(..) => ok_response(), + Err(..) => failed_to_remove_torrent_from_whitelist_response(), }, } } pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { match tracker.load_whitelist().await { - Ok(..) => response_ok(), - Err(..) => response_failed_to_reload_whitelist(), + Ok(..) => ok_response(), + Err(..) 
=> failed_to_reload_whitelist_response(), } } pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { let seconds_valid = seconds_valid_or_key; match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { - Ok(auth_key) => response_auth_key(&AuthKey::from(auth_key)), - Err(_) => response_failed_to_generate_key(), + Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), + Err(_) => failed_to_generate_key_response(), } } @@ -108,18 +108,18 @@ pub async fn delete_auth_key_handler( Path(seconds_valid_or_key): Path, ) -> Response { match KeyId::from_str(&seconds_valid_or_key.0) { - Err(_) => response_invalid_auth_key_param(&seconds_valid_or_key.0), + Err(_) => invalid_auth_key_param_response(&seconds_valid_or_key.0), Ok(key_id) => match tracker.remove_auth_key(&key_id.to_string()).await { - Ok(_) => response_ok(), - Err(_) => response_failed_to_delete_key(), + Ok(_) => ok_response(), + Err(_) => failed_to_delete_key_response(), }, } } pub async fn reload_keys_handler(State(tracker): State>) -> Response { match tracker.load_keys().await { - Ok(..) => response_ok(), - Err(..) => response_failed_to_reload_keys(), + Ok(..) => ok_response(), + Err(..) 
=> failed_to_reload_keys_response(), } } diff --git a/src/apis/responses.rs b/src/apis/responses.rs index e5314d410..0b2a14c70 100644 --- a/src/apis/responses.rs +++ b/src/apis/responses.rs @@ -39,17 +39,17 @@ pub enum ActionStatus<'a> { // Resource responses #[must_use] -pub fn response_stats(tracker_metrics: TrackerMetrics) -> Json { +pub fn stats_response(tracker_metrics: TrackerMetrics) -> Json { Json(Stats::from(tracker_metrics)) } #[must_use] -pub fn response_torrent_list(basic_infos: &[BasicInfo]) -> Json> { +pub fn torrent_list_response(basic_infos: &[BasicInfo]) -> Json> { Json(ListItem::new_vec(basic_infos)) } #[must_use] -pub fn response_torrent_info(info: Info) -> Response { +pub fn torrent_info_response(info: Info) -> Response { Json(Torrent::from(info)).into_response() } @@ -57,7 +57,7 @@ pub fn response_torrent_info(info: Info) -> Response { /// /// Will panic if it can't convert the `AuthKey` resource to json #[must_use] -pub fn response_auth_key(auth_key: &AuthKey) -> Response { +pub fn auth_key_response(auth_key: &AuthKey) -> Response { ( StatusCode::OK, [(header::CONTENT_TYPE, "application/json; charset=utf-8")], @@ -72,7 +72,7 @@ pub fn response_auth_key(auth_key: &AuthKey) -> Response { /// /// Will panic if it can't convert the `ActionStatus` to json #[must_use] -pub fn response_ok() -> Response { +pub fn ok_response() -> Response { ( StatusCode::OK, [(header::CONTENT_TYPE, "application/json")], @@ -84,19 +84,19 @@ pub fn response_ok() -> Response { // Error responses #[must_use] -pub fn response_invalid_info_hash_param(info_hash: &str) -> Response { - response_bad_request(&format!( +pub fn invalid_info_hash_param_response(info_hash: &str) -> Response { + bad_request_response(&format!( "Invalid URL: invalid infohash param: string \"{}\", expected a 40 character long string", info_hash )) } #[must_use] -pub fn response_invalid_auth_key_param(invalid_key: &str) -> Response { - response_bad_request(&format!("Invalid auth key id param 
\"{invalid_key}\"")) +pub fn invalid_auth_key_param_response(invalid_key: &str) -> Response { + bad_request_response(&format!("Invalid auth key id param \"{invalid_key}\"")) } -fn response_bad_request(body: &str) -> Response { +fn bad_request_response(body: &str) -> Response { ( StatusCode::BAD_REQUEST, [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], @@ -106,41 +106,41 @@ fn response_bad_request(body: &str) -> Response { } #[must_use] -pub fn response_torrent_not_known() -> Response { +pub fn torrent_not_known_response() -> Response { Json(json!("torrent not known")).into_response() } #[must_use] -pub fn response_failed_to_remove_torrent_from_whitelist() -> Response { - response_unhandled_rejection("failed to remove torrent from whitelist".to_string()) +pub fn failed_to_remove_torrent_from_whitelist_response() -> Response { + unhandled_rejection_response("failed to remove torrent from whitelist".to_string()) } #[must_use] -pub fn response_failed_to_whitelist_torrent() -> Response { - response_unhandled_rejection("failed to whitelist torrent".to_string()) +pub fn failed_to_whitelist_torrent_response() -> Response { + unhandled_rejection_response("failed to whitelist torrent".to_string()) } #[must_use] -pub fn response_failed_to_reload_whitelist() -> Response { - response_unhandled_rejection("failed to reload whitelist".to_string()) +pub fn failed_to_reload_whitelist_response() -> Response { + unhandled_rejection_response("failed to reload whitelist".to_string()) } #[must_use] -pub fn response_failed_to_generate_key() -> Response { - response_unhandled_rejection("failed to generate key".to_string()) +pub fn failed_to_generate_key_response() -> Response { + unhandled_rejection_response("failed to generate key".to_string()) } #[must_use] -pub fn response_failed_to_delete_key() -> Response { - response_unhandled_rejection("failed to delete key".to_string()) +pub fn failed_to_delete_key_response() -> Response { + unhandled_rejection_response("failed to delete 
key".to_string()) } #[must_use] -pub fn response_failed_to_reload_keys() -> Response { - response_unhandled_rejection("failed to reload keys".to_string()) +pub fn failed_to_reload_keys_response() -> Response { + unhandled_rejection_response("failed to reload keys".to_string()) } -fn response_unhandled_rejection(reason: String) -> Response { +fn unhandled_rejection_response(reason: String) -> Response { ( StatusCode::INTERNAL_SERVER_ERROR, [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], From b7c514431d822dfa494fe4fd6925f5241d92de44 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 18:33:11 +0000 Subject: [PATCH 263/435] refactor(api): [#143] change fn return type --- src/apis/handlers.rs | 4 ++-- src/apis/responses.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/apis/handlers.rs b/src/apis/handlers.rs index 8a66b4d76..8d9689025 100644 --- a/src/apis/handlers.rs +++ b/src/apis/handlers.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use std::time::Duration; use axum::extract::{Path, Query, State}; -use axum::response::{Json, Response}; +use axum::response::{IntoResponse, Json, Response}; use serde::{de, Deserialize, Deserializer}; use super::responses::{ @@ -33,7 +33,7 @@ pub async fn get_torrent_handler(State(tracker): State>, Path(info_ match InfoHash::from_str(&info_hash.0) { Err(_) => invalid_info_hash_param_response(&info_hash.0), Ok(info_hash) => match get_torrent_info(tracker.clone(), &info_hash).await { - Some(info) => torrent_info_response(info), + Some(info) => torrent_info_response(info).into_response(), None => torrent_not_known_response(), }, } diff --git a/src/apis/responses.rs b/src/apis/responses.rs index 0b2a14c70..7f194ab16 100644 --- a/src/apis/responses.rs +++ b/src/apis/responses.rs @@ -49,8 +49,8 @@ pub fn torrent_list_response(basic_infos: &[BasicInfo]) -> Json> { } #[must_use] -pub fn torrent_info_response(info: Info) -> Response { - Json(Torrent::from(info)).into_response() +pub fn 
torrent_info_response(info: Info) -> Json { + Json(Torrent::from(info)) } /// # Panics From 1c72ac078bca8e5750d35f82184504ee891304d5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 18:39:02 +0000 Subject: [PATCH 264/435] docs(api): [#143] remove deprecated comment --- src/apis/resources/mod.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/apis/resources/mod.rs b/src/apis/resources/mod.rs index e86c550ca..bf3ce273b 100644 --- a/src/apis/resources/mod.rs +++ b/src/apis/resources/mod.rs @@ -1,12 +1,3 @@ -//! These are the Rest API resources. -//! -//! WIP. Not all endpoints have their resource structs. -//! -//! - [x] `AuthKeys` -//! - [ ] `Torrent`, `ListItem`, `Peer`, `PeerId` -//! - [ ] `StatsResource` -//! - [ ] ... - pub mod auth_key; pub mod peer; pub mod stats; From 0c3ca8798c143828d64b2bbbb13bba7c44fc9313 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jan 2023 18:59:48 +0000 Subject: [PATCH 265/435] refactor(api): [#143] remove duplicate code --- src/apis/middlewares/auth.rs | 28 ++++++++++++++++------------ src/apis/responses.rs | 5 ++++- 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/src/apis/middlewares/auth.rs b/src/apis/middlewares/auth.rs index 905160a06..e54311d33 100644 --- a/src/apis/middlewares/auth.rs +++ b/src/apis/middlewares/auth.rs @@ -1,11 +1,12 @@ use std::sync::Arc; use axum::extract::{Query, State}; -use axum::http::{header, Request, StatusCode}; +use axum::http::Request; use axum::middleware::Next; use axum::response::{IntoResponse, Response}; use serde::Deserialize; +use crate::apis::responses::unhandled_rejection_response; use crate::config::{Configuration, HttpApi}; #[derive(Deserialize, Debug)] @@ -43,20 +44,23 @@ enum AuthError { impl IntoResponse for AuthError { fn into_response(self) -> Response { - let body = match self { - AuthError::Unauthorized => "Unhandled rejection: Err { reason: \"unauthorized\" }", - AuthError::TokenNotValid => "Unhandled rejection: Err { reason: 
\"token not valid\" }", - }; - - ( - StatusCode::INTERNAL_SERVER_ERROR, - [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], - body, - ) - .into_response() + match self { + AuthError::Unauthorized => unauthorized_response(), + AuthError::TokenNotValid => token_not_valid_response(), + } } } fn authenticate(token: &str, http_api_config: &HttpApi) -> bool { http_api_config.contains_token(token) } + +#[must_use] +pub fn unauthorized_response() -> Response { + unhandled_rejection_response("unauthorized".to_string()) +} + +#[must_use] +pub fn token_not_valid_response() -> Response { + unhandled_rejection_response("token not valid".to_string()) +} diff --git a/src/apis/responses.rs b/src/apis/responses.rs index 7f194ab16..b3d4cbd59 100644 --- a/src/apis/responses.rs +++ b/src/apis/responses.rs @@ -140,7 +140,10 @@ pub fn failed_to_reload_keys_response() -> Response { unhandled_rejection_response("failed to reload keys".to_string()) } -fn unhandled_rejection_response(reason: String) -> Response { +/// This error response is to keep backward compatibility with the old Warp API. +/// It should be a plain text or json. +#[must_use] +pub fn unhandled_rejection_response(reason: String) -> Response { ( StatusCode::INTERNAL_SERVER_ERROR, [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], From ecb6f2d5a90795e9d4dfc2bc50b42e77ad825e6b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 23 Dec 2022 15:47:49 +0000 Subject: [PATCH 266/435] feat: workflow to publish crate on crates.io Workflow to publish the crate on [crates.io](https://crates.io/). It only works if the secret "CRATES_TOKEN" exists in the "crates-io-torrust" environment. Since crates.io does not support scoped tokens, we can publish the crate using a fork where the crate owners can set up their crates.io tokens without sharing them with other maintainers. 
--- .github/workflows/publish_crate.yml | 54 +++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 .github/workflows/publish_crate.yml diff --git a/.github/workflows/publish_crate.yml b/.github/workflows/publish_crate.yml new file mode 100644 index 000000000..0352064eb --- /dev/null +++ b/.github/workflows/publish_crate.yml @@ -0,0 +1,54 @@ +name: Publish crate + +on: + push: + tags: + - "v*" + +jobs: + check-secret: + runs-on: ubuntu-latest + environment: crates-io-torrust + outputs: + publish: ${{ steps.check.outputs.publish }} + steps: + - id: check + env: + CRATES_TOKEN: "${{ secrets.CRATES_TOKEN }}" + if: "${{ env.CRATES_TOKEN != '' }}" + run: echo "publish=true" >> $GITHUB_OUTPUT + + test: + needs: check-secret + if: needs.check-secret.outputs.publish == 'true' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + components: llvm-tools-preview + - uses: Swatinem/rust-cache@v1 + - name: Run Tests + run: cargo test + + publish: + needs: test + if: needs.check-secret.outputs.publish == 'true' + runs-on: ubuntu-latest + environment: crates-io-torrust + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Install stable toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - run: cargo publish + env: + CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} From c26b35641b8baeaeaeb3eea68667db52991447cf Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 17 Jan 2023 13:30:39 +0000 Subject: [PATCH 267/435] feat(http): [#159] during HTTP tracker setup wait until job is running Add a communication channel to wait until the new job is running. This is especially useful for testing, because tests need the HTTP server up and running before making requests. 
--- src/jobs/http_tracker.rs | 58 +++++++++++++++++++++++++++++++--------- src/setup.rs | 2 +- 2 files changed, 47 insertions(+), 13 deletions(-) diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index c62bc5cc9..8e38039b7 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -2,35 +2,69 @@ use std::net::SocketAddr; use std::sync::Arc; use log::{info, warn}; +use tokio::sync::oneshot; use tokio::task::JoinHandle; use crate::config::HttpTracker; use crate::http::server::Http; use crate::tracker; +#[derive(Debug)] +pub struct ServerJobStarted(); + /// # Panics /// /// It would panic if the `config::HttpTracker` struct would contain an inappropriate values. -#[must_use] -pub fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { - let bind_addr = config.bind_address.parse::().unwrap(); +pub async fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { + let bind_addr = config + .bind_address + .parse::() + .expect("HTTP tracker server bind_address invalid."); let ssl_enabled = config.ssl_enabled; let ssl_cert_path = config.ssl_cert_path.clone(); let ssl_key_path = config.ssl_key_path.clone(); - tokio::spawn(async move { + let (tx, rx) = oneshot::channel::(); + + // Run the HTTP tracker server + let join_handle = tokio::spawn(async move { let http_tracker = Http::new(tracker); if !ssl_enabled { - info!("Starting HTTP server on: http://{}", bind_addr); - http_tracker.start(bind_addr).await; + info!("Starting HTTP tracker server on: http://{}", bind_addr); + + let handle = http_tracker.start(bind_addr); + + tx.send(ServerJobStarted()) + .expect("HTTP tracker server should not be dropped"); + + handle.await; + + info!("HTTP tracker server on http://{} stopped", bind_addr); } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { - info!("Starting HTTPS server on: https://{} (TLS)", bind_addr); - http_tracker - .start_tls(bind_addr, ssl_cert_path.unwrap(), ssl_key_path.unwrap()) - .await; + 
info!("Starting HTTPS server on: https://{}", bind_addr); + + let handle = http_tracker.start_tls(bind_addr, ssl_cert_path.unwrap(), ssl_key_path.unwrap()); + + tx.send(ServerJobStarted()) + .expect("HTTP tracker server should not be dropped"); + + handle.await; + + info!("HTTP tracker server on https://{} stopped", bind_addr); } else { - warn!("Could not start HTTP tracker on: {}, missing SSL Cert or Key!", bind_addr); + warn!( + "Could not start HTTPS tracker server on: {}, missing SSL Cert or Key!", + bind_addr + ); } - }) + }); + + // Wait until the HTTPS tracker server job is running + match rx.await { + Ok(_msg) => info!("HTTP tracker server started"), + Err(e) => panic!("HTTP tracker server was dropped: {e}"), + } + + join_handle } diff --git a/src/setup.rs b/src/setup.rs index e7535e67d..31be3baac 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -47,7 +47,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Ve if !http_tracker_config.enabled { continue; } - jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone())); + jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone()).await); } // Start HTTP API From 344920295ff4b013d0bd302e31ec34f66f82c109 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 17 Jan 2023 16:24:40 +0000 Subject: [PATCH 268/435] test(http): [#159] HTTP tracker tests scaffolding --- src/config.rs | 20 ++++++-- tests/api/client.rs | 90 ++++------------------------------- tests/common/http.rs | 75 +++++++++++++++++++++++++++++ tests/common/mod.rs | 1 + tests/http/asserts.rs | 7 +++ tests/http/client.rs | 35 ++++++++++++++ tests/http/connection_info.rs | 16 +++++++ tests/http/mod.rs | 4 ++ tests/http/server.rs | 64 +++++++++++++++++++++++++ tests/http_tracker.rs | 44 +++++++++++++++++ tests/tracker_api.rs | 7 ++- 11 files changed, 276 insertions(+), 87 deletions(-) create mode 100644 tests/common/http.rs create mode 100644 tests/common/mod.rs create mode 100644 tests/http/asserts.rs create mode 
100644 tests/http/client.rs create mode 100644 tests/http/connection_info.rs create mode 100644 tests/http/mod.rs create mode 100644 tests/http/server.rs create mode 100644 tests/http_tracker.rs diff --git a/src/config.rs b/src/config.rs index 275339aa0..3ca4b37d8 100644 --- a/src/config.rs +++ b/src/config.rs @@ -84,27 +84,39 @@ pub enum Error { } /// This configuration is used for testing. It generates random config values so they do not collide -/// if you run more than one tracker at the same time. +/// if you run more than one tracker at the same time. /// /// # Panics /// /// Will panic if it can't convert the temp file path to string #[must_use] pub fn ephemeral_configuration() -> Configuration { + // todo: disable services that are not needed. + // For example: a test for the UDP tracker should disable the API and HTTP tracker. + let mut config = Configuration { - log_level: Some("off".to_owned()), + log_level: Some("off".to_owned()), // Change to `debug` for tests debugging ..Default::default() }; - // Ephemeral socket addresses + // Ephemeral socket address for API let api_port = random_port(); + config.http_api.enabled = true; config.http_api.bind_address = format!("127.0.0.1:{}", &api_port); + + // Ephemeral socket address for UDP tracker let upd_port = random_port(); + config.udp_trackers[0].enabled = true; config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", &upd_port); + // Ephemeral socket address for HTTP tracker + let http_port = random_port(); + config.http_trackers[0].enabled = true; + config.http_trackers[0].bind_address = format!("127.0.0.1:{}", &http_port); + // Ephemeral sqlite database let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join(format!("data_{}_{}.db", &api_port, &upd_port)); + let temp_file = temp_directory.join(format!("data_{}_{}_{}.db", &api_port, &upd_port, &http_port)); config.db_path = temp_file.to_str().unwrap().to_owned(); config diff --git a/tests/api/client.rs b/tests/api/client.rs index 
b0b864ff5..4dea732be 100644 --- a/tests/api/client.rs +++ b/tests/api/client.rs @@ -1,71 +1,14 @@ use reqwest::Response; use super::connection_info::ConnectionInfo; +use crate::common::http::{get, Query, QueryParam, ReqwestQuery}; +/// API Client pub struct Client { connection_info: ConnectionInfo, base_path: String, } -type ReqwestQuery = Vec; -type ReqwestQueryParam = (String, String); - -#[derive(Default, Debug)] -pub struct Query { - params: Vec, -} - -impl Query { - pub fn empty() -> Self { - Self { params: vec![] } - } - - pub fn params(params: Vec) -> Self { - Self { params } - } - - pub fn add_param(&mut self, param: QueryParam) { - self.params.push(param); - } - - fn with_token(token: &str) -> Self { - Self { - params: vec![QueryParam::new("token", token)], - } - } -} - -impl From for ReqwestQuery { - fn from(url_search_params: Query) -> Self { - url_search_params - .params - .iter() - .map(|param| ReqwestQueryParam::from((*param).clone())) - .collect() - } -} - -#[derive(Clone, Debug)] -pub struct QueryParam { - name: String, - value: String, -} - -impl QueryParam { - pub fn new(name: &str, value: &str) -> Self { - Self { - name: name.to_string(), - value: value.to_string(), - } - } -} - -impl From for ReqwestQueryParam { - fn from(param: QueryParam) -> Self { - (param.name, param.value) - } -} - impl Client { pub fn new(connection_info: ConnectionInfo) -> Self { Self { @@ -138,37 +81,22 @@ impl Client { .unwrap() } - fn base_url(&self, path: &str) -> String { - format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) - } - - // Unauthenticated GET request with query component pub async fn get_request_with_query(&self, path: &str, params: Query) -> Response { - reqwest::Client::builder() - .build() - .unwrap() - .get(self.base_url(path)) - .query(&ReqwestQuery::from(params)) - .send() - .await - .unwrap() + get(&self.base_url(path), Some(params)).await } - // Unauthenticated GET request pub async fn get_request(&self, path: &str) 
-> Response { - reqwest::Client::builder() - .build() - .unwrap() - .get(self.base_url(path)) - .send() - .await - .unwrap() + get(&self.base_url(path), None).await } fn query_with_token(&self) -> Query { match &self.connection_info.api_token { - Some(token) => Query::with_token(token), + Some(token) => Query::params([QueryParam::new("token", token)].to_vec()), None => Query::default(), } } + + fn base_url(&self, path: &str) -> String { + format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) + } } diff --git a/tests/common/http.rs b/tests/common/http.rs new file mode 100644 index 000000000..1c2e95671 --- /dev/null +++ b/tests/common/http.rs @@ -0,0 +1,75 @@ +use reqwest::Response; + +pub type ReqwestQuery = Vec; +pub type ReqwestQueryParam = (String, String); + +pub async fn get(path: &str, query: Option) -> Response { + match query { + Some(params) => reqwest::Client::builder() + .build() + .unwrap() + .get(path) + .query(&ReqwestQuery::from(params)) + .send() + .await + .unwrap(), + None => reqwest::Client::builder().build().unwrap().get(path).send().await.unwrap(), + } +} + +#[derive(Clone, Debug)] +pub struct ConnectionInfo { + pub bind_address: String, +} + +/// URL Query component +#[derive(Default, Debug)] +pub struct Query { + params: Vec, +} + +impl Query { + pub fn empty() -> Self { + Self { params: vec![] } + } + + pub fn params(params: Vec) -> Self { + Self { params } + } + + pub fn add_param(&mut self, param: QueryParam) { + self.params.push(param); + } +} + +impl From for ReqwestQuery { + fn from(url_search_params: Query) -> Self { + url_search_params + .params + .iter() + .map(|param| ReqwestQueryParam::from((*param).clone())) + .collect() + } +} + +/// URL query param +#[derive(Clone, Debug)] +pub struct QueryParam { + name: String, + value: String, +} + +impl QueryParam { + pub fn new(name: &str, value: &str) -> Self { + Self { + name: name.to_string(), + value: value.to_string(), + } + } +} + +impl From for 
ReqwestQueryParam { + fn from(param: QueryParam) -> Self { + (param.name, param.value) + } +} diff --git a/tests/common/mod.rs b/tests/common/mod.rs new file mode 100644 index 000000000..3883215fc --- /dev/null +++ b/tests/common/mod.rs @@ -0,0 +1 @@ +pub mod http; diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs new file mode 100644 index 000000000..b82c681a0 --- /dev/null +++ b/tests/http/asserts.rs @@ -0,0 +1,7 @@ +use reqwest::Response; + +pub async fn assert_internal_server_error(response: Response) { + assert_eq!(response.status(), 200); + /* cspell:disable-next-line */ + assert_eq!(response.text().await.unwrap(), "d14:failure reason21:internal server errore"); +} diff --git a/tests/http/client.rs b/tests/http/client.rs new file mode 100644 index 000000000..8bf691474 --- /dev/null +++ b/tests/http/client.rs @@ -0,0 +1,35 @@ +use reqwest::Response; + +use super::connection_info::ConnectionInfo; +use crate::common::http::{get, Query}; + +/// HTTP Tracker Client +pub struct Client { + connection_info: ConnectionInfo, + base_path: String, +} + +impl Client { + pub fn new(connection_info: ConnectionInfo) -> Self { + Self { + connection_info, + base_path: "/".to_string(), + } + } + + pub async fn announce(&self, params: Query) -> Response { + self.get("announce", params).await + } + + pub async fn scrape(&self, params: Query) -> Response { + self.get("scrape", params).await + } + + async fn get(&self, path: &str, params: Query) -> Response { + get(&self.base_url(path), Some(params)).await + } + + fn base_url(&self, path: &str) -> String { + format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) + } +} diff --git a/tests/http/connection_info.rs b/tests/http/connection_info.rs new file mode 100644 index 000000000..debf931e4 --- /dev/null +++ b/tests/http/connection_info.rs @@ -0,0 +1,16 @@ +use torrust_tracker::tracker::auth::Key; + +#[derive(Clone, Debug)] +pub struct ConnectionInfo { + pub bind_address: String, + pub aut_key: 
Option, +} + +impl ConnectionInfo { + pub fn anonymous(bind_address: &str) -> Self { + Self { + bind_address: bind_address.to_string(), + aut_key: None, + } + } +} diff --git a/tests/http/mod.rs b/tests/http/mod.rs new file mode 100644 index 000000000..9e79fcd27 --- /dev/null +++ b/tests/http/mod.rs @@ -0,0 +1,4 @@ +pub mod asserts; +pub mod client; +pub mod connection_info; +pub mod server; diff --git a/tests/http/server.rs b/tests/http/server.rs new file mode 100644 index 000000000..ff2b40987 --- /dev/null +++ b/tests/http/server.rs @@ -0,0 +1,64 @@ +use core::panic; +use std::sync::Arc; + +use torrust_tracker::config::{ephemeral_configuration, Configuration}; +use torrust_tracker::jobs::http_tracker; +use torrust_tracker::tracker::statistics::Keeper; +use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; + +use super::connection_info::ConnectionInfo; + +pub fn tracker_configuration() -> Arc { + Arc::new(ephemeral_configuration()) +} + +pub async fn start_default_http_tracker() -> Server { + let configuration = tracker_configuration(); + start_custom_http_tracker(configuration.clone()).await +} + +pub async fn start_custom_http_tracker(configuration: Arc) -> Server { + let server = start(&configuration); + http_tracker::start_job(&configuration.http_trackers[0], server.tracker.clone()).await; + server +} + +fn start(configuration: &Arc) -> Server { + let connection_info = ConnectionInfo::anonymous(&configuration.http_trackers[0].bind_address.clone()); + + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + let tracker = match tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository) { + Ok(tracker) => 
Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; + + // Initialize logging + logging::setup(configuration); + + Server { + tracker, + connection_info, + } +} + +pub struct Server { + pub tracker: Arc, + pub connection_info: ConnectionInfo, +} + +impl Server { + pub fn get_connection_info(&self) -> ConnectionInfo { + self.connection_info.clone() + } +} diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs new file mode 100644 index 000000000..a1e429bb8 --- /dev/null +++ b/tests/http_tracker.rs @@ -0,0 +1,44 @@ +/// Integration tests for HTTP tracker server +/// +/// cargo test `http_tracker_server` -- --nocapture +mod common; +mod http; + +mod http_tracker_server { + + mod receiving_an_announce_request { + use crate::common::http::Query; + use crate::http::asserts::assert_internal_server_error; + use crate::http::client::Client; + use crate::http::server::start_default_http_tracker; + + #[tokio::test] + async fn should_fail_when_the_request_is_empty() { + let http_tracker_server = start_default_http_tracker().await; + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(Query::default()) + .await; + + assert_internal_server_error(response).await; + } + } + + mod receiving_an_scrape_request { + use crate::common::http::Query; + use crate::http::asserts::assert_internal_server_error; + use crate::http::client::Client; + use crate::http::server::start_default_http_tracker; + + #[tokio::test] + async fn should_fail_when_the_request_is_empty() { + let http_tracker_server = start_default_http_tracker().await; + + let response = Client::new(http_tracker_server.get_connection_info()) + .scrape(Query::default()) + .await; + + assert_internal_server_error(response).await; + } + } +} diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index e4fff7ca4..456b37f7b 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -6,6 +6,7 @@ extern crate rand; mod api; +mod common; mod tracker_apis { @@ -31,8 +32,9 @@ mod 
tracker_apis { mod authentication { use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; - use crate::api::client::{Client, Query, QueryParam}; + use crate::api::client::Client; use crate::api::server::start_default_api; + use crate::common::http::{Query, QueryParam}; #[tokio::test] async fn should_authenticate_requests_by_using_a_token_query_param() { @@ -181,10 +183,11 @@ mod tracker_apis { assert_bad_request, assert_invalid_infohash_param, assert_not_found, assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, }; - use crate::api::client::{Client, Query, QueryParam}; + use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::fixtures::sample_peer; use crate::api::server::start_default_api; + use crate::common::http::{Query, QueryParam}; #[tokio::test] async fn should_allow_getting_torrents() { From 41ad07f2281ebfb70b1041bf1811af01ac6df4f3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jan 2023 18:58:50 +0000 Subject: [PATCH 269/435] refactor(http): [#159] add dependencies: serde_urlencoded and serde_repr --- Cargo.lock | 25 +++++++++++++++++++------ Cargo.toml | 3 +++ 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8f8d753b3..bc82c64f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2242,9 +2242,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.147" +version = "1.0.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" +checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" dependencies = [ "serde_derive", ] @@ -2270,9 +2270,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.147" +version = "1.0.152" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" +checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" dependencies = [ "proc-macro2", "quote", @@ -2299,6 +2299,17 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_repr" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a5ec9fa74a20ebbe5d9ac23dac1fc96ba0ecfe9f50f2843b52e537b10fbcb4e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -2523,9 +2534,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.103" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d" +checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" dependencies = [ "proc-macro2", "quote", @@ -2820,6 +2831,8 @@ dependencies = [ "serde", "serde_bencode", "serde_json", + "serde_repr", + "serde_urlencoded", "serde_with", "thiserror", "tokio", diff --git a/Cargo.toml b/Cargo.toml index 434b4cace..0e67c65ae 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,6 +61,9 @@ uuid = { version = "1", features = ["v4"] } axum = "0.6.1" axum-server = { version = "0.4.4", features = ["tls-rustls"] } + [dev-dependencies] mockall = "0.11" reqwest = { version = "0.11.13", features = ["json"] } +serde_urlencoded = "0.7.1" +serde_repr = "0.1.10" From 1a558d2c6c9341654e55357993442dabddf56253 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jan 2023 19:30:51 +0000 Subject: [PATCH 270/435] test(http): [#159] add tests for public http tracker --- src/http/mod.rs | 4 ++ src/http/response.rs | 7 +- tests/api/client.rs | 16 ++++- tests/api/mod.rs | 1 - tests/{api => common}/fixtures.rs | 0 tests/common/http.rs | 16 ----- tests/common/mod.rs | 1 + tests/http/asserts.rs | 8 +++ tests/http/client.rs | 29 
++++----- tests/http/mod.rs | 2 + tests/http/requests.rs | 104 ++++++++++++++++++++++++++++++ tests/http/responses.rs | 18 ++++++ tests/http/server.rs | 6 ++ tests/http_tracker.rs | 102 +++++++++++++++++++++++++++-- tests/tracker_api.rs | 4 +- 15 files changed, 272 insertions(+), 46 deletions(-) rename tests/{api => common}/fixtures.rs (100%) create mode 100644 tests/http/requests.rs create mode 100644 tests/http/responses.rs diff --git a/src/http/mod.rs b/src/http/mod.rs index 701dba407..0b5a02a0e 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -1,3 +1,7 @@ +//! Tracker HTTP/HTTPS Protocol: +//! +//! +//! pub mod error; pub mod filters; pub mod handlers; diff --git a/src/http/response.rs b/src/http/response.rs index 962e72fac..1e9f7fa09 100644 --- a/src/http/response.rs +++ b/src/http/response.rs @@ -2,19 +2,18 @@ use std::collections::HashMap; use std::io::Write; use std::net::IpAddr; -use serde; -use serde::Serialize; +use serde::{self, Deserialize, Serialize}; use crate::protocol::info_hash::InfoHash; -#[derive(Serialize)] +#[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Peer { pub peer_id: String, pub ip: IpAddr, pub port: u16, } -#[derive(Serialize)] +#[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Announce { pub interval: u32, #[serde(rename = "min interval")] diff --git a/tests/api/client.rs b/tests/api/client.rs index 4dea732be..f99805570 100644 --- a/tests/api/client.rs +++ b/tests/api/client.rs @@ -1,7 +1,7 @@ use reqwest::Response; use super::connection_info::ConnectionInfo; -use crate::common::http::{get, Query, QueryParam, ReqwestQuery}; +use crate::common::http::{Query, QueryParam, ReqwestQuery}; /// API Client pub struct Client { @@ -100,3 +100,17 @@ impl Client { format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) } } + +async fn get(path: &str, query: Option) -> Response { + match query { + Some(params) => reqwest::Client::builder() + .build() + .unwrap() + .get(path) + 
.query(&ReqwestQuery::from(params)) + .send() + .await + .unwrap(), + None => reqwest::Client::builder().build().unwrap().get(path).send().await.unwrap(), + } +} diff --git a/tests/api/mod.rs b/tests/api/mod.rs index bc4187375..8dd6f4c53 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -5,7 +5,6 @@ use torrust_tracker::tracker::Tracker; pub mod asserts; pub mod client; pub mod connection_info; -pub mod fixtures; pub mod server; /// It forces a database error by dropping all tables. diff --git a/tests/api/fixtures.rs b/tests/common/fixtures.rs similarity index 100% rename from tests/api/fixtures.rs rename to tests/common/fixtures.rs diff --git a/tests/common/http.rs b/tests/common/http.rs index 1c2e95671..902752674 100644 --- a/tests/common/http.rs +++ b/tests/common/http.rs @@ -1,22 +1,6 @@ -use reqwest::Response; - pub type ReqwestQuery = Vec; pub type ReqwestQueryParam = (String, String); -pub async fn get(path: &str, query: Option) -> Response { - match query { - Some(params) => reqwest::Client::builder() - .build() - .unwrap() - .get(path) - .query(&ReqwestQuery::from(params)) - .send() - .await - .unwrap(), - None => reqwest::Client::builder().build().unwrap().get(path).send().await.unwrap(), - } -} - #[derive(Clone, Debug)] pub struct ConnectionInfo { pub bind_address: String, diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 3883215fc..810620359 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1 +1,2 @@ +pub mod fixtures; pub mod http; diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index b82c681a0..9a1f353c6 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -1,7 +1,15 @@ use reqwest::Response; +use super::responses::Announce; + pub async fn assert_internal_server_error(response: Response) { assert_eq!(response.status(), 200); /* cspell:disable-next-line */ assert_eq!(response.text().await.unwrap(), "d14:failure reason21:internal server errore"); } + +pub async fn assert_announce_response(response: 
Response, expected_announce_response: &Announce) { + assert_eq!(response.status(), 200); + let announce_response: Announce = serde_bencode::from_str(&response.text().await.unwrap()).unwrap(); + assert_eq!(announce_response, *expected_announce_response); +} diff --git a/tests/http/client.rs b/tests/http/client.rs index 8bf691474..ae51bc02e 100644 --- a/tests/http/client.rs +++ b/tests/http/client.rs @@ -1,35 +1,34 @@ use reqwest::Response; use super::connection_info::ConnectionInfo; -use crate::common::http::{get, Query}; +use super::requests::AnnounceQuery; /// HTTP Tracker Client pub struct Client { connection_info: ConnectionInfo, - base_path: String, } impl Client { pub fn new(connection_info: ConnectionInfo) -> Self { - Self { - connection_info, - base_path: "/".to_string(), - } + Self { connection_info } } - pub async fn announce(&self, params: Query) -> Response { - self.get("announce", params).await + pub async fn announce(&self, query: &AnnounceQuery) -> Response { + let path_with_query = format!("announce?{query}"); + self.get(&path_with_query).await } - pub async fn scrape(&self, params: Query) -> Response { - self.get("scrape", params).await - } - - async fn get(&self, path: &str, params: Query) -> Response { - get(&self.base_url(path), Some(params)).await + pub async fn get(&self, path: &str) -> Response { + reqwest::Client::builder() + .build() + .unwrap() + .get(self.base_url(path)) + .send() + .await + .unwrap() } fn base_url(&self, path: &str) -> String { - format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) + format!("http://{}/{path}", &self.connection_info.bind_address) } } diff --git a/tests/http/mod.rs b/tests/http/mod.rs index 9e79fcd27..2ab8b2c1c 100644 --- a/tests/http/mod.rs +++ b/tests/http/mod.rs @@ -1,4 +1,6 @@ pub mod asserts; pub mod client; pub mod connection_info; +pub mod requests; +pub mod responses; pub mod server; diff --git a/tests/http/requests.rs b/tests/http/requests.rs new file mode 100644 index 
000000000..170dc52a9 --- /dev/null +++ b/tests/http/requests.rs @@ -0,0 +1,104 @@ +use std::fmt; +use std::net::IpAddr; + +use percent_encoding::NON_ALPHANUMERIC; +use serde_repr::Serialize_repr; + +pub struct AnnounceQuery { + pub info_hash: ByteArray20, + pub peer_addr: IpAddr, + pub downloaded: BaseTenASCII, + pub uploaded: BaseTenASCII, + pub peer_id: ByteArray20, + pub port: PortNumber, + pub left: BaseTenASCII, + pub event: Option, + pub compact: Option, +} + +impl fmt::Display for AnnounceQuery { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.build()) + } +} + +/// HTTP Tracker Announce Request: +/// +/// +/// +/// Some parameters are not implemented yet. +impl AnnounceQuery { + /// It builds the URL query component for the announce request. + /// + /// This custom URL query params encoding is needed because `reqwest` does not allow + /// bytes arrays in query parameters. More info on this issue: + /// + /// + pub fn build(&self) -> String { + let mut params = vec![ + ( + "info_hash", + percent_encoding::percent_encode(&self.info_hash, NON_ALPHANUMERIC).to_string(), + ), + ("peer_addr", self.peer_addr.to_string()), + ("downloaded", self.downloaded.to_string()), + ("uploaded", self.uploaded.to_string()), + ( + "peer_id", + percent_encoding::percent_encode(&self.peer_id, NON_ALPHANUMERIC).to_string(), + ), + ("port", self.port.to_string()), + ("left", self.left.to_string()), + ]; + + if let Some(event) = &self.event { + params.push(("event", event.to_string())); + } + + if let Some(compact) = &self.compact { + params.push(("compact", compact.to_string())); + } + + params + .iter() + .map(|param| format!("{}={}", param.0, param.1)) + .collect::>() + .join("&") + } +} + +pub type BaseTenASCII = u64; +pub type ByteArray20 = [u8; 20]; +pub type PortNumber = u16; + +pub enum Event { + //tarted, + //Stopped, + Completed, +} + +impl fmt::Display for Event { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + 
//Event::Started => write!(f, "started"), + //Event::Stopped => write!(f, "stopped"), + Event::Completed => write!(f, "completed"), + } + } +} + +#[derive(Serialize_repr, PartialEq, Debug)] +#[repr(u8)] +pub enum Compact { + //Accepted = 1, + NotAccepted = 0, +} + +impl fmt::Display for Compact { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + //Compact::Accepted => write!(f, "1"), + Compact::NotAccepted => write!(f, "0"), + } + } +} diff --git a/tests/http/responses.rs b/tests/http/responses.rs new file mode 100644 index 000000000..e82197b03 --- /dev/null +++ b/tests/http/responses.rs @@ -0,0 +1,18 @@ +use serde::{self, Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Announce { + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + #[serde(rename = "min interval")] + pub min_interval: u32, + pub peers: Vec, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct DictionaryPeer { + pub ip: String, + pub peer_id: String, + pub port: u16, +} diff --git a/tests/http/server.rs b/tests/http/server.rs index ff2b40987..130c68b46 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -3,6 +3,8 @@ use std::sync::Arc; use torrust_tracker::config::{ephemeral_configuration, Configuration}; use torrust_tracker::jobs::http_tracker; +use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; @@ -61,4 +63,8 @@ impl Server { pub fn get_connection_info(&self) -> ConnectionInfo { self.connection_info.clone() } + + pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) { + self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + } } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index a1e429bb8..05e2281bd 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -7,7 
+7,6 @@ mod http; mod http_tracker_server { mod receiving_an_announce_request { - use crate::common::http::Query; use crate::http::asserts::assert_internal_server_error; use crate::http::client::Client; use crate::http::server::start_default_http_tracker; @@ -16,16 +15,13 @@ mod http_tracker_server { async fn should_fail_when_the_request_is_empty() { let http_tracker_server = start_default_http_tracker().await; - let response = Client::new(http_tracker_server.get_connection_info()) - .announce(Query::default()) - .await; + let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; assert_internal_server_error(response).await; } } mod receiving_an_scrape_request { - use crate::common::http::Query; use crate::http::asserts::assert_internal_server_error; use crate::http::client::Client; use crate::http::server::start_default_http_tracker; @@ -34,11 +30,103 @@ mod http_tracker_server { async fn should_fail_when_the_request_is_empty() { let http_tracker_server = start_default_http_tracker().await; + let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; + + assert_internal_server_error(response).await; + } + } +} + +mod public_http_tracker_server { + + mod receiving_an_announce_request { + use std::net::{IpAddr, Ipv4Addr}; + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::sample_peer; + use crate::http::asserts::assert_announce_response; + use crate::http::client::Client; + use crate::http::requests::{AnnounceQuery, Compact, Event}; + use crate::http::responses::{Announce, DictionaryPeer}; + use crate::http::server::start_default_http_tracker; + + fn sample_announce_query(info_hash: &InfoHash) -> AnnounceQuery { + AnnounceQuery { + info_hash: info_hash.0, + peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), + downloaded: 0, + uploaded: 0, + peer_id: peer::Id(*b"-qB00000000000000001").0, + port: 
17548, + left: 0, + event: Some(Event::Completed), + compact: Some(Compact::NotAccepted), + } + } + + #[tokio::test] + async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { + let http_tracker_server = start_default_http_tracker().await; + let response = Client::new(http_tracker_server.get_connection_info()) - .scrape(Query::default()) + .announce(&sample_announce_query( + &InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(), + )) .await; - assert_internal_server_error(response).await; + assert_announce_response( + response, + &Announce { + complete: 1, // the peer for this test + incomplete: 0, + interval: http_tracker_server.tracker.config.announce_interval, + min_interval: http_tracker_server.tracker.config.min_announce_interval, + peers: vec![], + }, + ) + .await; + } + + #[tokio::test] + async fn should_return_the_list_of_previously_announced_peers() { + let http_tracker_server = start_default_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let peer = sample_peer(); + + // Add a peer + http_tracker_server.add_torrent(&info_hash, &peer).await; + + let announce_query = sample_announce_query(&info_hash); + + assert_ne!( + announce_query.peer_id, peer.peer_id.0, + "the new peer id must be different from the previously announced peer otherwise the peer previously added peer in not included in the list" + ); + + // Announce the new peer. 
This new peer is non included the response peers list + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(&announce_query) + .await; + + assert_announce_response( + response, + &Announce { + complete: 2, + incomplete: 0, + interval: 120, + min_interval: 120, + peers: vec![DictionaryPeer { + ip: peer.peer_addr.ip().to_string(), + peer_id: String::new(), + port: peer.peer_addr.port(), + }], + }, + ) + .await; } } } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 456b37f7b..47fda3af9 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -113,8 +113,8 @@ mod tracker_apis { use crate::api::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::fixtures::sample_peer; use crate::api::server::start_default_api; + use crate::common::fixtures::sample_peer; #[tokio::test] async fn should_allow_getting_tracker_statistics() { @@ -185,8 +185,8 @@ mod tracker_apis { }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::fixtures::sample_peer; use crate::api::server::start_default_api; + use crate::common::fixtures::sample_peer; use crate::common::http::{Query, QueryParam}; #[tokio::test] From dd38045c7d06d9900db52bb44a673288baf5e75e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 23 Jan 2023 13:40:37 +0000 Subject: [PATCH 271/435] refactor(http): [#159] extract test builders --- src/http/mod.rs | 1 + tests/common/fixtures.rs | 28 +++++- tests/http/asserts.rs | 6 ++ tests/http/requests.rs | 42 +++++++- tests/http/server.rs | 8 +- tests/http_tracker.rs | 209 +++++++++++++++++++-------------------- tests/tracker_api.rs | 20 ++-- 7 files changed, 191 insertions(+), 123 deletions(-) diff --git a/src/http/mod.rs b/src/http/mod.rs index 0b5a02a0e..2fcb056d8 100644 --- 
a/src/http/mod.rs +++ b/src/http/mod.rs @@ -1,6 +1,7 @@ //! Tracker HTTP/HTTPS Protocol: //! //! +//! //! pub mod error; pub mod filters; diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index fa6099309..78f7d381f 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -2,10 +2,32 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; -use torrust_tracker::tracker::peer; +use torrust_tracker::tracker::peer::{self, Id, Peer}; -pub fn sample_peer() -> peer::Peer { - peer::Peer { +pub struct PeerBuilder { + peer: Peer, +} + +impl PeerBuilder { + pub fn default() -> PeerBuilder { + Self { + peer: default_peer_for_testing(), + } + } + + #[allow(dead_code)] + pub fn with_peer_id(mut self, peer_id: &Id) -> Self { + self.peer.peer_id = *peer_id; + self + } + + pub fn into(self) -> Peer { + self.peer + } +} + +fn default_peer_for_testing() -> Peer { + Peer { peer_id: peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index 9a1f353c6..32aaf4d69 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -8,6 +8,12 @@ pub async fn assert_internal_server_error(response: Response) { assert_eq!(response.text().await.unwrap(), "d14:failure reason21:internal server errore"); } +pub async fn assert_empty_announce_response(response: Response) { + assert_eq!(response.status(), 200); + let announce_response: Announce = serde_bencode::from_str(&response.text().await.unwrap()).unwrap(); + assert!(announce_response.peers.is_empty()); +} + pub async fn assert_announce_response(response: Response, expected_announce_response: &Announce) { assert_eq!(response.status(), 200); let announce_response: Announce = 
serde_bencode::from_str(&response.text().await.unwrap()).unwrap(); diff --git a/tests/http/requests.rs b/tests/http/requests.rs index 170dc52a9..7e59494c5 100644 --- a/tests/http/requests.rs +++ b/tests/http/requests.rs @@ -1,8 +1,11 @@ use std::fmt; -use std::net::IpAddr; +use std::net::{IpAddr, Ipv4Addr}; +use std::str::FromStr; use percent_encoding::NON_ALPHANUMERIC; use serde_repr::Serialize_repr; +use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::tracker::peer::Id; pub struct AnnounceQuery { pub info_hash: ByteArray20, @@ -102,3 +105,40 @@ impl fmt::Display for Compact { } } } + +pub struct AnnounceQueryBuilder { + announce_query: AnnounceQuery, +} + +impl AnnounceQueryBuilder { + pub fn default() -> AnnounceQueryBuilder { + let default_announce_query = AnnounceQuery { + info_hash: InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0, + peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), + downloaded: 0, + uploaded: 0, + peer_id: Id(*b"-qB00000000000000001").0, + port: 17548, + left: 0, + event: Some(Event::Completed), + compact: Some(Compact::NotAccepted), + }; + Self { + announce_query: default_announce_query, + } + } + + pub fn with_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.announce_query.info_hash = info_hash.0; + self + } + + pub fn with_peer_id(mut self, peer_id: &Id) -> Self { + self.announce_query.peer_id = peer_id.0; + self + } + + pub fn into(self) -> AnnounceQuery { + self.announce_query + } +} diff --git a/tests/http/server.rs b/tests/http/server.rs index 130c68b46..32d02b060 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -10,8 +10,8 @@ use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use super::connection_info::ConnectionInfo; -pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral_configuration()) +pub async fn start_public_http_tracker() -> Server { + start_default_http_tracker().await } pub async fn start_default_http_tracker() 
-> Server { @@ -19,6 +19,10 @@ pub async fn start_default_http_tracker() -> Server { start_custom_http_tracker(configuration.clone()).await } +pub fn tracker_configuration() -> Arc { + Arc::new(ephemeral_configuration()) +} + pub async fn start_custom_http_tracker(configuration: Arc) -> Server { let server = start(&configuration); http_tracker::start_job(&configuration.http_trackers[0], server.tracker.clone()).await; diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 05e2281bd..bf75dfc26 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -6,127 +6,122 @@ mod http; mod http_tracker_server { - mod receiving_an_announce_request { - use crate::http::asserts::assert_internal_server_error; - use crate::http::client::Client; - use crate::http::server::start_default_http_tracker; + mod for_all_config_modes { - #[tokio::test] - async fn should_fail_when_the_request_is_empty() { - let http_tracker_server = start_default_http_tracker().await; + mod receiving_an_announce_request { + use crate::http::asserts::assert_internal_server_error; + use crate::http::client::Client; + use crate::http::server::start_default_http_tracker; - let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; + #[tokio::test] + async fn should_fail_when_the_request_is_empty() { + let http_tracker_server = start_default_http_tracker().await; - assert_internal_server_error(response).await; - } - } + let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; - mod receiving_an_scrape_request { - use crate::http::asserts::assert_internal_server_error; - use crate::http::client::Client; - use crate::http::server::start_default_http_tracker; + assert_internal_server_error(response).await; + } + } - #[tokio::test] - async fn should_fail_when_the_request_is_empty() { - let http_tracker_server = start_default_http_tracker().await; + mod receiving_an_scrape_request { + use 
crate::http::asserts::assert_internal_server_error; + use crate::http::client::Client; + use crate::http::server::start_default_http_tracker; - let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; + #[tokio::test] + async fn should_fail_when_the_request_is_empty() { + let http_tracker_server = start_default_http_tracker().await; - assert_internal_server_error(response).await; - } - } -} + let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; -mod public_http_tracker_server { - - mod receiving_an_announce_request { - use std::net::{IpAddr, Ipv4Addr}; - use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - - use crate::common::fixtures::sample_peer; - use crate::http::asserts::assert_announce_response; - use crate::http::client::Client; - use crate::http::requests::{AnnounceQuery, Compact, Event}; - use crate::http::responses::{Announce, DictionaryPeer}; - use crate::http::server::start_default_http_tracker; - - fn sample_announce_query(info_hash: &InfoHash) -> AnnounceQuery { - AnnounceQuery { - info_hash: info_hash.0, - peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), - downloaded: 0, - uploaded: 0, - peer_id: peer::Id(*b"-qB00000000000000001").0, - port: 17548, - left: 0, - event: Some(Event::Completed), - compact: Some(Compact::NotAccepted), + assert_internal_server_error(response).await; } } + } - #[tokio::test] - async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let http_tracker_server = start_default_http_tracker().await; - - let response = Client::new(http_tracker_server.get_connection_info()) - .announce(&sample_announce_query( - &InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(), - )) + mod configured_as_public { + + mod receiving_an_announce_request { + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + use 
torrust_tracker::tracker::peer; + + use crate::common::fixtures::PeerBuilder; + use crate::http::asserts::{assert_announce_response, assert_empty_announce_response}; + use crate::http::client::Client; + use crate::http::requests::AnnounceQueryBuilder; + use crate::http::responses::{Announce, DictionaryPeer}; + use crate::http::server::start_public_http_tracker; + + #[tokio::test] + async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { + let http_tracker_server = start_public_http_tracker().await; + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &AnnounceQueryBuilder::default() + .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) + .into(), + ) + .await; + + assert_announce_response( + response, + &Announce { + complete: 1, // the peer for this test + incomplete: 0, + interval: http_tracker_server.tracker.config.announce_interval, + min_interval: http_tracker_server.tracker.config.min_announce_interval, + peers: vec![], + }, + ) .await; + } - assert_announce_response( - response, - &Announce { - complete: 1, // the peer for this test - incomplete: 0, - interval: http_tracker_server.tracker.config.announce_interval, - min_interval: http_tracker_server.tracker.config.min_announce_interval, - peers: vec![], - }, - ) - .await; - } - - #[tokio::test] - async fn should_return_the_list_of_previously_announced_peers() { - let http_tracker_server = start_default_http_tracker().await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let peer = sample_peer(); - - // Add a peer - http_tracker_server.add_torrent(&info_hash, &peer).await; - - let announce_query = sample_announce_query(&info_hash); - - assert_ne!( - announce_query.peer_id, peer.peer_id.0, - "the new peer id must be different from the previously announced peer otherwise the peer previously added peer in not included in the list" - ); - - // Announce the new peer. 
This new peer is non included the response peers list - let response = Client::new(http_tracker_server.get_connection_info()) - .announce(&announce_query) + #[tokio::test] + async fn should_return_the_list_of_previously_announced_peers() { + let http_tracker_server = start_public_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .into(); + + // Add the Peer 1 + http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2 + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &AnnounceQueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .into(), + ) + .await; + + let expected_peer = DictionaryPeer { + peer_id: previously_announced_peer.peer_id.to_string(), + ip: previously_announced_peer.peer_addr.ip().to_string(), + port: previously_announced_peer.peer_addr.port(), + }; + + // This new peer is non included on the response peer list + assert_announce_response( + response, + &Announce { + complete: 2, + incomplete: 0, + interval: http_tracker_server.tracker.config.announce_interval, + min_interval: http_tracker_server.tracker.config.min_announce_interval, + peers: vec![expected_peer], + }, + ) .await; - - assert_announce_response( - response, - &Announce { - complete: 2, - incomplete: 0, - interval: 120, - min_interval: 120, - peers: vec![DictionaryPeer { - ip: peer.peer_addr.ip().to_string(), - peer_id: String::new(), - port: peer.peer_addr.port(), - }], - }, - ) - .await; + } } } } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 47fda3af9..5710db6a6 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -114,7 +114,7 @@ mod tracker_apis { use crate::api::client::Client; use 
crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::server::start_default_api; - use crate::common::fixtures::sample_peer; + use crate::common::fixtures::PeerBuilder; #[tokio::test] async fn should_allow_getting_tracker_statistics() { @@ -123,7 +123,7 @@ mod tracker_apis { api_server .add_torrent( &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), - &sample_peer(), + &PeerBuilder::default().into(), ) .await; @@ -186,7 +186,7 @@ mod tracker_apis { use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::server::start_default_api; - use crate::common::fixtures::sample_peer; + use crate::common::fixtures::PeerBuilder; use crate::common::http::{Query, QueryParam}; #[tokio::test] @@ -195,7 +195,7 @@ mod tracker_apis { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - api_server.add_torrent(&info_hash, &sample_peer()).await; + api_server.add_torrent(&info_hash, &PeerBuilder::default().into()).await; let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::empty()) @@ -222,8 +222,8 @@ mod tracker_apis { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - api_server.add_torrent(&info_hash_1, &sample_peer()).await; - api_server.add_torrent(&info_hash_2, &sample_peer()).await; + api_server.add_torrent(&info_hash_1, &PeerBuilder::default().into()).await; + api_server.add_torrent(&info_hash_2, &PeerBuilder::default().into()).await; let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) @@ -250,8 +250,8 @@ mod tracker_apis { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let info_hash_2 = 
InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - api_server.add_torrent(&info_hash_1, &sample_peer()).await; - api_server.add_torrent(&info_hash_2, &sample_peer()).await; + api_server.add_torrent(&info_hash_1, &PeerBuilder::default().into()).await; + api_server.add_torrent(&info_hash_2, &PeerBuilder::default().into()).await; let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) @@ -323,7 +323,7 @@ mod tracker_apis { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let peer = sample_peer(); + let peer = PeerBuilder::default().into(); api_server.add_torrent(&info_hash, &peer).await; @@ -384,7 +384,7 @@ mod tracker_apis { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - api_server.add_torrent(&info_hash, &sample_peer()).await; + api_server.add_torrent(&info_hash, &PeerBuilder::default().into()).await; let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .get_torrent(&info_hash.to_string()) From ca8fc22562647649ce05244eddb0d78714d880ea Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 23 Jan 2023 13:41:31 +0000 Subject: [PATCH 272/435] test(http): [#159] add tests for public http tracker --- tests/http_tracker.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index bf75dfc26..c58990616 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -122,6 +122,30 @@ mod http_tracker_server { ) .await; } + + #[tokio::test] + async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { + let http_tracker_server = start_public_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let peer = PeerBuilder::default().into(); + + // Add a peer + 
http_tracker_server.add_torrent(&info_hash, &peer).await; + + let announce_query = AnnounceQueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer.peer_id) + .into(); + + assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(&announce_query) + .await; + + assert_empty_announce_response(response).await; + } } } } From 7fa8ec84f9a35e01feaf872db36023c8dea45ed1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 23 Jan 2023 13:48:30 +0000 Subject: [PATCH 273/435] test(http): add test for failing conversion of peer Id into String If you try to convert a peer Id into a String it returns an empty String. ``` let id = peer::Id(*b"-qB00000000000000000"); assert_eq!(id.to_string(), ""); ``` --- src/tracker/peer.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 2da257d3e..dc362c5bd 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -215,6 +215,21 @@ impl Serialize for Id { #[cfg(test)] mod test { + + mod torrent_peer_id { + use crate::tracker::peer; + + #[test] + fn should_be_converted_into_string() { + // todo: it seems it's not working + let id = peer::Id(*b"-qB00000000000000000"); + assert_eq!(id.to_string(), ""); + + let id = peer::Id(*b"-qB00000000000000001"); + assert_eq!(id.to_string(), ""); + } + } + mod torrent_peer { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; From 62dbffacb79503e9921aeb9b4f154bb43bc65f36 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 23 Jan 2023 19:34:55 +0000 Subject: [PATCH 274/435] test(http): [#159] add test for missing announce req params in http tracker --- cSpell.json | 3 + src/http/mod.rs | 10 ++- tests/http/asserts.rs | 61 +++++++++++++++-- tests/http/requests.rs | 146 +++++++++++++++++++++++++++++++--------- tests/http/responses.rs | 6 ++ tests/http_tracker.rs | 74 ++++++++++++++++++-- 6 files changed, 255 insertions(+), 45 deletions(-) diff
--git a/cSpell.json b/cSpell.json index 537ea65a5..0d0b73701 100644 --- a/cSpell.json +++ b/cSpell.json @@ -6,6 +6,7 @@ "Avicora", "Azureus", "bencode", + "bencoded", "binascii", "Bitflu", "bools", @@ -39,6 +40,7 @@ "nanos", "nextest", "nocapture", + "numwant", "oneshot", "ostr", "Pando", @@ -62,6 +64,7 @@ "Torrentstorm", "torrust", "torrustracker", + "trackerid", "typenum", "Unamed", "untuple", diff --git a/src/http/mod.rs b/src/http/mod.rs index 2fcb056d8..fa4c263b5 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -1,7 +1,13 @@ //! Tracker HTTP/HTTPS Protocol: //! -//! -//! +//! Original specification in BEP 3 (section "Trackers"): +//! +//! +//! +//! Other resources: +//! +//! - +//! - //! pub mod error; pub mod filters; diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index 32aaf4d69..27270f7f2 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -1,12 +1,7 @@ use reqwest::Response; use super::responses::Announce; - -pub async fn assert_internal_server_error(response: Response) { - assert_eq!(response.status(), 200); - /* cspell:disable-next-line */ - assert_eq!(response.text().await.unwrap(), "d14:failure reason21:internal server errore"); -} +use crate::http::responses::Error; pub async fn assert_empty_announce_response(response: Response) { assert_eq!(response.status(), 200); @@ -19,3 +14,57 @@ pub async fn assert_announce_response(response: Response, expected_announce_resp let announce_response: Announce = serde_bencode::from_str(&response.text().await.unwrap()).unwrap(); assert_eq!(announce_response, *expected_announce_response); } + +pub async fn assert_is_announce_response(response: Response) { + assert_eq!(response.status(), 200); + let body = response.text().await.unwrap(); + let _announce_response: Announce = serde_bencode::from_str(&body) + .unwrap_or_else(|_| panic!("response body should be a valid announce response, got \"{}\"", &body)); +} + +// Error responses + +pub async fn 
assert_internal_server_error_response(response: Response) { + assert_eq!(response.status(), 200); + let body = response.text().await.unwrap(); + let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { + panic!( + "response body should be a valid bencoded string for the 'internal server' error, got \"{}\"", + &body + ) + }); + let expected_error_response = Error { + failure_reason: "internal server error".to_string(), + }; + assert_eq!(error_response, expected_error_response); +} + +pub async fn assert_invalid_info_hash_error_response(response: Response) { + assert_eq!(response.status(), 200); + let body = response.text().await.unwrap(); + let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { + panic!( + "response body should be a valid bencoded string for the 'invalid info_hash' error, got \"{}\"", + &body + ) + }); + let expected_error_response = Error { + failure_reason: "info_hash is either missing or invalid".to_string(), + }; + assert_eq!(error_response, expected_error_response); +} + +pub async fn assert_invalid_peer_id_error_response(response: Response) { + assert_eq!(response.status(), 200); + let body = response.text().await.unwrap(); + let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { + panic!( + "response body should be a valid bencoded string for the 'invalid peer id' error, got \"{}\"", + &body + ) + }); + let expected_error_response = Error { + failure_reason: "peer_id is either missing or invalid".to_string(), + }; + assert_eq!(error_response, expected_error_response); +} diff --git a/tests/http/requests.rs b/tests/http/requests.rs index 7e59494c5..ceff2bd77 100644 --- a/tests/http/requests.rs +++ b/tests/http/requests.rs @@ -29,7 +29,7 @@ impl fmt::Display for AnnounceQuery { /// /// /// -/// Some parameters are not implemented yet. +/// Some parameters in the specification are not implemented in this tracker yet. 
impl AnnounceQuery { /// It builds the URL query component for the announce request. /// @@ -38,35 +38,11 @@ impl AnnounceQuery { /// /// pub fn build(&self) -> String { - let mut params = vec![ - ( - "info_hash", - percent_encoding::percent_encode(&self.info_hash, NON_ALPHANUMERIC).to_string(), - ), - ("peer_addr", self.peer_addr.to_string()), - ("downloaded", self.downloaded.to_string()), - ("uploaded", self.uploaded.to_string()), - ( - "peer_id", - percent_encoding::percent_encode(&self.peer_id, NON_ALPHANUMERIC).to_string(), - ), - ("port", self.port.to_string()), - ("left", self.left.to_string()), - ]; - - if let Some(event) = &self.event { - params.push(("event", event.to_string())); - } - - if let Some(compact) = &self.compact { - params.push(("compact", compact.to_string())); - } + self.params().to_string() + } - params - .iter() - .map(|param| format!("{}={}", param.0, param.1)) - .collect::>() - .join("&") + pub fn params(&self) -> AnnounceQueryParams { + AnnounceQueryParams::from(self) } } @@ -138,7 +114,117 @@ impl AnnounceQueryBuilder { self } - pub fn into(self) -> AnnounceQuery { + pub fn query(self) -> AnnounceQuery { self.announce_query } } + +/// It contains all the GET parameters that can be used in a HTTP Announce request. +/// +/// Sample Announce URL with all the GET parameters (mandatory and optional): +/// +/// ```text +/// http://127.0.0.1:7070/announce? 
+/// info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 (mandatory) +/// peer_addr=192.168.1.88 +/// downloaded=0 +/// uploaded=0 +/// peer_id=%2DqB00000000000000000 (mandatory) +/// port=17548 (mandatory) +/// left=0 +/// event=completed +/// compact=0 +/// ``` +pub struct AnnounceQueryParams { + pub info_hash: Option, + pub peer_addr: Option, + pub downloaded: Option, + pub uploaded: Option, + pub peer_id: Option, + pub port: Option, + pub left: Option, + pub event: Option, + pub compact: Option, +} + +impl std::fmt::Display for AnnounceQueryParams { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let mut params = vec![]; + + if let Some(info_hash) = &self.info_hash { + params.push(("info_hash", info_hash)); + } + if let Some(peer_addr) = &self.peer_addr { + params.push(("peer_addr", peer_addr)); + } + if let Some(downloaded) = &self.downloaded { + params.push(("downloaded", downloaded)); + } + if let Some(uploaded) = &self.uploaded { + params.push(("uploaded", uploaded)); + } + if let Some(peer_id) = &self.peer_id { + params.push(("peer_id", peer_id)); + } + if let Some(port) = &self.port { + params.push(("port", port)); + } + if let Some(left) = &self.left { + params.push(("left", left)); + } + if let Some(event) = &self.event { + params.push(("event", event)); + } + if let Some(compact) = &self.compact { + params.push(("compact", compact)); + } + + let query = params + .iter() + .map(|param| format!("{}={}", param.0, param.1)) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +impl AnnounceQueryParams { + pub fn from(announce_query: &AnnounceQuery) -> Self { + let event = announce_query.event.as_ref().map(std::string::ToString::to_string); + let compact = announce_query.compact.as_ref().map(std::string::ToString::to_string); + + Self { + info_hash: Some(percent_encoding::percent_encode(&announce_query.info_hash, NON_ALPHANUMERIC).to_string()), + peer_addr: Some(announce_query.peer_addr.to_string()), + downloaded: 
Some(announce_query.downloaded.to_string()), + uploaded: Some(announce_query.uploaded.to_string()), + peer_id: Some(percent_encoding::percent_encode(&announce_query.peer_id, NON_ALPHANUMERIC).to_string()), + port: Some(announce_query.port.to_string()), + left: Some(announce_query.left.to_string()), + event, + compact, + } + } + + pub fn remove_optional_params(&mut self) { + // todo: make them optional with the Option<...> in the AnnounceQuery struct + // if they are really optional. SO that we can crete a minimal AnnounceQuery + // instead of removing the optional params afterwards. + // + // The original specification on: + // + // says only `ip` and `event` are optional. + // + // On + // says only `ip`, `numwant`, `key` and `trackerid` are optional. + // + // but the server is responding if all these params are not included. + self.peer_addr = None; + self.downloaded = None; + self.uploaded = None; + self.left = None; + self.event = None; + self.compact = None; + } +} diff --git a/tests/http/responses.rs b/tests/http/responses.rs index e82197b03..bb0fda5d8 100644 --- a/tests/http/responses.rs +++ b/tests/http/responses.rs @@ -16,3 +16,9 @@ pub struct DictionaryPeer { pub peer_id: String, pub port: u16, } + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Error { + #[serde(rename = "failure reason")] + pub failure_reason: String, +} diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index c58990616..44ec6454c 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -9,22 +9,82 @@ mod http_tracker_server { mod for_all_config_modes { mod receiving_an_announce_request { - use crate::http::asserts::assert_internal_server_error; + use crate::http::asserts::{ + assert_internal_server_error_response, assert_invalid_info_hash_error_response, + assert_invalid_peer_id_error_response, assert_is_announce_response, + }; use crate::http::client::Client; + use crate::http::requests::AnnounceQueryBuilder; use 
crate::http::server::start_default_http_tracker; + #[tokio::test] + async fn should_respond_when_only_the_mandatory_fields_are_provided() { + let http_tracker_server = start_default_http_tracker().await; + + let mut params = AnnounceQueryBuilder::default().query().params(); + + params.remove_optional_params(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_is_announce_response(response).await; + } + #[tokio::test] async fn should_fail_when_the_request_is_empty() { let http_tracker_server = start_default_http_tracker().await; let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; - assert_internal_server_error(response).await; + assert_internal_server_error_response(response).await; + } + + #[tokio::test] + async fn should_fail_when_a_mandatory_field_is_missing() { + let http_tracker_server = start_default_http_tracker().await; + + // Without `info_hash` param + + let mut params = AnnounceQueryBuilder::default().query().params(); + + params.info_hash = None; + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_invalid_info_hash_error_response(response).await; + + // Without `peer_id` param + + let mut params = AnnounceQueryBuilder::default().query().params(); + + params.peer_id = None; + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_invalid_peer_id_error_response(response).await; + + // Without `port` param + + let mut params = AnnounceQueryBuilder::default().query().params(); + + params.port = None; + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; } } mod receiving_an_scrape_request { - use crate::http::asserts::assert_internal_server_error; + use 
crate::http::asserts::assert_internal_server_error_response; use crate::http::client::Client; use crate::http::server::start_default_http_tracker; @@ -34,7 +94,7 @@ mod http_tracker_server { let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; - assert_internal_server_error(response).await; + assert_internal_server_error_response(response).await; } } } @@ -62,7 +122,7 @@ mod http_tracker_server { .announce( &AnnounceQueryBuilder::default() .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) - .into(), + .query(), ) .await; @@ -99,7 +159,7 @@ mod http_tracker_server { &AnnounceQueryBuilder::default() .with_info_hash(&info_hash) .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .into(), + .query(), ) .await; @@ -136,7 +196,7 @@ mod http_tracker_server { let announce_query = AnnounceQueryBuilder::default() .with_info_hash(&info_hash) .with_peer_id(&peer.peer_id) - .into(); + .query(); assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); From 8ae4928e4d957e07c89a79b81827727ad30972d4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 24 Jan 2023 11:31:43 +0000 Subject: [PATCH 275/435] test(http): [#159] add test for invalid announce request params --- tests/common/fixtures.rs | 12 +++ tests/http/requests.rs | 17 +++- tests/http_tracker.rs | 188 +++++++++++++++++++++++++++++++++++++++ tests/tracker_api.rs | 12 +-- 4 files changed, 219 insertions(+), 10 deletions(-) diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index 78f7d381f..0ff6798f6 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -37,3 +37,15 @@ fn default_peer_for_testing() -> Peer { event: AnnounceEvent::Started, } } + +pub fn invalid_info_hashes() -> Vec { + [ + "0".to_string(), + "-1".to_string(), + "1.1".to_string(), + "INVALID INFOHASH".to_string(), + "9c38422213e30bff212b30c360d26f9a0213642".to_string(), // 39-char length instead of 40 + 
"9c38422213e30bff212b30c360d26f9a0213642&".to_string(), // Invalid char + ] + .to_vec() +} diff --git a/tests/http/requests.rs b/tests/http/requests.rs index ceff2bd77..885c48939 100644 --- a/tests/http/requests.rs +++ b/tests/http/requests.rs @@ -51,7 +51,7 @@ pub type ByteArray20 = [u8; 20]; pub type PortNumber = u16; pub enum Event { - //tarted, + //Started, //Stopped, Completed, } @@ -227,4 +227,19 @@ impl AnnounceQueryParams { self.event = None; self.compact = None; } + + pub fn set(&mut self, param_name: &str, param_value: &str) { + match param_name { + "info_hash" => self.info_hash = Some(param_value.to_string()), + "peer_addr" => self.peer_addr = Some(param_value.to_string()), + "downloaded" => self.downloaded = Some(param_value.to_string()), + "uploaded" => self.uploaded = Some(param_value.to_string()), + "peer_id" => self.peer_id = Some(param_value.to_string()), + "port" => self.port = Some(param_value.to_string()), + "left" => self.left = Some(param_value.to_string()), + "event" => self.event = Some(param_value.to_string()), + "compact" => self.compact = Some(param_value.to_string()), + &_ => panic!("Invalid param name for announce query"), + } + } } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 44ec6454c..a28a9efb1 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -9,6 +9,7 @@ mod http_tracker_server { mod for_all_config_modes { mod receiving_an_announce_request { + use crate::common::fixtures::invalid_info_hashes; use crate::http::asserts::{ assert_internal_server_error_response, assert_invalid_info_hash_error_response, assert_invalid_peer_id_error_response, assert_is_announce_response, @@ -81,6 +82,193 @@ mod http_tracker_server { assert_internal_server_error_response(response).await; } + + #[tokio::test] + async fn should_fail_when_the_info_hash_param_is_invalid() { + let http_tracker_server = start_default_http_tracker().await; + + let mut params = AnnounceQueryBuilder::default().query().params(); + + for 
invalid_value in &invalid_info_hashes() { + params.set("info_hash", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_invalid_info_hash_error_response(response).await; + } + } + + #[tokio::test] + async fn should_not_fail_when_the_peer_address_param_is_invalid() { + // AnnounceQuery does not even contain the `peer_addr` + // The peer IP is obtained in two ways: + // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP if there. + // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request header is tracker is running `on_reverse_proxy`. + + let http_tracker_server = start_default_http_tracker().await; + + let mut params = AnnounceQueryBuilder::default().query().params(); + + params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_is_announce_response(response).await; + } + + #[tokio::test] + async fn should_fail_when_the_downloaded_param_is_invalid() { + let http_tracker_server = start_default_http_tracker().await; + + let mut params = AnnounceQueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("downloaded", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + #[tokio::test] + async fn should_fail_when_the_uploaded_param_is_invalid() { + let http_tracker_server = start_default_http_tracker().await; + + let mut params = AnnounceQueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("uploaded", invalid_value); + + let response = 
Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + #[tokio::test] + async fn should_fail_when_the_peer_id_param_is_invalid() { + let http_tracker_server = start_default_http_tracker().await; + + let mut params = AnnounceQueryBuilder::default().query().params(); + + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "-qB0000000000000000", // 19 bytes + "-qB000000000000000000", // 21 bytes + ]; + + for invalid_value in invalid_values { + params.set("peer_id", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_invalid_peer_id_error_response(response).await; + } + } + + #[tokio::test] + async fn should_fail_when_the_port_param_is_invalid() { + let http_tracker_server = start_default_http_tracker().await; + + let mut params = AnnounceQueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("port", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + #[tokio::test] + async fn should_fail_when_the_left_param_is_invalid() { + let http_tracker_server = start_default_http_tracker().await; + + let mut params = AnnounceQueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("left", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + #[tokio::test] + async fn should_not_fail_when_the_event_param_is_invalid() { + // All invalid values are ignored as if the `event` param was empty + 
+ let http_tracker_server = start_default_http_tracker().await; + + let mut params = AnnounceQueryBuilder::default().query().params(); + + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "Started", // It should be lowercase + "Stopped", // It should be lowercase + "Completed", // It should be lowercase + ]; + + for invalid_value in invalid_values { + params.set("event", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_is_announce_response(response).await; + } + } + + #[tokio::test] + async fn should_not_fail_when_the_compact_param_is_invalid() { + let http_tracker_server = start_default_http_tracker().await; + + let mut params = AnnounceQueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("compact", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } } mod receiving_an_scrape_request { diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 5710db6a6..b79e8a8af 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -10,18 +10,12 @@ mod common; mod tracker_apis { + use crate::common::fixtures::invalid_info_hashes; + // When these infohashes are used in URL path params // the response is a custom response returned in the handler fn invalid_infohashes_returning_bad_request() -> Vec { - [ - "0".to_string(), - "-1".to_string(), - "1.1".to_string(), - "INVALID INFOHASH".to_string(), - "9c38422213e30bff212b30c360d26f9a0213642".to_string(), // 39-char length instead of 40 - "9c38422213e30bff212b30c360d26f9a0213642&".to_string(), // Invalid char - ] - .to_vec() + invalid_info_hashes() } // When these infohashes are used in URL path params From 8e5c99238cce09fa8308845ebb502aa912d1a32f Mon Sep 17 00:00:00 2001 From: 
Jose Celano Date: Tue, 24 Jan 2023 20:57:21 +0000 Subject: [PATCH 276/435] refactor(http): [#159] add dependency: serde_bytes It will be used to deserialize bytes from HTTP tracker announce compact responses. For exmaple: ``` pub peers: Vec, ``` --- Cargo.lock | 5 +++-- Cargo.toml | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bc82c64f3..e27eace74 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2261,9 +2261,9 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.7" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfc50e8183eeeb6178dcb167ae34a8051d63535023ae38b5d8d12beae193d37b" +checksum = "718dc5fff5b36f99093fc49b280cfc96ce6fc824317783bff5a1fed0c7a64819" dependencies = [ "serde", ] @@ -2830,6 +2830,7 @@ dependencies = [ "reqwest", "serde", "serde_bencode", + "serde_bytes", "serde_json", "serde_repr", "serde_urlencoded", diff --git a/Cargo.toml b/Cargo.toml index 0e67c65ae..9afbc16a8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -67,3 +67,4 @@ mockall = "0.11" reqwest = { version = "0.11.13", features = ["json"] } serde_urlencoded = "0.7.1" serde_repr = "0.1.10" +serde_bytes = "0.11.8" From 96fb56cde69387118e36f5f0b9e20da6bdde200b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 24 Jan 2023 21:00:10 +0000 Subject: [PATCH 277/435] test(http): [#159] add test for compact announce response --- tests/http/asserts.rs | 29 +++++++++++++-- tests/http/requests.rs | 9 +++-- tests/http/responses.rs | 80 ++++++++++++++++++++++++++++++++++++++++- tests/http_tracker.rs | 47 ++++++++++++++++++++++-- 4 files changed, 156 insertions(+), 9 deletions(-) diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index 27270f7f2..b5d84b0a1 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -1,7 +1,7 @@ use reqwest::Response; -use super::responses::Announce; -use crate::http::responses::Error; +use super::responses::{Announce, DecodedCompactAnnounce}; +use 
crate::http::responses::{CompactAnnounce, Error}; pub async fn assert_empty_announce_response(response: Response) { assert_eq!(response.status(), 200); @@ -11,10 +11,33 @@ pub async fn assert_empty_announce_response(response: Response) { pub async fn assert_announce_response(response: Response, expected_announce_response: &Announce) { assert_eq!(response.status(), 200); - let announce_response: Announce = serde_bencode::from_str(&response.text().await.unwrap()).unwrap(); + let body = response.text().await.unwrap(); + let announce_response: Announce = serde_bencode::from_str(&body) + .unwrap_or_else(|_| panic!("response body should be a valid announce response, got \"{}\"", &body)); assert_eq!(announce_response, *expected_announce_response); } +/// Sample bencoded response as byte array: +/// +/// ```text +/// b"d8:intervali120e12:min intervali120e8:completei2e10:incompletei0e5:peers6:~\0\0\x01\x1f\x90e6:peers60:e" +/// ``` +pub async fn assert_compact_announce_response(response: Response, expected_response: &DecodedCompactAnnounce) { + assert_eq!(response.status(), 200); + + let bytes = response.bytes().await.unwrap(); + + let compact_announce: CompactAnnounce = serde_bencode::from_bytes(&bytes).unwrap_or_else(|_| { + panic!( + "response body should be a valid compact announce response, got \"{:?}\"", + &bytes + ) + }); + let actual_response = DecodedCompactAnnounce::from(compact_announce); + + assert_eq!(actual_response, *expected_response); +} + pub async fn assert_is_announce_response(response: Response) { assert_eq!(response.status(), 200); let body = response.text().await.unwrap(); diff --git a/tests/http/requests.rs b/tests/http/requests.rs index 885c48939..5453d9261 100644 --- a/tests/http/requests.rs +++ b/tests/http/requests.rs @@ -69,14 +69,14 @@ impl fmt::Display for Event { #[derive(Serialize_repr, PartialEq, Debug)] #[repr(u8)] pub enum Compact { - //Accepted = 1, + Accepted = 1, NotAccepted = 0, } impl fmt::Display for Compact { fn fmt(&self, f: &mut 
fmt::Formatter) -> fmt::Result { match self { - //Compact::Accepted => write!(f, "1"), + Compact::Accepted => write!(f, "1"), Compact::NotAccepted => write!(f, "0"), } } @@ -114,6 +114,11 @@ impl AnnounceQueryBuilder { self } + pub fn with_compact(mut self, compact: Compact) -> Self { + self.announce_query.compact = Some(compact); + self + } + pub fn query(self) -> AnnounceQuery { self.announce_query } diff --git a/tests/http/responses.rs b/tests/http/responses.rs index bb0fda5d8..0bef39b09 100644 --- a/tests/http/responses.rs +++ b/tests/http/responses.rs @@ -1,3 +1,5 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use serde::{self, Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, PartialEq)] @@ -7,7 +9,7 @@ pub struct Announce { pub interval: u32, #[serde(rename = "min interval")] pub min_interval: u32, - pub peers: Vec, + pub peers: Vec, // Peers with IPV4 } #[derive(Serialize, Deserialize, Debug, PartialEq)] @@ -17,6 +19,82 @@ pub struct DictionaryPeer { pub port: u16, } +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct CompactAnnounce { + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + #[serde(rename = "min interval")] + pub min_interval: u32, + #[serde(with = "serde_bytes")] + pub peers: Vec, +} + +#[derive(Debug, PartialEq)] +pub struct DecodedCompactAnnounce { + // code-review: there could be a way to deserialize this struct directly + // by using serde instead of doing it manually. Or at least using a custom deserializer. 
+ pub complete: u32, + pub incomplete: u32, + pub interval: u32, + pub min_interval: u32, + pub peers: CompactPeerList, +} + +#[derive(Debug, PartialEq)] +pub struct CompactPeerList { + peers: Vec, +} + +impl CompactPeerList { + pub fn new(peers: Vec) -> Self { + Self { peers } + } +} + +#[derive(Clone, Debug, PartialEq)] +pub struct CompactPeer { + ip: Ipv4Addr, + port: u16, +} + +impl CompactPeer { + pub fn new(socket_addr: &SocketAddr) -> Self { + match socket_addr.ip() { + IpAddr::V4(ip) => Self { + ip, + port: socket_addr.port(), + }, + IpAddr::V6(_ip) => panic!("IPV6 is not supported for compact peer"), + } + } + + pub fn new_from_bytes(bytes: &[u8]) -> Self { + Self { + ip: Ipv4Addr::new(bytes[0], bytes[1], bytes[2], bytes[3]), + port: u16::from_be_bytes([bytes[4], bytes[5]]), + } + } +} + +impl From for DecodedCompactAnnounce { + fn from(compact_announce: CompactAnnounce) -> Self { + let mut peers = vec![]; + + for peer_bytes in compact_announce.peers.chunks_exact(6) { + peers.push(CompactPeer::new_from_bytes(peer_bytes)); + } + + Self { + complete: compact_announce.complete, + incomplete: compact_announce.incomplete, + interval: compact_announce.interval, + min_interval: compact_announce.min_interval, + peers: CompactPeerList::new(peers), + } + } +} + #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Error { #[serde(rename = "failure reason")] diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index a28a9efb1..5b492cbc1 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -296,10 +296,12 @@ mod http_tracker_server { use torrust_tracker::tracker::peer; use crate::common::fixtures::PeerBuilder; - use crate::http::asserts::{assert_announce_response, assert_empty_announce_response}; + use crate::http::asserts::{ + assert_announce_response, assert_compact_announce_response, assert_empty_announce_response, + }; use crate::http::client::Client; - use crate::http::requests::AnnounceQueryBuilder; - use 
crate::http::responses::{Announce, DictionaryPeer}; + use crate::http::requests::{AnnounceQueryBuilder, Compact}; + use crate::http::responses::{Announce, CompactPeer, CompactPeerList, DecodedCompactAnnounce, DictionaryPeer}; use crate::http::server::start_public_http_tracker; #[tokio::test] @@ -394,6 +396,45 @@ mod http_tracker_server { assert_empty_announce_response(response).await; } + + #[tokio::test] + async fn should_return_the_compact_response() { + // Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html + + let http_tracker_server = start_public_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .into(); + + // Add the Peer 1 + http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2 accepting compact responses + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &AnnounceQueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_compact(Compact::Accepted) + .query(), + ) + .await; + + let expected_response = DecodedCompactAnnounce { + complete: 2, + incomplete: 0, + interval: 120, + min_interval: 120, + peers: CompactPeerList::new([CompactPeer::new(&previously_announced_peer.peer_addr)].to_vec()), + }; + + assert_compact_announce_response(response, &expected_response).await; + } } } } From 3fce688466034f85ccee6f4128f65b8856780d9f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 25 Jan 2023 10:30:16 +0000 Subject: [PATCH 278/435] refactor(http): extract converter --- tests/http/responses.rs | 11 +++++++++++ tests/http_tracker.rs | 12 +++--------- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/tests/http/responses.rs b/tests/http/responses.rs index 0bef39b09..7cf283916 100644 
--- a/tests/http/responses.rs +++ b/tests/http/responses.rs @@ -1,6 +1,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use serde::{self, Deserialize, Serialize}; +use torrust_tracker::tracker::peer::Peer; #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Announce { @@ -19,6 +20,16 @@ pub struct DictionaryPeer { pub port: u16, } +impl From for DictionaryPeer { + fn from(peer: Peer) -> Self { + DictionaryPeer { + peer_id: peer.peer_id.to_string(), + ip: peer.peer_addr.ip().to_string(), + port: peer.peer_addr.port(), + } + } +} + #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct CompactAnnounce { pub complete: u32, diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 5b492cbc1..a6f44acec 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -343,7 +343,7 @@ mod http_tracker_server { // Add the Peer 1 http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; - // Announce the new Peer 2 + // Announce the new Peer 2. This new peer is non included on the response peer list let response = Client::new(http_tracker_server.get_connection_info()) .announce( &AnnounceQueryBuilder::default() @@ -353,13 +353,7 @@ mod http_tracker_server { ) .await; - let expected_peer = DictionaryPeer { - peer_id: previously_announced_peer.peer_id.to_string(), - ip: previously_announced_peer.peer_addr.ip().to_string(), - port: previously_announced_peer.peer_addr.port(), - }; - - // This new peer is non included on the response peer list + // It should only contain teh previously announced peer assert_announce_response( response, &Announce { @@ -367,7 +361,7 @@ mod http_tracker_server { incomplete: 0, interval: http_tracker_server.tracker.config.announce_interval, min_interval: http_tracker_server.tracker.config.min_announce_interval, - peers: vec![expected_peer], + peers: vec![DictionaryPeer::from(previously_announced_peer)], }, ) .await; From 85a489426c0562627d23728d4ab691ec8949cd89 Mon Sep 17 00:00:00 2001 From: Jose 
Celano Date: Wed, 25 Jan 2023 11:08:10 +0000 Subject: [PATCH 279/435] test(http): [#159] add test for default announce response format --- tests/http/requests.rs | 5 +++++ tests/http_tracker.rs | 44 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/tests/http/requests.rs b/tests/http/requests.rs index 5453d9261..e24103b79 100644 --- a/tests/http/requests.rs +++ b/tests/http/requests.rs @@ -119,6 +119,11 @@ impl AnnounceQueryBuilder { self } + pub fn without_compact(mut self) -> Self { + self.announce_query.compact = None; + self + } + pub fn query(self) -> AnnounceQuery { self.announce_query } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index a6f44acec..26c378b44 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -292,6 +292,7 @@ mod http_tracker_server { mod receiving_an_announce_request { use std::str::FromStr; + use reqwest::Response; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -301,7 +302,9 @@ mod http_tracker_server { }; use crate::http::client::Client; use crate::http::requests::{AnnounceQueryBuilder, Compact}; - use crate::http::responses::{Announce, CompactPeer, CompactPeerList, DecodedCompactAnnounce, DictionaryPeer}; + use crate::http::responses::{ + Announce, CompactAnnounce, CompactPeer, CompactPeerList, DecodedCompactAnnounce, DictionaryPeer, + }; use crate::http::server::start_public_http_tracker; #[tokio::test] @@ -429,6 +432,45 @@ mod http_tracker_server { assert_compact_announce_response(response, &expected_response).await; } + + #[tokio::test] + async fn should_not_return_the_compact_response_by_default() { + // code-review: the HTTP tracker does not return the compact response by default if the "compact" + // param is not provided in the announce URL. The BEP 23 suggest to do so. 
+ + let http_tracker_server = start_public_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .into(); + + // Add the Peer 1 + http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2 without passing the "compact" param + // By default it should respond with the compact peer list + // https://www.bittorrent.org/beps/bep_0023.html + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &AnnounceQueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .without_compact() + .query(), + ) + .await; + + assert!(!is_a_compact_announce_response(response).await); + } + + async fn is_a_compact_announce_response(response: Response) -> bool { + let bytes = response.bytes().await.unwrap(); + let compact_announce = serde_bencode::from_bytes::(&bytes); + compact_announce.is_ok() + } } } } From 080f3c43ad55a1a44ae1507c31b21a82e2a944c3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 25 Jan 2023 13:03:55 +0000 Subject: [PATCH 280/435] test(http): [#159] add tests for uptadint statistics after announce request --- tests/http/client.rs | 29 ++++++++----- tests/http/requests.rs | 5 +++ tests/http/server.rs | 21 ++++++++++ tests/http_tracker.rs | 93 +++++++++++++++++++++++++++++++++++++++++- 4 files changed, 136 insertions(+), 12 deletions(-) diff --git a/tests/http/client.rs b/tests/http/client.rs index ae51bc02e..df9e012f0 100644 --- a/tests/http/client.rs +++ b/tests/http/client.rs @@ -1,4 +1,6 @@ -use reqwest::Response; +use std::net::IpAddr; + +use reqwest::{Client as ReqwestClient, Response}; use super::connection_info::ConnectionInfo; use super::requests::AnnounceQuery; @@ -6,26 +8,31 @@ use super::requests::AnnounceQuery; /// HTTP Tracker Client pub 
struct Client { connection_info: ConnectionInfo, + reqwest_client: ReqwestClient, } impl Client { pub fn new(connection_info: ConnectionInfo) -> Self { - Self { connection_info } + Self { + connection_info, + reqwest_client: reqwest::Client::builder().build().unwrap(), + } + } + + /// Creates the new client binding it to a specific local address + pub fn bind(connection_info: ConnectionInfo, local_address: IpAddr) -> Self { + Self { + connection_info, + reqwest_client: reqwest::Client::builder().local_address(local_address).build().unwrap(), + } + } pub async fn announce(&self, query: &AnnounceQuery) -> Response { - let path_with_query = format!("announce?{query}"); - self.get(&path_with_query).await + self.get(&format!("announce?{query}")).await } pub async fn get(&self, path: &str) -> Response { - reqwest::Client::builder() - .build() - .unwrap() - .get(self.base_url(path)) - .send() - .await - .unwrap() + self.reqwest_client.get(self.base_url(path)).send().await.unwrap() } fn base_url(&self, path: &str) -> String { diff --git a/tests/http/requests.rs index e24103b79..9135020e9 100644 --- a/tests/http/requests.rs +++ b/tests/http/requests.rs @@ -119,6 +119,11 @@ impl AnnounceQueryBuilder { self } + pub fn with_peer_addr(mut self, peer_addr: &IpAddr) -> Self { + self.announce_query.peer_addr = *peer_addr; + self + } + pub fn without_compact(mut self) -> Self { self.announce_query.compact = None; self diff --git a/tests/http/server.rs index 32d02b060..0eb672e58 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -1,4 +1,5 @@ use core::panic; +use std::net::SocketAddr; use std::sync::Arc; use torrust_tracker::config::{ephemeral_configuration, Configuration}; @@ -10,10 +11,30 @@ use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use super::connection_info::ConnectionInfo; +/// Starts a HTTP tracker with mode "public" pub async fn start_public_http_tracker() -> Server {
start_default_http_tracker().await } +/// Starts a HTTP tracker with a wildcard IPV6 address. +/// The configuration in the `config.toml` file would be like this: +/// +/// ```text +/// [[http_trackers]] +/// bind_address = "[::]:7070" +/// ``` +pub async fn start_ipv6_http_tracker() -> Server { + let mut configuration = ephemeral_configuration(); + + // Change socket address to "wildcard address" (unspecified address which means any IP address) + // but keeping the random port generated with the ephemeral configuration. + let socket_addr: SocketAddr = configuration.http_trackers[0].bind_address.parse().unwrap(); + let new_ipv6_socket_address = format!("[::]:{}", socket_addr.port()); + configuration.http_trackers[0].bind_address = new_ipv6_socket_address; + + start_custom_http_tracker(Arc::new(configuration)).await +} + pub async fn start_default_http_tracker() -> Server { let configuration = tracker_configuration(); start_custom_http_tracker(configuration.clone()).await diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 26c378b44..c29c6af6f 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -290,6 +290,7 @@ mod http_tracker_server { mod configured_as_public { mod receiving_an_announce_request { + use std::net::{IpAddr, Ipv6Addr}; use std::str::FromStr; use reqwest::Response; @@ -305,7 +306,7 @@ mod http_tracker_server { use crate::http::responses::{ Announce, CompactAnnounce, CompactPeer, CompactPeerList, DecodedCompactAnnounce, DictionaryPeer, }; - use crate::http::server::start_public_http_tracker; + use crate::http::server::{start_ipv6_http_tracker, start_public_http_tracker}; #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { @@ -471,6 +472,96 @@ mod http_tracker_server { let compact_announce = serde_bencode::from_bytes::(&bytes); compact_announce.is_ok() } + + #[tokio::test] + async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { + let http_tracker_server = 
start_public_http_tracker().await; + + Client::new(http_tracker_server.get_connection_info()) + .announce(&AnnounceQueryBuilder::default().query()) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { + let http_tracker_server = start_ipv6_http_tracker().await; + + Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) + .announce(&AnnounceQueryBuilder::default().query()) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { + // The tracker ignores the peer address in the request param. It uses the client remote ip address. + + let http_tracker_server = start_public_http_tracker().await; + + Client::new(http_tracker_server.get_connection_info()) + .announce( + &AnnounceQueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 0); + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { + let http_tracker_server = start_public_http_tracker().await; + + Client::new(http_tracker_server.get_connection_info()) + .announce(&AnnounceQueryBuilder::default().query()) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { + let http_tracker_server = start_ipv6_http_tracker().await; + + Client::bind(http_tracker_server.get_connection_info(), 
IpAddr::from_str("::1").unwrap()) + .announce(&AnnounceQueryBuilder::default().query()) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 1); + } + + #[tokio::test] + async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { + // The tracker ignores the peer address in the request param. It uses the client remote ip address. + + let http_tracker_server = start_public_http_tracker().await; + + Client::new(http_tracker_server.get_connection_info()) + .announce( + &AnnounceQueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 0); + } } } } From 5cc2ac1595c802e9f25d2d98085cedc097f69be4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 26 Jan 2023 12:00:36 +0000 Subject: [PATCH 281/435] refactor(http): [#159] add dependency local-ip-address We need it to get the address of the HTTP client we use in tests. 
--- Cargo.lock | 23 +++++++++++++++++++++++ Cargo.toml | 1 + 2 files changed, 24 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index e27eace74..8347362ab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1268,6 +1268,18 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "local-ip-address" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faa9d02443a1741e9f51dafdfcbffb3863b2a89c457d762b40337d6c5153ef81" +dependencies = [ + "libc", + "neli", + "thiserror", + "windows-sys 0.42.0", +] + [[package]] name = "lock_api" version = "0.4.9" @@ -1495,6 +1507,16 @@ dependencies = [ "tempfile", ] +[[package]] +name = "neli" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9053554eb5dcb7e10d9cdab1206965bde870eed5d0d341532ca035e3ba221508" +dependencies = [ + "byteorder", + "libc", +] + [[package]] name = "nix" version = "0.23.1" @@ -2819,6 +2841,7 @@ dependencies = [ "futures", "hex", "lazy_static", + "local-ip-address", "log", "mockall", "openssl", diff --git a/Cargo.toml b/Cargo.toml index 9afbc16a8..cf90da8f1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -68,3 +68,4 @@ reqwest = { version = "0.11.13", features = ["json"] } serde_urlencoded = "0.7.1" serde_repr = "0.1.10" serde_bytes = "0.11.8" +local-ip-address = "0.5.1" From 452b81abb546b7cbf53ab643c936b8a0a365e27e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 26 Jan 2023 12:03:09 +0000 Subject: [PATCH 282/435] test(http): [#159] add tests for assigning IP to peers in announce request --- tests/http/client.rs | 13 +++ tests/http/server.rs | 26 ++++- tests/http_tracker.rs | 216 ++++++++++++++++++++++++++++++++++-------- 3 files changed, 213 insertions(+), 42 deletions(-) diff --git a/tests/http/client.rs b/tests/http/client.rs index df9e012f0..d66ec2a00 100644 --- a/tests/http/client.rs +++ 
b/tests/http/client.rs @@ -31,10 +31,23 @@ impl Client { self.get(&format!("announce?{query}")).await } + pub async fn announce_with_header(&self, query: &AnnounceQuery, key: &str, value: &str) -> Response { + self.get_with_header(&format!("announce?{query}"), key, value).await + } + pub async fn get(&self, path: &str) -> Response { self.reqwest_client.get(self.base_url(path)).send().await.unwrap() } + pub async fn get_with_header(&self, path: &str, key: &str, value: &str) -> Response { + self.reqwest_client + .get(self.base_url(path)) + .header(key, value) + .send() + .await + .unwrap() + } + fn base_url(&self, path: &str) -> String { format!("http://{}/{path}", &self.connection_info.bind_address) } diff --git a/tests/http/server.rs index 0eb672e58..506bf75e7 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -1,5 +1,5 @@ use core::panic; -use std::net::SocketAddr; +use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; use torrust_tracker::config::{ephemeral_configuration, Configuration}; @@ -35,6 +35,30 @@ pub async fn start_ipv6_http_tracker() -> Server { start_custom_http_tracker(Arc::new(configuration)).await } +/// Starts a HTTP tracker with a specific `external_ip`. +/// The configuration in the `config.toml` file would be like this: +/// +/// ```text +/// external_ip = "2.137.87.41" +/// ``` +pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr) -> Server { + let mut configuration = ephemeral_configuration(); + configuration.external_ip = Some(external_ip.to_string()); + start_custom_http_tracker(Arc::new(configuration)).await +} + +/// Starts a HTTP tracker `on_reverse_proxy`.
+/// The configuration in the `config.toml` file would be like this: +/// +/// ```text +/// on_reverse_proxy = true +/// ``` +pub async fn start_http_tracker_on_reverse_proxy() -> Server { + let mut configuration = ephemeral_configuration(); + configuration.on_reverse_proxy = true; + start_custom_http_tracker(Arc::new(configuration)).await +} + pub async fn start_default_http_tracker() -> Server { let configuration = tracker_configuration(); start_custom_http_tracker(configuration.clone()).await diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index c29c6af6f..2a1a6004b 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -9,14 +9,29 @@ mod http_tracker_server { mod for_all_config_modes { mod receiving_an_announce_request { - use crate::common::fixtures::invalid_info_hashes; + use std::net::{IpAddr, Ipv6Addr}; + use std::str::FromStr; + + use local_ip_address::local_ip; + use reqwest::Response; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; use crate::http::asserts::{ + assert_announce_response, assert_compact_announce_response, assert_empty_announce_response, assert_internal_server_error_response, assert_invalid_info_hash_error_response, assert_invalid_peer_id_error_response, assert_is_announce_response, }; use crate::http::client::Client; - use crate::http::requests::AnnounceQueryBuilder; - use crate::http::server::start_default_http_tracker; + use crate::http::requests::{AnnounceQueryBuilder, Compact}; + use crate::http::responses::{ + Announce, CompactAnnounce, CompactPeer, CompactPeerList, DecodedCompactAnnounce, DictionaryPeer, + }; + use crate::http::server::{ + start_default_http_tracker, start_http_tracker_on_reverse_proxy, start_http_tracker_with_external_ip, + start_ipv6_http_tracker, start_public_http_tracker, + }; #[tokio::test] async fn should_respond_when_only_the_mandatory_fields_are_provided() { @@ -269,44 +284,6 
@@ mod http_tracker_server { assert_internal_server_error_response(response).await; } } - } - - mod receiving_an_scrape_request { - use crate::http::asserts::assert_internal_server_error_response; - use crate::http::client::Client; - use crate::http::server::start_default_http_tracker; - - #[tokio::test] - async fn should_fail_when_the_request_is_empty() { - let http_tracker_server = start_default_http_tracker().await; - - let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; - - assert_internal_server_error_response(response).await; - } - } - } - - mod configured_as_public { - - mod receiving_an_announce_request { - use std::net::{IpAddr, Ipv6Addr}; - use std::str::FromStr; - - use reqwest::Response; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - - use crate::common::fixtures::PeerBuilder; - use crate::http::asserts::{ - assert_announce_response, assert_compact_announce_response, assert_empty_announce_response, - }; - use crate::http::client::Client; - use crate::http::requests::{AnnounceQueryBuilder, Compact}; - use crate::http::responses::{ - Announce, CompactAnnounce, CompactPeer, CompactPeerList, DecodedCompactAnnounce, DictionaryPeer, - }; - use crate::http::server::{start_ipv6_http_tracker, start_public_http_tracker}; #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { @@ -562,6 +539,163 @@ mod http_tracker_server { assert_eq!(stats.tcp6_announces_handled, 0); } + + #[tokio::test] + async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { + let http_tracker_server = start_public_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let client_ip = local_ip().unwrap(); + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + + let announce_query = AnnounceQueryBuilder::default() + 
.with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), client_ip); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + } + + #[tokio::test] + async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + /* We assume that both the client and tracker share the same public IP. + + client <-> tracker <-> Internet + 127.0.0.1 external_ip = "2.137.87.41" + */ + + let http_tracker_server = start_http_tracker_with_external_ip(&IpAddr::from_str("2.137.87.41").unwrap()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + + let announce_query = AnnounceQueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), http_tracker_server.tracker.config.get_ext_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + } + + #[tokio::test] + async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + /* We assume that both the client and tracker share the same public IP. 
+ + client <-> tracker <-> Internet + ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" + */ + + let http_tracker_server = + start_http_tracker_with_external_ip(&IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()) + .await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + + let announce_query = AnnounceQueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), http_tracker_server.tracker.config.get_ext_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + } + + #[tokio::test] + async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( + ) { + /* + client <-> http proxy <-> tracker <-> Internet + ip: header: config: peer addr: + 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 + */ + + let http_tracker_server = start_http_tracker_on_reverse_proxy().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let client = Client::new(http_tracker_server.get_connection_info()); + + let announce_query = AnnounceQueryBuilder::default().with_info_hash(&info_hash).query(); + + // todo: shouldn't be the the leftmost IP address? + // THe application is taken the the rightmost IP address. 
See function http::filters::peer_addr + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For + client + .announce_with_header( + &announce_query, + "X-Forwarded-For", + "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", + ) + .await; + + let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); + } } + + mod receiving_an_scrape_request { + use crate::http::asserts::assert_internal_server_error_response; + use crate::http::client::Client; + use crate::http::server::start_default_http_tracker; + + #[tokio::test] + async fn should_fail_when_the_request_is_empty() { + let http_tracker_server = start_default_http_tracker().await; + + let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; + + assert_internal_server_error_response(response).await; + } + } + } + + mod configured_as_whitelisted { + + mod and_receiving_an_announce_request {} + + mod receiving_an_scrape_request {} + } + + mod configured_as_private { + + mod and_receiving_an_announce_request {} + + mod receiving_an_scrape_request {} + } + + mod configured_as_private_and_whitelisted { + + mod and_receiving_an_announce_request {} + + mod receiving_an_scrape_request {} } } From 86155d6b337c8421e1b6d439fe4befc0a5cc6a63 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 26 Jan 2023 12:29:58 +0000 Subject: [PATCH 283/435] refactor(http): improve readability --- tests/http_tracker.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 2a1a6004b..9cd43a155 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -25,9 +25,7 @@ mod http_tracker_server { }; use crate::http::client::Client; use crate::http::requests::{AnnounceQueryBuilder, Compact}; - use crate::http::responses::{ - Announce, CompactAnnounce, CompactPeer, 
CompactPeerList, DecodedCompactAnnounce, DictionaryPeer, - }; + use crate::http::responses::{self, Announce, CompactAnnounce, CompactPeer, CompactPeerList, DictionaryPeer}; use crate::http::server::{ start_default_http_tracker, start_http_tracker_on_reverse_proxy, start_http_tracker_with_external_ip, start_ipv6_http_tracker, start_public_http_tracker, @@ -400,7 +398,7 @@ mod http_tracker_server { ) .await; - let expected_response = DecodedCompactAnnounce { + let expected_response = responses::DecodedCompactAnnounce { complete: 2, incomplete: 0, interval: 120, From 11492a32b666747194153f74e7ecc646d429953a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 26 Jan 2023 13:12:09 +0000 Subject: [PATCH 284/435] test(http): [#159] add tests for announce request in whitelisted mode --- tests/http/asserts.rs | 15 +++++++++++++++ tests/http/server.rs | 12 +++++++++++- tests/http_tracker.rs | 43 ++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 68 insertions(+), 2 deletions(-) diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index b5d84b0a1..60a6a2013 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -91,3 +91,18 @@ pub async fn assert_invalid_peer_id_error_response(response: Response) { }; assert_eq!(error_response, expected_error_response); } + +pub async fn assert_torrent_not_in_whitelist_error_response(response: Response) { + assert_eq!(response.status(), 200); + let body = response.text().await.unwrap(); + let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { + panic!( + "response body should be a valid bencoded string for the 'torrent not on whitelist' error, got \"{}\"", + &body + ) + }); + let expected_error_response = Error { + failure_reason: "torrent not on whitelist".to_string(), + }; + assert_eq!(error_response, expected_error_response); +} diff --git a/tests/http/server.rs b/tests/http/server.rs index 506bf75e7..5cd1fec19 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -5,6 +5,7 @@ 
use std::sync::Arc; use torrust_tracker::config::{ephemeral_configuration, Configuration}; use torrust_tracker::jobs::http_tracker; use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::tracker::mode::Mode; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; @@ -13,7 +14,16 @@ use super::connection_info::ConnectionInfo; /// Starts a HTTP tracker with mode "public" pub async fn start_public_http_tracker() -> Server { - start_default_http_tracker().await + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Public; + start_custom_http_tracker(Arc::new(configuration)).await +} + +/// Starts a HTTP tracker with mode "listed" +pub async fn start_whitelisted_http_tracker() -> Server { + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Listed; + start_custom_http_tracker(Arc::new(configuration)).await } /// Starts a HTTP tracker with a wildcard IPV6 address. 
diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 9cd43a155..05a2dfba1 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -678,7 +678,48 @@ mod http_tracker_server { mod configured_as_whitelisted { - mod and_receiving_an_announce_request {} + mod and_receiving_an_announce_request { + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + + use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; + use crate::http::client::Client; + use crate::http::requests::AnnounceQueryBuilder; + use crate::http::server::start_whitelisted_http_tracker; + + #[tokio::test] + async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { + let http_tracker_server = start_whitelisted_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(&AnnounceQueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_torrent_not_in_whitelist_error_response(response).await; + } + + #[tokio::test] + async fn should_allow_announcing_a_whitelisted_torrent() { + let http_tracker_server = start_whitelisted_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker_server + .tracker + .add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the torrent to the whitelist"); + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(&AnnounceQueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_is_announce_response(response).await; + } + } mod receiving_an_scrape_request {} } From badb7912091efde29da415c5527b6571c960e29f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 26 Jan 2023 18:05:48 +0000 Subject: [PATCH 285/435] test(http): [#159] add tests for announce request in private mode --- 
src/tracker/auth.rs | 10 ++++++ tests/http/asserts.rs | 30 ++++++++++++++++++ tests/http/client.rs | 51 +++++++++++++++++++++++++---- tests/http/connection_info.rs | 6 ++-- tests/http/server.rs | 7 ++++ tests/http_tracker.rs | 60 ++++++++++++++++++++++++++++++++++- 6 files changed, 153 insertions(+), 11 deletions(-) diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index c4062bc68..3b8af96a1 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -78,6 +78,16 @@ impl Key { None } } + + /// # Panics + /// + /// Will fail if the key id is not a valid key id. + #[must_use] + pub fn id(&self) -> KeyId { + // todo: replace the type of field `key` with type `KeyId`. + // The constructor should fail if an invalid KeyId is provided. + KeyId::from_str(&self.key).unwrap() + } } #[derive(Debug, Display, PartialEq, Clone)] diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index 60a6a2013..cf4683a7b 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -106,3 +106,33 @@ pub async fn assert_torrent_not_in_whitelist_error_response(response: Response) }; assert_eq!(error_response, expected_error_response); } + +pub async fn assert_peer_not_authenticated_error_response(response: Response) { + assert_eq!(response.status(), 200); + let body = response.text().await.unwrap(); + let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { + panic!( + "response body should be a valid bencoded string for the 'peer not authenticated' error, got \"{}\"", + &body + ) + }); + let expected_error_response = Error { + failure_reason: "peer not authenticated".to_string(), + }; + assert_eq!(error_response, expected_error_response); +} + +pub async fn assert_invalid_authentication_key_error_response(response: Response) { + assert_eq!(response.status(), 200); + let body = response.text().await.unwrap(); + let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { + panic!( + "response body should be a valid bencoded string for 
the 'invalid authentication key' error, got \"{}\"", + &body + ) + }); + let expected_error_response = Error { + failure_reason: "invalid authentication key".to_string(), + }; + assert_eq!(error_response, expected_error_response); +} diff --git a/tests/http/client.rs b/tests/http/client.rs index d66ec2a00..062484e83 100644 --- a/tests/http/client.rs +++ b/tests/http/client.rs @@ -1,6 +1,7 @@ use std::net::IpAddr; use reqwest::{Client as ReqwestClient, Response}; +use torrust_tracker::tracker::auth::KeyId; use super::connection_info::ConnectionInfo; use super::requests::AnnounceQuery; @@ -9,13 +10,23 @@ use super::requests::AnnounceQuery; pub struct Client { connection_info: ConnectionInfo, reqwest_client: ReqwestClient, + key_id: Option, } +/// URL components in this context: +/// +/// ```text +/// http://127.0.0.1:62304/announce/YZ....rJ?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// \_____________________/\_______________/ \__________________________________________________________/ +/// | | | +/// base url path query +/// ``` impl Client { pub fn new(connection_info: ConnectionInfo) -> Self { Self { connection_info, reqwest_client: reqwest::Client::builder().build().unwrap(), + key_id: None, } } @@ -24,31 +35,57 @@ impl Client { Self { connection_info, reqwest_client: reqwest::Client::builder().local_address(local_address).build().unwrap(), + key_id: None, + } + } + + pub fn authenticated(connection_info: ConnectionInfo, key_id: KeyId) -> Self { + Self { + connection_info, + reqwest_client: reqwest::Client::builder().build().unwrap(), + key_id: Some(key_id), } } pub async fn announce(&self, query: &AnnounceQuery) -> Response { - self.get(&format!("announce?{query}")).await + self.get(&self.build_announce_path_and_query(query)).await } - pub async fn announce_with_header(&self, query: &AnnounceQuery, key: &str, value: &str) -> Response { - self.get_with_header(&format!("announce?{query}"), key, value).await + pub async fn 
announce_with_header(&self, query: &AnnounceQuery, key_id: &str, value: &str) -> Response { + self.get_with_header(&self.build_announce_path_and_query(query), key_id, value) + .await } pub async fn get(&self, path: &str) -> Response { - self.reqwest_client.get(self.base_url(path)).send().await.unwrap() + self.reqwest_client.get(self.build_url(path)).send().await.unwrap() } pub async fn get_with_header(&self, path: &str, key: &str, value: &str) -> Response { self.reqwest_client - .get(self.base_url(path)) + .get(self.build_url(path)) .header(key, value) .send() .await .unwrap() } - fn base_url(&self, path: &str) -> String { - format!("http://{}/{path}", &self.connection_info.bind_address) + fn build_announce_path_and_query(&self, query: &AnnounceQuery) -> String { + format!("{}?{query}", self.build_path("announce")) + } + + fn build_path(&self, path: &str) -> String { + match &self.key_id { + Some(key_id) => format!("{path}/{key_id}"), + None => path.to_string(), + } + } + + fn build_url(&self, path: &str) -> String { + let base_url = self.base_url(); + format!("{base_url}{path}") + } + + fn base_url(&self) -> String { + format!("http://{}/", &self.connection_info.bind_address) } } diff --git a/tests/http/connection_info.rs b/tests/http/connection_info.rs index debf931e4..fb1dbf64e 100644 --- a/tests/http/connection_info.rs +++ b/tests/http/connection_info.rs @@ -1,16 +1,16 @@ -use torrust_tracker::tracker::auth::Key; +use torrust_tracker::tracker::auth::KeyId; #[derive(Clone, Debug)] pub struct ConnectionInfo { pub bind_address: String, - pub aut_key: Option, + pub key_id: Option, } impl ConnectionInfo { pub fn anonymous(bind_address: &str) -> Self { Self { bind_address: bind_address.to_string(), - aut_key: None, + key_id: None, } } } diff --git a/tests/http/server.rs b/tests/http/server.rs index 5cd1fec19..6741d8e97 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -26,6 +26,13 @@ pub async fn start_whitelisted_http_tracker() -> Server { 
start_custom_http_tracker(Arc::new(configuration)).await } +/// Starts a HTTP tracker with mode "private" +pub async fn start_private_http_tracker() -> Server { + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Private; + start_custom_http_tracker(Arc::new(configuration)).await +} + /// Starts a HTTP tracker with a wildcard IPV6 address. /// The configuration in the `config.toml` file would be like this: /// diff --git a/tests/http_tracker.rs index 05a2dfba1..65f42f415 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -726,7 +726,65 @@ mod http_tracker_server { mod configured_as_private { - mod and_receiving_an_announce_request {} + mod and_receiving_an_announce_request { + use std::str::FromStr; + use std::time::Duration; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::auth::KeyId; + + use crate::http::asserts::{ + assert_invalid_authentication_key_error_response, assert_is_announce_response, + assert_peer_not_authenticated_error_response, + }; + use crate::http::client::Client; + use crate::http::requests::AnnounceQueryBuilder; + use crate::http::server::start_private_http_tracker; + + #[tokio::test] + async fn should_respond_to_peers_providing_a_valid_authentication_key() { + let http_tracker_server = start_private_http_tracker().await; + + let key = http_tracker_server + .tracker + .generate_auth_key(Duration::from_secs(60)) + .await + .unwrap(); + + let response = Client::authenticated(http_tracker_server.get_connection_info(), key.id()) + .announce(&AnnounceQueryBuilder::default().query()) + .await; + + assert_is_announce_response(response).await; + } + + #[tokio::test] + async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { + let http_tracker_server = start_private_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response =
Client::new(http_tracker_server.get_connection_info()) + .announce(&AnnounceQueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_peer_not_authenticated_error_response(response).await; + } + + #[tokio::test] + async fn should_fail_if_the_peer_authentication_key_is_not_valid() { + let http_tracker_server = start_private_http_tracker().await; + + // The tracker does not have this key + let unregistered_key_id = KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key_id) + .announce(&AnnounceQueryBuilder::default().query()) + .await; + + assert_invalid_authentication_key_error_response(response).await; + } + } mod receiving_an_scrape_request {} } From e1765f315b5e2f4d3f30ed3678865e3dd4b6ceda Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 27 Jan 2023 16:25:52 +0000 Subject: [PATCH 286/435] fix: new clippy errors --- src/apis/middlewares/auth.rs | 5 +---- src/apis/responses.rs | 3 +-- src/tracker/services/torrent.rs | 9 +++------ tests/api/asserts.rs | 5 +---- 4 files changed, 6 insertions(+), 16 deletions(-) diff --git a/src/apis/middlewares/auth.rs b/src/apis/middlewares/auth.rs index e54311d33..758ba1cda 100644 --- a/src/apis/middlewares/auth.rs +++ b/src/apis/middlewares/auth.rs @@ -25,10 +25,7 @@ pub async fn auth( where B: Send, { - let token = match params.token { - None => return AuthError::Unauthorized.into_response(), - Some(token) => token, - }; + let Some(token) = params.token else { return AuthError::Unauthorized.into_response() }; if !authenticate(&token, &config.http_api) { return AuthError::TokenNotValid.into_response(); diff --git a/src/apis/responses.rs b/src/apis/responses.rs index b3d4cbd59..b150b4bff 100644 --- a/src/apis/responses.rs +++ b/src/apis/responses.rs @@ -86,8 +86,7 @@ pub fn ok_response() -> Response { #[must_use] pub fn invalid_info_hash_param_response(info_hash: &str) -> Response { 
bad_request_response(&format!( - "Invalid URL: invalid infohash param: string \"{}\", expected a 40 character long string", - info_hash + "Invalid URL: invalid infohash param: string \"{info_hash}\", expected a 40 character long string" )) } diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index a08fd54d1..ba66d15f4 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -74,12 +74,9 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op let torrent_entry_option = db.get(info_hash); - let torrent_entry = match torrent_entry_option { - Some(torrent_entry) => torrent_entry, - None => { - return None; - } - }; + let Some(torrent_entry) = torrent_entry_option else { + return None; + }; let (seeders, completed, leechers) = torrent_entry.get_stats(); diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index 07383f795..5f9d39705 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -66,10 +66,7 @@ pub async fn assert_torrent_not_known(response: Response) { pub async fn assert_invalid_infohash_param(response: Response, invalid_infohash: &str) { assert_bad_request( response, - &format!( - "Invalid URL: invalid infohash param: string \"{}\", expected a 40 character long string", - invalid_infohash - ), + &format!("Invalid URL: invalid infohash param: string \"{invalid_infohash}\", expected a 40 character long string"), ) .await; } From fcd60e221e87b2d43f3422227154b6e5825c0c73 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 27 Jan 2023 18:50:41 +0000 Subject: [PATCH 287/435] fix(http): Display impl for tracker::peer:ID ``` peer::Id(*b"-qB00000000000000000").to_string() ``` was always returning an empty string. It has been changed to return the Hex representations of the byte array. 
--- src/apis/resources/peer.rs | 2 +- src/tracker/peer.rs | 56 +++++++++++++++++++++++------- tests/http_tracker.rs | 70 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 115 insertions(+), 13 deletions(-) diff --git a/src/apis/resources/peer.rs b/src/apis/resources/peer.rs index ff84be197..5284d26f6 100644 --- a/src/apis/resources/peer.rs +++ b/src/apis/resources/peer.rs @@ -24,7 +24,7 @@ pub struct Id { impl From for Id { fn from(peer_id: tracker::peer::Id) -> Self { Id { - id: peer_id.get_id(), + id: peer_id.to_hex_string(), client: peer_id.get_client_name().map(std::string::ToString::to_string), } } diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index dc362c5bd..5da894f54 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -93,10 +93,8 @@ pub struct Id(pub [u8; 20]); impl std::fmt::Display for Id { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let mut buffer = [0u8; 20]; - let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok(); - match bytes_out { - Some(bytes) => write!(f, "{}", std::str::from_utf8(bytes).unwrap()), + match self.to_hex_string() { + Some(hex) => write!(f, "{hex}"), None => write!(f, ""), } } @@ -104,14 +102,36 @@ impl std::fmt::Display for Id { impl Id { #[must_use] + /// Converts to hex string. + /// + /// For the Id `-qB00000000000000000` ti returns `2d71423030303030303030303030303030303030` + /// + /// For example: + /// + ///```text + /// Bytes = Hex + /// -qB00000000000000000 = 2d71423030303030303030303030303030303030 + /// -qB00000000000000000 = 2d 71 42 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 + /// + /// ------------- + /// |Char | Hex | + /// ------------- + /// | - | 2D | + /// | q | 71 | + /// | B | 42 | + /// | 0 | 30 | + /// ------------- + /// ``` + /// + /// Return `None` is some of the bytes are invalid UTF8 values. + /// /// # Panics /// /// It will panic if the `binascii::bin2hex` from a too-small output buffer. 
- pub fn get_id(&self) -> Option { + pub fn to_hex_string(&self) -> Option { let buff_size = self.0.len() * 2; let mut tmp: Vec = vec![0; buff_size]; binascii::bin2hex(&self.0, &mut tmp).unwrap(); - std::str::from_utf8(&tmp).ok().map(std::string::ToString::to_string) } @@ -206,7 +226,7 @@ impl Serialize for Id { } let obj = PeerIdInfo { - id: self.get_id(), + id: self.to_hex_string(), client: self.get_client_name(), }; obj.serialize(serializer) @@ -220,13 +240,25 @@ mod test { use crate::tracker::peer; #[test] - fn should_be_converted_into_string() { - // todo: it seems it's not working + fn should_be_converted_to_hex_string() { + let id = peer::Id(*b"-qB00000000000000000"); + assert_eq!(id.to_hex_string().unwrap(), "2d71423030303030303030303030303030303030"); + + let id = peer::Id([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + assert_eq!(id.to_hex_string().unwrap(), "009f9296009f9296009f9296009f9296009f9296"); + } + + #[test] + fn should_be_converted_into_string_type_using_the_hex_string_format() { let id = peer::Id(*b"-qB00000000000000000"); - assert_eq!(id.to_string(), ""); + assert_eq!(id.to_string(), "2d71423030303030303030303030303030303030"); - let id = peer::Id(*b"-qB00000000000000001"); - assert_eq!(id.to_string(), ""); + let id = peer::Id([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + assert_eq!(id.to_string(), "009f9296009f9296009f9296009f9296009f9296"); } } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 65f42f415..cec0e4f88 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -5,6 +5,19 @@ mod common; mod http; mod http_tracker_server { + use std::str::FromStr; + + use percent_encoding::NON_ALPHANUMERIC; + use torrust_tracker::protocol::info_hash::InfoHash; + + #[test] + fn calculate_info_hash_param() { + let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); + + let param = 
percent_encoding::percent_encode(&info_hash.0, NON_ALPHANUMERIC).to_string(); + + assert_eq!(param, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"); + } mod for_all_config_modes { @@ -796,3 +809,60 @@ mod http_tracker_server { mod receiving_an_scrape_request {} } } + +mod percent_encoding { + // todo: these operations are used in the HTTP tracker but they have not been extracted into independent functions. + // These tests document the operations. This behavior could be move to some functions int he future if they are extracted. + + use std::str::FromStr; + + use percent_encoding::NON_ALPHANUMERIC; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + + #[test] + fn how_to_encode_an_info_hash() { + let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); + + let encoded_info_hash = percent_encoding::percent_encode(&info_hash.0, NON_ALPHANUMERIC).to_string(); + + assert_eq!(encoded_info_hash, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"); + } + + #[test] + fn how_to_decode_an_info_hash() { + let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; + + let info_hash_bytes = percent_encoding::percent_decode_str(encoded_infohash).collect::>(); + let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes)).unwrap(); + + assert_eq!( + info_hash, + InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap() + ); + } + + #[test] + fn how_to_encode_a_peer_id() { + let peer_id = peer::Id(*b"-qB00000000000000000"); + + let encoded_peer_id = percent_encoding::percent_encode(&peer_id.0, NON_ALPHANUMERIC).to_string(); + + assert_eq!(encoded_peer_id, "%2DqB00000000000000000"); + } + + #[test] + fn how_to_decode_a_peer_id() { + let encoded_peer_id = "%2DqB00000000000000000"; + + let bytes_vec = percent_encoding::percent_decode_str(encoded_peer_id).collect::>(); + + // Clone peer_id_bytes into fixed length array + let mut peer_id_bytes: [u8; 20] = 
Default::default(); + peer_id_bytes.clone_from_slice(bytes_vec.as_slice()); + + let peer_id = peer::Id(peer_id_bytes); + + assert_eq!(peer_id, peer::Id(*b"-qB00000000000000000")); + } +} From 953a1000d176ba8d28d7b6e3cd26b1dc723ca5a7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 30 Jan 2023 11:08:36 +0000 Subject: [PATCH 288/435] docs(http): [#159] add links to info about scrape requests --- cSpell.json | 1 + tests/http_tracker.rs | 14 +++++++++++--- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/cSpell.json b/cSpell.json index 0d0b73701..5d0a6e1f1 100644 --- a/cSpell.json +++ b/cSpell.json @@ -70,6 +70,7 @@ "untuple", "uroot", "Vagaa", + "Vuze", "Xtorrent", "Xunlei" ] diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index cec0e4f88..91c48c09c 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -674,14 +674,22 @@ mod http_tracker_server { } mod receiving_an_scrape_request { + + // Scrape specification: + // + // BEP 48. Tracker Protocol Extension: Scrape + // https://www.bittorrent.org/beps/bep_0048.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Scrape + use crate::http::asserts::assert_internal_server_error_response; use crate::http::client::Client; - use crate::http::server::start_default_http_tracker; + use crate::http::server::start_public_http_tracker; #[tokio::test] async fn should_fail_when_the_request_is_empty() { - let http_tracker_server = start_default_http_tracker().await; - + let http_tracker_server = start_public_http_tracker().await; let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; assert_internal_server_error_response(response).await; From d7610eff410fae2ee2d715fc281e0c36cb92886a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 30 Jan 2023 11:52:30 +0000 Subject: [PATCH 289/435] refactor(http): [#159] move mods to folders We will use one mod per type of request and response. 
--- tests/http/asserts.rs | 9 +-- tests/http/client.rs | 8 +- .../{requests.rs => requests/announce.rs} | 30 +++---- tests/http/requests/mod.rs | 1 + .../{responses.rs => responses/announce.rs} | 8 +- tests/http/responses/mod.rs | 1 + tests/http_tracker.rs | 79 ++++++++++--------- 7 files changed, 69 insertions(+), 67 deletions(-) rename tests/http/{requests.rs => requests/announce.rs} (93%) create mode 100644 tests/http/requests/mod.rs rename tests/http/{responses.rs => responses/announce.rs} (94%) create mode 100644 tests/http/responses/mod.rs diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index cf4683a7b..ec31b1ee4 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -1,7 +1,6 @@ use reqwest::Response; -use super::responses::{Announce, DecodedCompactAnnounce}; -use crate::http::responses::{CompactAnnounce, Error}; +use super::responses::announce::{Announce, Compact, DecodedCompact, Error}; pub async fn assert_empty_announce_response(response: Response) { assert_eq!(response.status(), 200); @@ -22,18 +21,18 @@ pub async fn assert_announce_response(response: Response, expected_announce_resp /// ```text /// b"d8:intervali120e12:min intervali120e8:completei2e10:incompletei0e5:peers6:~\0\0\x01\x1f\x90e6:peers60:e" /// ``` -pub async fn assert_compact_announce_response(response: Response, expected_response: &DecodedCompactAnnounce) { +pub async fn assert_compact_announce_response(response: Response, expected_response: &DecodedCompact) { assert_eq!(response.status(), 200); let bytes = response.bytes().await.unwrap(); - let compact_announce: CompactAnnounce = serde_bencode::from_bytes(&bytes).unwrap_or_else(|_| { + let compact_announce: Compact = serde_bencode::from_bytes(&bytes).unwrap_or_else(|_| { panic!( "response body should be a valid compact announce response, got \"{:?}\"", &bytes ) }); - let actual_response = DecodedCompactAnnounce::from(compact_announce); + let actual_response = DecodedCompact::from(compact_announce); 
assert_eq!(actual_response, *expected_response); } diff --git a/tests/http/client.rs b/tests/http/client.rs index 062484e83..2d53463dd 100644 --- a/tests/http/client.rs +++ b/tests/http/client.rs @@ -4,7 +4,7 @@ use reqwest::{Client as ReqwestClient, Response}; use torrust_tracker::tracker::auth::KeyId; use super::connection_info::ConnectionInfo; -use super::requests::AnnounceQuery; +use super::requests::announce::Query; /// HTTP Tracker Client pub struct Client { @@ -47,11 +47,11 @@ impl Client { } } - pub async fn announce(&self, query: &AnnounceQuery) -> Response { + pub async fn announce(&self, query: &Query) -> Response { self.get(&self.build_announce_path_and_query(query)).await } - pub async fn announce_with_header(&self, query: &AnnounceQuery, key_id: &str, value: &str) -> Response { + pub async fn announce_with_header(&self, query: &Query, key_id: &str, value: &str) -> Response { self.get_with_header(&self.build_announce_path_and_query(query), key_id, value) .await } @@ -69,7 +69,7 @@ impl Client { .unwrap() } - fn build_announce_path_and_query(&self, query: &AnnounceQuery) -> String { + fn build_announce_path_and_query(&self, query: &Query) -> String { format!("{}?{query}", self.build_path("announce")) } diff --git a/tests/http/requests.rs b/tests/http/requests/announce.rs similarity index 93% rename from tests/http/requests.rs rename to tests/http/requests/announce.rs index 9135020e9..8fe43348f 100644 --- a/tests/http/requests.rs +++ b/tests/http/requests/announce.rs @@ -7,7 +7,7 @@ use serde_repr::Serialize_repr; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Id; -pub struct AnnounceQuery { +pub struct Query { pub info_hash: ByteArray20, pub peer_addr: IpAddr, pub downloaded: BaseTenASCII, @@ -19,7 +19,7 @@ pub struct AnnounceQuery { pub compact: Option, } -impl fmt::Display for AnnounceQuery { +impl fmt::Display for Query { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.build()) } @@ 
-30,7 +30,7 @@ impl fmt::Display for AnnounceQuery { /// /// /// Some parameters in the specification are not implemented in this tracker yet. -impl AnnounceQuery { +impl Query { /// It builds the URL query component for the announce request. /// /// This custom URL query params encoding is needed because `reqwest` does not allow @@ -41,8 +41,8 @@ impl AnnounceQuery { self.params().to_string() } - pub fn params(&self) -> AnnounceQueryParams { - AnnounceQueryParams::from(self) + pub fn params(&self) -> QueryParams { + QueryParams::from(self) } } @@ -82,13 +82,13 @@ impl fmt::Display for Compact { } } -pub struct AnnounceQueryBuilder { - announce_query: AnnounceQuery, +pub struct QueryBuilder { + announce_query: Query, } -impl AnnounceQueryBuilder { - pub fn default() -> AnnounceQueryBuilder { - let default_announce_query = AnnounceQuery { +impl QueryBuilder { + pub fn default() -> QueryBuilder { + let default_announce_query = Query { info_hash: InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0, peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), downloaded: 0, @@ -129,7 +129,7 @@ impl AnnounceQueryBuilder { self } - pub fn query(self) -> AnnounceQuery { + pub fn query(self) -> Query { self.announce_query } } @@ -150,7 +150,7 @@ impl AnnounceQueryBuilder { /// event=completed /// compact=0 /// ``` -pub struct AnnounceQueryParams { +pub struct QueryParams { pub info_hash: Option, pub peer_addr: Option, pub downloaded: Option, @@ -162,7 +162,7 @@ pub struct AnnounceQueryParams { pub compact: Option, } -impl std::fmt::Display for AnnounceQueryParams { +impl std::fmt::Display for QueryParams { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let mut params = vec![]; @@ -204,8 +204,8 @@ impl std::fmt::Display for AnnounceQueryParams { } } -impl AnnounceQueryParams { - pub fn from(announce_query: &AnnounceQuery) -> Self { +impl QueryParams { + pub fn from(announce_query: &Query) -> Self { let event = 
announce_query.event.as_ref().map(std::string::ToString::to_string); let compact = announce_query.compact.as_ref().map(std::string::ToString::to_string); diff --git a/tests/http/requests/mod.rs b/tests/http/requests/mod.rs new file mode 100644 index 000000000..74894de33 --- /dev/null +++ b/tests/http/requests/mod.rs @@ -0,0 +1 @@ +pub mod announce; diff --git a/tests/http/responses.rs b/tests/http/responses/announce.rs similarity index 94% rename from tests/http/responses.rs rename to tests/http/responses/announce.rs index 7cf283916..6bdc82cdd 100644 --- a/tests/http/responses.rs +++ b/tests/http/responses/announce.rs @@ -31,7 +31,7 @@ impl From for DictionaryPeer { } #[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct CompactAnnounce { +pub struct Compact { pub complete: u32, pub incomplete: u32, pub interval: u32, @@ -42,7 +42,7 @@ pub struct CompactAnnounce { } #[derive(Debug, PartialEq)] -pub struct DecodedCompactAnnounce { +pub struct DecodedCompact { // code-review: there could be a way to deserialize this struct directly // by using serde instead of doing it manually. Or at least using a custom deserializer. 
pub complete: u32, @@ -88,8 +88,8 @@ impl CompactPeer { } } -impl From for DecodedCompactAnnounce { - fn from(compact_announce: CompactAnnounce) -> Self { +impl From for DecodedCompact { + fn from(compact_announce: Compact) -> Self { let mut peers = vec![]; for peer_bytes in compact_announce.peers.chunks_exact(6) { diff --git a/tests/http/responses/mod.rs b/tests/http/responses/mod.rs new file mode 100644 index 000000000..74894de33 --- /dev/null +++ b/tests/http/responses/mod.rs @@ -0,0 +1 @@ +pub mod announce; diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 91c48c09c..b315f82c2 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -37,8 +37,9 @@ mod http_tracker_server { assert_invalid_peer_id_error_response, assert_is_announce_response, }; use crate::http::client::Client; - use crate::http::requests::{AnnounceQueryBuilder, Compact}; - use crate::http::responses::{self, Announce, CompactAnnounce, CompactPeer, CompactPeerList, DictionaryPeer}; + use crate::http::requests::announce::{Compact, QueryBuilder}; + use crate::http::responses; + use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; use crate::http::server::{ start_default_http_tracker, start_http_tracker_on_reverse_proxy, start_http_tracker_with_external_ip, start_ipv6_http_tracker, start_public_http_tracker, @@ -48,7 +49,7 @@ mod http_tracker_server { async fn should_respond_when_only_the_mandatory_fields_are_provided() { let http_tracker_server = start_default_http_tracker().await; - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); params.remove_optional_params(); @@ -74,7 +75,7 @@ mod http_tracker_server { // Without `info_hash` param - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); params.info_hash = None; @@ -86,7 +87,7 @@ mod http_tracker_server { // Without `peer_id` param 
- let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); params.peer_id = None; @@ -98,7 +99,7 @@ mod http_tracker_server { // Without `port` param - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); params.port = None; @@ -113,7 +114,7 @@ mod http_tracker_server { async fn should_fail_when_the_info_hash_param_is_invalid() { let http_tracker_server = start_default_http_tracker().await; - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); for invalid_value in &invalid_info_hashes() { params.set("info_hash", invalid_value); @@ -135,7 +136,7 @@ mod http_tracker_server { let http_tracker_server = start_default_http_tracker().await; - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); @@ -150,7 +151,7 @@ mod http_tracker_server { async fn should_fail_when_the_downloaded_param_is_invalid() { let http_tracker_server = start_default_http_tracker().await; - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); let invalid_values = ["-1", "1.1", "a"]; @@ -169,7 +170,7 @@ mod http_tracker_server { async fn should_fail_when_the_uploaded_param_is_invalid() { let http_tracker_server = start_default_http_tracker().await; - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); let invalid_values = ["-1", "1.1", "a"]; @@ -188,7 +189,7 @@ mod http_tracker_server { async fn should_fail_when_the_peer_id_param_is_invalid() { let http_tracker_server = start_default_http_tracker().await; - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = 
QueryBuilder::default().query().params(); let invalid_values = [ "0", @@ -214,7 +215,7 @@ mod http_tracker_server { async fn should_fail_when_the_port_param_is_invalid() { let http_tracker_server = start_default_http_tracker().await; - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); let invalid_values = ["-1", "1.1", "a"]; @@ -233,7 +234,7 @@ mod http_tracker_server { async fn should_fail_when_the_left_param_is_invalid() { let http_tracker_server = start_default_http_tracker().await; - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); let invalid_values = ["-1", "1.1", "a"]; @@ -254,7 +255,7 @@ mod http_tracker_server { let http_tracker_server = start_default_http_tracker().await; - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); let invalid_values = [ "0", @@ -281,7 +282,7 @@ mod http_tracker_server { async fn should_not_fail_when_the_compact_param_is_invalid() { let http_tracker_server = start_default_http_tracker().await; - let mut params = AnnounceQueryBuilder::default().query().params(); + let mut params = QueryBuilder::default().query().params(); let invalid_values = ["-1", "1.1", "a"]; @@ -302,7 +303,7 @@ mod http_tracker_server { let response = Client::new(http_tracker_server.get_connection_info()) .announce( - &AnnounceQueryBuilder::default() + &QueryBuilder::default() .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) .query(), ) @@ -338,7 +339,7 @@ mod http_tracker_server { // Announce the new Peer 2. 
This new peer is non included on the response peer list let response = Client::new(http_tracker_server.get_connection_info()) .announce( - &AnnounceQueryBuilder::default() + &QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_id(&peer::Id(*b"-qB00000000000000002")) .query(), @@ -369,7 +370,7 @@ mod http_tracker_server { // Add a peer http_tracker_server.add_torrent(&info_hash, &peer).await; - let announce_query = AnnounceQueryBuilder::default() + let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_id(&peer.peer_id) .query(); @@ -403,7 +404,7 @@ mod http_tracker_server { // Announce the new Peer 2 accepting compact responses let response = Client::new(http_tracker_server.get_connection_info()) .announce( - &AnnounceQueryBuilder::default() + &QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_id(&peer::Id(*b"-qB00000000000000002")) .with_compact(Compact::Accepted) @@ -411,7 +412,7 @@ mod http_tracker_server { ) .await; - let expected_response = responses::DecodedCompactAnnounce { + let expected_response = responses::announce::DecodedCompact { complete: 2, incomplete: 0, interval: 120, @@ -444,7 +445,7 @@ mod http_tracker_server { // https://www.bittorrent.org/beps/bep_0023.html let response = Client::new(http_tracker_server.get_connection_info()) .announce( - &AnnounceQueryBuilder::default() + &QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_id(&peer::Id(*b"-qB00000000000000002")) .without_compact() @@ -457,7 +458,7 @@ mod http_tracker_server { async fn is_a_compact_announce_response(response: Response) -> bool { let bytes = response.bytes().await.unwrap(); - let compact_announce = serde_bencode::from_bytes::(&bytes); + let compact_announce = serde_bencode::from_bytes::(&bytes); compact_announce.is_ok() } @@ -466,7 +467,7 @@ mod http_tracker_server { let http_tracker_server = start_public_http_tracker().await; Client::new(http_tracker_server.get_connection_info()) - 
.announce(&AnnounceQueryBuilder::default().query()) + .announce(&QueryBuilder::default().query()) .await; let stats = http_tracker_server.tracker.get_stats().await; @@ -479,7 +480,7 @@ mod http_tracker_server { let http_tracker_server = start_ipv6_http_tracker().await; Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) - .announce(&AnnounceQueryBuilder::default().query()) + .announce(&QueryBuilder::default().query()) .await; let stats = http_tracker_server.tracker.get_stats().await; @@ -495,7 +496,7 @@ mod http_tracker_server { Client::new(http_tracker_server.get_connection_info()) .announce( - &AnnounceQueryBuilder::default() + &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) .query(), ) @@ -511,7 +512,7 @@ mod http_tracker_server { let http_tracker_server = start_public_http_tracker().await; Client::new(http_tracker_server.get_connection_info()) - .announce(&AnnounceQueryBuilder::default().query()) + .announce(&QueryBuilder::default().query()) .await; let stats = http_tracker_server.tracker.get_stats().await; @@ -524,7 +525,7 @@ mod http_tracker_server { let http_tracker_server = start_ipv6_http_tracker().await; Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) - .announce(&AnnounceQueryBuilder::default().query()) + .announce(&QueryBuilder::default().query()) .await; let stats = http_tracker_server.tracker.get_stats().await; @@ -540,7 +541,7 @@ mod http_tracker_server { Client::new(http_tracker_server.get_connection_info()) .announce( - &AnnounceQueryBuilder::default() + &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) .query(), ) @@ -560,7 +561,7 @@ mod http_tracker_server { let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); - let announce_query = AnnounceQueryBuilder::default() + let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) 
.with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) .query(); @@ -591,7 +592,7 @@ mod http_tracker_server { let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); - let announce_query = AnnounceQueryBuilder::default() + let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) .query(); @@ -624,7 +625,7 @@ mod http_tracker_server { let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); - let announce_query = AnnounceQueryBuilder::default() + let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) .query(); @@ -653,7 +654,7 @@ mod http_tracker_server { let client = Client::new(http_tracker_server.get_connection_info()); - let announce_query = AnnounceQueryBuilder::default().with_info_hash(&info_hash).query(); + let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); // todo: shouldn't be the the leftmost IP address? // THe application is taken the the rightmost IP address. 
See function http::filters::peer_addr @@ -706,7 +707,7 @@ mod http_tracker_server { use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; use crate::http::client::Client; - use crate::http::requests::AnnounceQueryBuilder; + use crate::http::requests::announce::QueryBuilder; use crate::http::server::start_whitelisted_http_tracker; #[tokio::test] @@ -716,7 +717,7 @@ mod http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let response = Client::new(http_tracker_server.get_connection_info()) - .announce(&AnnounceQueryBuilder::default().with_info_hash(&info_hash).query()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; assert_torrent_not_in_whitelist_error_response(response).await; @@ -735,7 +736,7 @@ mod http_tracker_server { .expect("should add the torrent to the whitelist"); let response = Client::new(http_tracker_server.get_connection_info()) - .announce(&AnnounceQueryBuilder::default().with_info_hash(&info_hash).query()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; assert_is_announce_response(response).await; @@ -759,7 +760,7 @@ mod http_tracker_server { assert_peer_not_authenticated_error_response, }; use crate::http::client::Client; - use crate::http::requests::AnnounceQueryBuilder; + use crate::http::requests::announce::QueryBuilder; use crate::http::server::start_private_http_tracker; #[tokio::test] @@ -773,7 +774,7 @@ mod http_tracker_server { .unwrap(); let response = Client::authenticated(http_tracker_server.get_connection_info(), key.id()) - .announce(&AnnounceQueryBuilder::default().query()) + .announce(&QueryBuilder::default().query()) .await; assert_is_announce_response(response).await; @@ -786,7 +787,7 @@ mod http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let response = 
Client::new(http_tracker_server.get_connection_info()) - .announce(&AnnounceQueryBuilder::default().with_info_hash(&info_hash).query()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; assert_peer_not_authenticated_error_response(response).await; @@ -800,7 +801,7 @@ mod http_tracker_server { let unregistered_key_id = KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key_id) - .announce(&AnnounceQueryBuilder::default().query()) + .announce(&QueryBuilder::default().query()) .await; assert_invalid_authentication_key_error_response(response).await; From dc304e74542e2237c42f5241754ed272399910a4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 30 Jan 2023 19:55:18 +0000 Subject: [PATCH 290/435] test(http): [#159] scaffolding to test scrape responses in http tracker --- tests/http/asserts.rs | 3 +- tests/http/bencode.rs | 1 + tests/http/client.rs | 15 ++++- tests/http/mod.rs | 1 + tests/http/requests/announce.rs | 3 +- tests/http/requests/mod.rs | 1 + tests/http/requests/scrape.rs | 108 +++++++++++++++++++++++++++++++ tests/http/responses/announce.rs | 6 -- tests/http/responses/error.rs | 7 ++ tests/http/responses/mod.rs | 2 + tests/http/responses/scrape.rs | 91 ++++++++++++++++++++++++++ tests/http_tracker.rs | 54 +++++++++++++++- 12 files changed, 280 insertions(+), 12 deletions(-) create mode 100644 tests/http/bencode.rs create mode 100644 tests/http/requests/scrape.rs create mode 100644 tests/http/responses/error.rs create mode 100644 tests/http/responses/scrape.rs diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index ec31b1ee4..c75af1f74 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -1,6 +1,7 @@ use reqwest::Response; -use super::responses::announce::{Announce, Compact, DecodedCompact, Error}; +use super::responses::announce::{Announce, Compact, DecodedCompact}; +use 
crate::http::responses::error::Error; pub async fn assert_empty_announce_response(response: Response) { assert_eq!(response.status(), 200); diff --git a/tests/http/bencode.rs b/tests/http/bencode.rs new file mode 100644 index 000000000..b67b278d7 --- /dev/null +++ b/tests/http/bencode.rs @@ -0,0 +1 @@ +pub type ByteArray20 = [u8; 20]; diff --git a/tests/http/client.rs b/tests/http/client.rs index 2d53463dd..b59cf2ac6 100644 --- a/tests/http/client.rs +++ b/tests/http/client.rs @@ -4,7 +4,8 @@ use reqwest::{Client as ReqwestClient, Response}; use torrust_tracker::tracker::auth::KeyId; use super::connection_info::ConnectionInfo; -use super::requests::announce::Query; +use super::requests::announce::{self, Query}; +use super::requests::scrape; /// HTTP Tracker Client pub struct Client { @@ -47,10 +48,14 @@ impl Client { } } - pub async fn announce(&self, query: &Query) -> Response { + pub async fn announce(&self, query: &announce::Query) -> Response { self.get(&self.build_announce_path_and_query(query)).await } + pub async fn scrape(&self, query: &scrape::Query) -> Response { + self.get(&self.build_scrape_path_and_query(query)).await + } + pub async fn announce_with_header(&self, query: &Query, key_id: &str, value: &str) -> Response { self.get_with_header(&self.build_announce_path_and_query(query), key_id, value) .await @@ -69,10 +74,14 @@ impl Client { .unwrap() } - fn build_announce_path_and_query(&self, query: &Query) -> String { + fn build_announce_path_and_query(&self, query: &announce::Query) -> String { format!("{}?{query}", self.build_path("announce")) } + fn build_scrape_path_and_query(&self, query: &scrape::Query) -> String { + format!("{}?{query}", self.build_path("scrape")) + } + fn build_path(&self, path: &str) -> String { match &self.key_id { Some(key_id) => format!("{path}/{key_id}"), diff --git a/tests/http/mod.rs b/tests/http/mod.rs index 2ab8b2c1c..87087026f 100644 --- a/tests/http/mod.rs +++ b/tests/http/mod.rs @@ -1,4 +1,5 @@ pub mod asserts; +pub 
mod bencode; pub mod client; pub mod connection_info; pub mod requests; diff --git a/tests/http/requests/announce.rs b/tests/http/requests/announce.rs index 8fe43348f..5656d8f1d 100644 --- a/tests/http/requests/announce.rs +++ b/tests/http/requests/announce.rs @@ -7,6 +7,8 @@ use serde_repr::Serialize_repr; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Id; +use crate::http::bencode::ByteArray20; + pub struct Query { pub info_hash: ByteArray20, pub peer_addr: IpAddr, @@ -47,7 +49,6 @@ impl Query { } pub type BaseTenASCII = u64; -pub type ByteArray20 = [u8; 20]; pub type PortNumber = u16; pub enum Event { diff --git a/tests/http/requests/mod.rs b/tests/http/requests/mod.rs index 74894de33..776d2dfbf 100644 --- a/tests/http/requests/mod.rs +++ b/tests/http/requests/mod.rs @@ -1 +1,2 @@ pub mod announce; +pub mod scrape; diff --git a/tests/http/requests/scrape.rs b/tests/http/requests/scrape.rs new file mode 100644 index 000000000..6198f1680 --- /dev/null +++ b/tests/http/requests/scrape.rs @@ -0,0 +1,108 @@ +use std::fmt; +use std::str::FromStr; + +use percent_encoding::NON_ALPHANUMERIC; +use torrust_tracker::protocol::info_hash::InfoHash; + +use crate::http::bencode::ByteArray20; + +pub struct Query { + pub info_hash: Vec, +} + +impl fmt::Display for Query { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.build()) + } +} + +/// HTTP Tracker Scrape Request: +/// +/// +impl Query { + /// It builds the URL query component for the scrape request. + /// + /// This custom URL query params encoding is needed because `reqwest` does not allow + /// bytes arrays in query parameters. 
More info on this issue: + /// + /// + pub fn build(&self) -> String { + self.params().to_string() + } + + pub fn params(&self) -> QueryParams { + QueryParams::from(self) + } +} + +pub struct QueryBuilder { + scrape_query: Query, +} + +impl QueryBuilder { + pub fn default() -> QueryBuilder { + let default_scrape_query = Query { + info_hash: [InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0].to_vec(), + }; + Self { + scrape_query: default_scrape_query, + } + } + + pub fn with_one_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.scrape_query.info_hash = [info_hash.0].to_vec(); + self + } + + pub fn query(self) -> Query { + self.scrape_query + } +} + +/// It contains all the GET parameters that can be used in a HTTP Scrape request. +/// +/// The `info_hash` param is the percent encoded of the the 20-byte array info hash. +/// +/// Sample Scrape URL with all the GET parameters: +/// +/// For `IpV4`: +/// +/// ```text +/// http://127.0.0.1:7070/scrape?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// ``` +/// +/// For `IpV6`: +/// +/// ```text +/// http://[::1]:7070/scrape?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// ``` +/// +/// You can add as many info hashes as you want, just adding the same param again. 
+pub struct QueryParams { + pub info_hash: Vec, +} + +impl std::fmt::Display for QueryParams { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let query = self + .info_hash + .iter() + .map(|info_hash| format!("info_hash={}", &info_hash)) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +impl QueryParams { + pub fn from(scrape_query: &Query) -> Self { + let info_hashes = scrape_query + .info_hash + .iter() + .map(|info_hash_bytes| percent_encoding::percent_encode(info_hash_bytes, NON_ALPHANUMERIC).to_string()) + .collect::>(); + + Self { info_hash: info_hashes } + } +} diff --git a/tests/http/responses/announce.rs b/tests/http/responses/announce.rs index 6bdc82cdd..838a0b41c 100644 --- a/tests/http/responses/announce.rs +++ b/tests/http/responses/announce.rs @@ -105,9 +105,3 @@ impl From for DecodedCompact { } } } - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct Error { - #[serde(rename = "failure reason")] - pub failure_reason: String, -} diff --git a/tests/http/responses/error.rs b/tests/http/responses/error.rs new file mode 100644 index 000000000..12c53a0cf --- /dev/null +++ b/tests/http/responses/error.rs @@ -0,0 +1,7 @@ +use serde::{self, Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Error { + #[serde(rename = "failure reason")] + pub failure_reason: String, +} diff --git a/tests/http/responses/mod.rs b/tests/http/responses/mod.rs index 74894de33..bdc689056 100644 --- a/tests/http/responses/mod.rs +++ b/tests/http/responses/mod.rs @@ -1 +1,3 @@ pub mod announce; +pub mod error; +pub mod scrape; diff --git a/tests/http/responses/scrape.rs b/tests/http/responses/scrape.rs new file mode 100644 index 000000000..450006815 --- /dev/null +++ b/tests/http/responses/scrape.rs @@ -0,0 +1,91 @@ +use std::collections::HashMap; +use std::str; + +use serde::{self, Deserialize, Serialize}; +use serde_bencode::value::Value; + +use crate::http::bencode::ByteArray20; + 
+#[derive(Debug, PartialEq)] +pub struct Response { + pub files: HashMap, +} + +impl Response { + pub fn from_bytes(bytes: &[u8]) -> Self { + let scrape_response: DeserializedResponse = serde_bencode::from_bytes(bytes).unwrap(); + Self::from(scrape_response) + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct File { + pub complete: i64, + pub downloaded: i64, + pub incomplete: i64, +} + +impl From for Response { + fn from(scrape_response: DeserializedResponse) -> Self { + // todo: + // - Use `try_from` trait instead of `from`. + // - Improve error messages. + // - Extract parser function out of the trait. + // - Extract parser for each nested element. + // - Extract function to instantiate [u8; 20] from Vec. + let mut files: HashMap = HashMap::new(); + + match scrape_response.files { + Value::Dict(dict) => { + for file_element in dict { + let info_hash_byte_vec = file_element.0; + let file_value = file_element.1; + + let file = match &file_value { + Value::Dict(dict) => { + let mut file = File { + complete: 0, + downloaded: 0, + incomplete: 0, + }; + + for file_field in dict { + let value = match file_field.1 { + Value::Int(number) => *number, + _ => panic!("Error parsing bencoded scrape response. Invalid value. Expected "), + }; + + if file_field.0 == b"complete" { + file.complete = value; + } else if file_field.0 == b"downloaded" { + file.downloaded = value; + } else if file_field.0 == b"incomplete" { + file.incomplete = value; + } else { + panic!("Error parsing bencoded scrape response. Invalid field"); + } + } + + file + } + _ => panic!("Error parsing bencoded scrape response. Invalid value. Expected "), + }; + + // Clone Vec into [u8; 20] + let mut info_hash_byte_array: [u8; 20] = Default::default(); + info_hash_byte_array.clone_from_slice(info_hash_byte_vec.as_slice()); + + files.insert(info_hash_byte_array, file); + } + } + _ => panic!("Error parsing bencoded scrape response. Invalid value. 
Expected "), + } + + Self { files } + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +struct DeserializedResponse { + pub files: Value, +} diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index b315f82c2..d2272fc31 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -676,7 +676,7 @@ mod http_tracker_server { mod receiving_an_scrape_request { - // Scrape specification: + // Scrape documentation: // // BEP 48. Tracker Protocol Extension: Scrape // https://www.bittorrent.org/beps/bep_0048.html @@ -684,8 +684,17 @@ mod http_tracker_server { // Vuze (bittorrent client) docs: // https://wiki.vuze.com/w/Scrape + use std::collections::HashMap; + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::PeerBuilder; use crate::http::asserts::assert_internal_server_error_response; use crate::http::client::Client; + use crate::http::requests; + use crate::http::responses::scrape::{File, Response}; use crate::http::server::start_public_http_tracker; #[tokio::test] @@ -695,6 +704,49 @@ mod http_tracker_server { assert_internal_server_error_response(response).await; } + + #[tokio::test] + async fn should_return_the_scrape_response() { + let http_tracker_server = start_public_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .into(); + + // Add the Peer + http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + + // Scrape the tracker + let response = Client::new(http_tracker_server.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + // todo: extract scrape response builder or named constructor. 
+ // A builder with an "add_file(info_hash_bytes: &[u8], file: File)" method could be a good solution. + let mut files = HashMap::new(); + files.insert( + info_hash.0, + File { + complete: 1, + downloaded: 0, + incomplete: 0, + }, + ); + let expected_scrape_response = Response { files }; + + // todo: extract assert + assert_eq!(response.status(), 200); + let bytes = response.bytes().await.unwrap(); + let scrape_response = Response::from_bytes(&bytes); + assert_eq!(scrape_response, expected_scrape_response); + } } } From 2754189ed63dc715712279b4442f4922ce403271 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 30 Jan 2023 20:27:58 +0000 Subject: [PATCH 291/435] refactor(http): [#159] rename struct in announce responses to follow new scrape conventions Deserialization from bencoded bytes for announce and scrape request is done in two phases. First using `serde_bencode::from_bytes` and later with a custom parser. The reason is the `serde_bencode` crate does not allow direct deserialization for the strcuts we need. The strcut resulting from the first deserialization done by `serde_bencode` is the `DeserializedCompact` and the second one just `Compact`. So the prefix `Deserialized` is used when the bytes in the reponse body are converted into a struct. 
--- src/protocol/info_hash.rs | 8 ++++++++ tests/http/asserts.rs | 10 ++++++---- tests/http/responses/announce.rs | 8 ++++---- tests/http_tracker.rs | 6 +++--- 4 files changed, 21 insertions(+), 11 deletions(-) diff --git a/src/protocol/info_hash.rs b/src/protocol/info_hash.rs index 3d2fad1a5..83a595c1f 100644 --- a/src/protocol/info_hash.rs +++ b/src/protocol/info_hash.rs @@ -1,6 +1,14 @@ #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] pub struct InfoHash(pub [u8; 20]); +impl InfoHash { + /// For readability, when accessing the bytes array + #[must_use] + pub fn bytes(&self) -> [u8; 20] { + self.0 + } +} + impl std::fmt::Display for InfoHash { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let mut chars = [0u8; 40]; diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index c75af1f74..4e2214317 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -1,6 +1,6 @@ use reqwest::Response; -use super::responses::announce::{Announce, Compact, DecodedCompact}; +use super::responses::announce::{Announce, Compact, DeserializedCompact}; use crate::http::responses::error::Error; pub async fn assert_empty_announce_response(response: Response) { @@ -22,18 +22,20 @@ pub async fn assert_announce_response(response: Response, expected_announce_resp /// ```text /// b"d8:intervali120e12:min intervali120e8:completei2e10:incompletei0e5:peers6:~\0\0\x01\x1f\x90e6:peers60:e" /// ``` -pub async fn assert_compact_announce_response(response: Response, expected_response: &DecodedCompact) { +pub async fn assert_compact_announce_response(response: Response, expected_response: &Compact) { assert_eq!(response.status(), 200); let bytes = response.bytes().await.unwrap(); - let compact_announce: Compact = serde_bencode::from_bytes(&bytes).unwrap_or_else(|_| { + // todo: move to DeserializedCompact constructor and make DeserializedCompact struct private + let compact_announce: DeserializedCompact = serde_bencode::from_bytes(&bytes).unwrap_or_else(|_| { panic!( 
"response body should be a valid compact announce response, got \"{:?}\"", &bytes ) }); - let actual_response = DecodedCompact::from(compact_announce); + + let actual_response = Compact::from(compact_announce); assert_eq!(actual_response, *expected_response); } diff --git a/tests/http/responses/announce.rs b/tests/http/responses/announce.rs index 838a0b41c..85f0347cc 100644 --- a/tests/http/responses/announce.rs +++ b/tests/http/responses/announce.rs @@ -31,7 +31,7 @@ impl From for DictionaryPeer { } #[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct Compact { +pub struct DeserializedCompact { pub complete: u32, pub incomplete: u32, pub interval: u32, @@ -42,7 +42,7 @@ pub struct Compact { } #[derive(Debug, PartialEq)] -pub struct DecodedCompact { +pub struct Compact { // code-review: there could be a way to deserialize this struct directly // by using serde instead of doing it manually. Or at least using a custom deserializer. pub complete: u32, @@ -88,8 +88,8 @@ impl CompactPeer { } } -impl From for DecodedCompact { - fn from(compact_announce: Compact) -> Self { +impl From for Compact { + fn from(compact_announce: DeserializedCompact) -> Self { let mut peers = vec![]; for peer_bytes in compact_announce.peers.chunks_exact(6) { diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index d2272fc31..3e1391c63 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -412,7 +412,7 @@ mod http_tracker_server { ) .await; - let expected_response = responses::announce::DecodedCompact { + let expected_response = responses::announce::Compact { complete: 2, incomplete: 0, interval: 120, @@ -458,7 +458,7 @@ mod http_tracker_server { async fn is_a_compact_announce_response(response: Response) -> bool { let bytes = response.bytes().await.unwrap(); - let compact_announce = serde_bencode::from_bytes::(&bytes); + let compact_announce = serde_bencode::from_bytes::(&bytes); compact_announce.is_ok() } @@ -732,7 +732,7 @@ mod http_tracker_server { // A 
builder with an "add_file(info_hash_bytes: &[u8], file: File)" method could be a good solution. let mut files = HashMap::new(); files.insert( - info_hash.0, + info_hash.bytes(), File { complete: 1, downloaded: 0, From c89a1f38ff96bacc4830c71824f2c7e2871ac838 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 30 Jan 2023 20:51:04 +0000 Subject: [PATCH 292/435] fix(http): [#159] minor text fixes --- src/tracker/peer.rs | 2 +- tests/http/server.rs | 6 +++--- tests/http_tracker.rs | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 5da894f54..22889381f 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -104,7 +104,7 @@ impl Id { #[must_use] /// Converts to hex string. /// - /// For the Id `-qB00000000000000000` ti returns `2d71423030303030303030303030303030303030` + /// For the Id `-qB00000000000000000` it returns `2d71423030303030303030303030303030303030` /// /// For example: /// diff --git a/tests/http/server.rs b/tests/http/server.rs index 6741d8e97..e48ecd88d 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -12,21 +12,21 @@ use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use super::connection_info::ConnectionInfo; -/// Starts a HTTP tracker with mode "public" +/// Starts a HTTP tracker with mode "public" in settings pub async fn start_public_http_tracker() -> Server { let mut configuration = ephemeral_configuration(); configuration.mode = Mode::Public; start_custom_http_tracker(Arc::new(configuration)).await } -/// Starts a HTTP tracker with mode "listed" +/// Starts a HTTP tracker with mode "listed" in settings pub async fn start_whitelisted_http_tracker() -> Server { let mut configuration = ephemeral_configuration(); configuration.mode = Mode::Listed; start_custom_http_tracker(Arc::new(configuration)).await } -/// Starts a HTTP tracker with mode "listed" +/// Starts a HTTP tracker with mode "private" in settings pub async fn 
start_private_http_tracker() -> Server { let mut configuration = ephemeral_configuration(); configuration.mode = Mode::Private; diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 3e1391c63..099e1d360 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -46,7 +46,7 @@ mod http_tracker_server { }; #[tokio::test] - async fn should_respond_when_only_the_mandatory_fields_are_provided() { + async fn should_respond_if_only_the_mandatory_fields_are_provided() { let http_tracker_server = start_default_http_tracker().await; let mut params = QueryBuilder::default().query().params(); @@ -251,7 +251,7 @@ mod http_tracker_server { #[tokio::test] async fn should_not_fail_when_the_event_param_is_invalid() { - // All invalid values are ignored as if the `event` param was empty + // All invalid values are ignored as if the `event` param were empty let http_tracker_server = start_default_http_tracker().await; From 7ee588a565e8746f6195adfea890ee78a7e96dd7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 31 Jan 2023 18:20:27 +0000 Subject: [PATCH 293/435] refactor(test): [#159] refactor tests for scrape request --- cSpell.json | 4 +- src/tracker/peer.rs | 2 +- tests/common/fixtures.rs | 12 ++ tests/http/asserts.rs | 16 ++- tests/http/bencode.rs | 14 ++ tests/http/requests/announce.rs | 2 +- tests/http/responses/scrape.rs | 223 +++++++++++++++++++++++--------- tests/http_tracker.rs | 76 +++++------ 8 files changed, 235 insertions(+), 114 deletions(-) diff --git a/cSpell.json b/cSpell.json index 5d0a6e1f1..dc51c87c5 100644 --- a/cSpell.json +++ b/cSpell.json @@ -72,6 +72,8 @@ "Vagaa", "Vuze", "Xtorrent", - "Xunlei" + "Xunlei", + "xxxxxxxxxxxxxxxxxxxxd", + "yyyyyyyyyyyyyyyyyyyyd" ] } diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 22889381f..3f639f970 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -20,7 +20,7 @@ pub struct Peer { #[serde(with = "NumberOfBytesDef")] pub downloaded: NumberOfBytes, #[serde(with = 
"NumberOfBytesDef")] - pub left: NumberOfBytes, + pub left: NumberOfBytes, // The number of bytes this peer still has to download #[serde(with = "AnnounceEventDef")] pub event: AnnounceEvent, } diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index 0ff6798f6..5e644c45f 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -21,6 +21,18 @@ impl PeerBuilder { self } + #[allow(dead_code)] + pub fn with_bytes_pending_to_download(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes(left); + self + } + + #[allow(dead_code)] + pub fn build(self) -> Peer { + self.into() + } + + #[allow(dead_code)] pub fn into(self) -> Peer { self.peer } diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index 4e2214317..b8ccfee22 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -1,6 +1,7 @@ use reqwest::Response; use super::responses::announce::{Announce, Compact, DeserializedCompact}; +use super::responses::scrape; use crate::http::responses::error::Error; pub async fn assert_empty_announce_response(response: Response) { @@ -17,7 +18,7 @@ pub async fn assert_announce_response(response: Response, expected_announce_resp assert_eq!(announce_response, *expected_announce_response); } -/// Sample bencoded response as byte array: +/// Sample bencoded announce response as byte array: /// /// ```text /// b"d8:intervali120e12:min intervali120e8:completei2e10:incompletei0e5:peers6:~\0\0\x01\x1f\x90e6:peers60:e" @@ -40,6 +41,19 @@ pub async fn assert_compact_announce_response(response: Response, expected_respo assert_eq!(actual_response, *expected_response); } +/// Sample bencoded scrape response as byte array: +/// +/// ```text +/// b"d5:filesd20:\x9c8B\"\x13\xe3\x0b\xff!+0\xc3`\xd2o\x9a\x02\x13d\"d8:completei1e10:downloadedi0e10:incompletei0eeee" +/// ``` +pub async fn assert_scrape_response(response: Response, expected_response: &scrape::Response) { + assert_eq!(response.status(), 200); + + let scrape_response = 
scrape::Response::try_from_bytes(&response.bytes().await.unwrap()).unwrap(); + + assert_eq!(scrape_response, *expected_response); +} + pub async fn assert_is_announce_response(response: Response) { assert_eq!(response.status(), 200); let body = response.text().await.unwrap(); diff --git a/tests/http/bencode.rs b/tests/http/bencode.rs index b67b278d7..d107089cf 100644 --- a/tests/http/bencode.rs +++ b/tests/http/bencode.rs @@ -1 +1,15 @@ pub type ByteArray20 = [u8; 20]; + +pub struct InfoHash(ByteArray20); + +impl InfoHash { + pub fn new(vec: &[u8]) -> Self { + let mut byte_array_20: ByteArray20 = Default::default(); + byte_array_20.clone_from_slice(vec); + Self(byte_array_20) + } + + pub fn bytes(&self) -> ByteArray20 { + self.0 + } +} diff --git a/tests/http/requests/announce.rs b/tests/http/requests/announce.rs index 5656d8f1d..a8ebc95f8 100644 --- a/tests/http/requests/announce.rs +++ b/tests/http/requests/announce.rs @@ -225,7 +225,7 @@ impl QueryParams { pub fn remove_optional_params(&mut self) { // todo: make them optional with the Option<...> in the AnnounceQuery struct - // if they are really optional. SO that we can crete a minimal AnnounceQuery + // if they are really optional. So that we can crete a minimal AnnounceQuery // instead of removing the optional params afterwards. 
// // The original specification on: diff --git a/tests/http/responses/scrape.rs b/tests/http/responses/scrape.rs index 450006815..c9081a10f 100644 --- a/tests/http/responses/scrape.rs +++ b/tests/http/responses/scrape.rs @@ -4,84 +4,36 @@ use std::str; use serde::{self, Deserialize, Serialize}; use serde_bencode::value::Value; -use crate::http::bencode::ByteArray20; +use crate::http::bencode::{ByteArray20, InfoHash}; -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Default)] pub struct Response { pub files: HashMap, } impl Response { - pub fn from_bytes(bytes: &[u8]) -> Self { + pub fn try_from_bytes(bytes: &[u8]) -> Result { let scrape_response: DeserializedResponse = serde_bencode::from_bytes(bytes).unwrap(); - Self::from(scrape_response) + Self::try_from(scrape_response) + } + + pub fn empty() -> Self { + Self::default() } } #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct File { - pub complete: i64, - pub downloaded: i64, - pub incomplete: i64, + pub complete: i64, // The number of active peers that have completed downloading + pub downloaded: i64, // The number of peers that have ever completed downloading + pub incomplete: i64, // The number of active peers that have not completed downloading } -impl From for Response { - fn from(scrape_response: DeserializedResponse) -> Self { - // todo: - // - Use `try_from` trait instead of `from`. - // - Improve error messages. - // - Extract parser function out of the trait. - // - Extract parser for each nested element. - // - Extract function to instantiate [u8; 20] from Vec. 
- let mut files: HashMap = HashMap::new(); - - match scrape_response.files { - Value::Dict(dict) => { - for file_element in dict { - let info_hash_byte_vec = file_element.0; - let file_value = file_element.1; - - let file = match &file_value { - Value::Dict(dict) => { - let mut file = File { - complete: 0, - downloaded: 0, - incomplete: 0, - }; - - for file_field in dict { - let value = match file_field.1 { - Value::Int(number) => *number, - _ => panic!("Error parsing bencoded scrape response. Invalid value. Expected "), - }; - - if file_field.0 == b"complete" { - file.complete = value; - } else if file_field.0 == b"downloaded" { - file.downloaded = value; - } else if file_field.0 == b"incomplete" { - file.incomplete = value; - } else { - panic!("Error parsing bencoded scrape response. Invalid field"); - } - } - - file - } - _ => panic!("Error parsing bencoded scrape response. Invalid value. Expected "), - }; - - // Clone Vec into [u8; 20] - let mut info_hash_byte_array: [u8; 20] = Default::default(); - info_hash_byte_array.clone_from_slice(info_hash_byte_vec.as_slice()); - - files.insert(info_hash_byte_array, file); - } - } - _ => panic!("Error parsing bencoded scrape response. Invalid value. 
Expected "), - } +impl TryFrom for Response { + type Error = BencodeParseError; - Self { files } + fn try_from(scrape_response: DeserializedResponse) -> Result { + parse_bencoded_response(&scrape_response.files) } } @@ -89,3 +41,148 @@ impl From for Response { struct DeserializedResponse { pub files: Value, } + +pub struct ResponseBuilder { + response: Response, +} + +impl ResponseBuilder { + pub fn default() -> Self { + Self { + response: Response::empty(), + } + } + + pub fn add_file(mut self, info_hash_bytes: ByteArray20, file: File) -> Self { + self.response.files.insert(info_hash_bytes, file); + self + } + + pub fn build(self) -> Response { + self.response + } +} + +#[derive(Debug)] +pub enum BencodeParseError { + InvalidValueExpectedDict { value: Value }, + InvalidValueExpectedInt { value: Value }, + InvalidFileField { value: Value }, + MissingFileField { field_name: String }, +} + +/// It parses a bencoded scrape response into a `Response` struct. +/// +/// For example: +/// +/// ```text +/// d5:filesd20:xxxxxxxxxxxxxxxxxxxxd8:completei11e10:downloadedi13772e10:incompletei19e +/// 20:yyyyyyyyyyyyyyyyyyyyd8:completei21e10:downloadedi206e10:incompletei20eee +/// ``` +/// +/// Response (JSON encoded for readability): +/// +/// ```text +/// { +/// 'files': { +/// 'xxxxxxxxxxxxxxxxxxxx': {'complete': 11, 'downloaded': 13772, 'incomplete': 19}, +/// 'yyyyyyyyyyyyyyyyyyyy': {'complete': 21, 'downloaded': 206, 'incomplete': 20} +/// } +/// } +fn parse_bencoded_response(value: &Value) -> Result { + let mut files: HashMap = HashMap::new(); + + match value { + Value::Dict(dict) => { + for file_element in dict { + let info_hash_byte_vec = file_element.0; + let file_value = file_element.1; + + let file = parse_bencoded_file(file_value).unwrap(); + + files.insert(InfoHash::new(info_hash_byte_vec).bytes(), file); + } + } + _ => return Err(BencodeParseError::InvalidValueExpectedDict { value: value.clone() }), + } + + Ok(Response { files }) +} + +/// It parses a bencoded 
dictionary into a `File` struct. +/// +/// For example: +/// +/// +/// ```text +/// d8:completei11e10:downloadedi13772e10:incompletei19ee +/// ``` +/// +/// into: +/// +/// ```text +/// File { +/// complete: 11, +/// downloaded: 13772, +/// incomplete: 19, +/// } +/// ``` +fn parse_bencoded_file(value: &Value) -> Result { + let file = match &value { + Value::Dict(dict) => { + let mut complete = None; + let mut downloaded = None; + let mut incomplete = None; + + for file_field in dict { + let field_name = file_field.0; + + let field_value = match file_field.1 { + Value::Int(number) => Ok(*number), + _ => Err(BencodeParseError::InvalidValueExpectedInt { + value: file_field.1.clone(), + }), + }?; + + if field_name == b"complete" { + complete = Some(field_value); + } else if field_name == b"downloaded" { + downloaded = Some(field_value); + } else if field_name == b"incomplete" { + incomplete = Some(field_value); + } else { + return Err(BencodeParseError::InvalidFileField { + value: file_field.1.clone(), + }); + } + } + + if complete.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "complete".to_string(), + }); + } + + if downloaded.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "downloaded".to_string(), + }); + } + + if incomplete.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "incomplete".to_string(), + }); + } + + File { + complete: complete.unwrap(), + downloaded: downloaded.unwrap(), + incomplete: incomplete.unwrap(), + } + } + _ => return Err(BencodeParseError::InvalidValueExpectedDict { value: value.clone() }), + }; + + Ok(file) +} diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 099e1d360..888da393a 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -5,19 +5,6 @@ mod common; mod http; mod http_tracker_server { - use std::str::FromStr; - - use percent_encoding::NON_ALPHANUMERIC; - use torrust_tracker::protocol::info_hash::InfoHash; - - #[test] - 
fn calculate_info_hash_param() { - let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); - - let param = percent_encoding::percent_encode(&info_hash.0, NON_ALPHANUMERIC).to_string(); - - assert_eq!(param, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"); - } mod for_all_config_modes { @@ -331,7 +318,7 @@ mod http_tracker_server { // Peer 1 let previously_announced_peer = PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .into(); + .build(); // Add the Peer 1 http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; @@ -365,7 +352,7 @@ mod http_tracker_server { let http_tracker_server = start_public_http_tracker().await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let peer = PeerBuilder::default().into(); + let peer = PeerBuilder::default().build(); // Add a peer http_tracker_server.add_torrent(&info_hash, &peer).await; @@ -396,7 +383,7 @@ mod http_tracker_server { // Peer 1 let previously_announced_peer = PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .into(); + .build(); // Add the Peer 1 http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; @@ -435,7 +422,7 @@ mod http_tracker_server { // Peer 1 let previously_announced_peer = PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .into(); + .build(); // Add the Peer 1 http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; @@ -684,17 +671,16 @@ mod http_tracker_server { // Vuze (bittorrent client) docs: // https://wiki.vuze.com/w/Scrape - use std::collections::HashMap; use std::str::FromStr; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; use crate::common::fixtures::PeerBuilder; - use crate::http::asserts::assert_internal_server_error_response; + use crate::http::asserts::{assert_internal_server_error_response, assert_scrape_response}; use 
crate::http::client::Client; use crate::http::requests; - use crate::http::responses::scrape::{File, Response}; + use crate::http::responses::scrape::{File, ResponseBuilder}; use crate::http::server::start_public_http_tracker; #[tokio::test] @@ -707,20 +693,21 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_scrape_response() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker = start_public_http_tracker().await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - // Peer - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .into(); - - // Add the Peer - http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; - // Scrape the tracker - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(http_tracker.get_connection_info()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -728,24 +715,18 @@ mod http_tracker_server { ) .await; - // todo: extract scrape response builder or named constructor. - // A builder with an "add_file(info_hash_bytes: &[u8], file: File)" method could be a good solution. 
- let mut files = HashMap::new(); - files.insert( - info_hash.bytes(), - File { - complete: 1, - downloaded: 0, - incomplete: 0, - }, - ); - let expected_scrape_response = Response { files }; + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); - // todo: extract assert - assert_eq!(response.status(), 200); - let bytes = response.bytes().await.unwrap(); - let scrape_response = Response::from_bytes(&bytes); - assert_eq!(scrape_response, expected_scrape_response); + assert_scrape_response(response, &expected_scrape_response).await; } } } @@ -776,6 +757,7 @@ mod http_tracker_server { } #[tokio::test] + async fn should_allow_announcing_a_whitelisted_torrent() { let http_tracker_server = start_whitelisted_http_tracker().await; From c24d744270f3fc6e7872e3c183e19392270b0435 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 31 Jan 2023 18:54:19 +0000 Subject: [PATCH 294/435] refactor(test): [#159] refactor tests for announce request --- tests/http/asserts.rs | 3 +-- tests/http/responses/announce.rs | 6 ++++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index b8ccfee22..a58558bc0 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -28,8 +28,7 @@ pub async fn assert_compact_announce_response(response: Response, expected_respo let bytes = response.bytes().await.unwrap(); - // todo: move to DeserializedCompact constructor and make DeserializedCompact struct private - let compact_announce: DeserializedCompact = serde_bencode::from_bytes(&bytes).unwrap_or_else(|_| { + let compact_announce = DeserializedCompact::from_bytes(&bytes).unwrap_or_else(|_| { panic!( "response body should be a valid compact announce response, got \"{:?}\"", &bytes diff --git a/tests/http/responses/announce.rs b/tests/http/responses/announce.rs index 85f0347cc..e976ba9ae 100644 --- 
a/tests/http/responses/announce.rs +++ b/tests/http/responses/announce.rs @@ -41,6 +41,12 @@ pub struct DeserializedCompact { pub peers: Vec, } +impl DeserializedCompact { + pub fn from_bytes(bytes: &[u8]) -> Result { + serde_bencode::from_bytes::(bytes) + } +} + #[derive(Debug, PartialEq)] pub struct Compact { // code-review: there could be a way to deserialize this struct directly From c46e4171e5b086e021f5c803e524c9334433464b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 1 Feb 2023 13:49:42 +0000 Subject: [PATCH 295/435] test(http): [#159] add more tests for scrape request --- tests/common/fixtures.rs | 6 + tests/http/asserts.rs | 2 +- tests/http/requests/scrape.rs | 11 + tests/http/responses/scrape.rs | 22 +- tests/http_tracker.rs | 365 ++++++++++++++++++++++++++++++++- 5 files changed, 391 insertions(+), 15 deletions(-) diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index 5e644c45f..2abaca244 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -27,6 +27,12 @@ impl PeerBuilder { self } + #[allow(dead_code)] + pub fn with_no_bytes_pending_to_download(mut self) -> Self { + self.peer.left = NumberOfBytes(0); + self + } + #[allow(dead_code)] pub fn build(self) -> Peer { self.into() diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index a58558bc0..59f4ed42a 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -48,7 +48,7 @@ pub async fn assert_compact_announce_response(response: Response, expected_respo pub async fn assert_scrape_response(response: Response, expected_response: &scrape::Response) { assert_eq!(response.status(), 200); - let scrape_response = scrape::Response::try_from_bytes(&response.bytes().await.unwrap()).unwrap(); + let scrape_response = scrape::Response::try_from_bencoded(&response.bytes().await.unwrap()).unwrap(); assert_eq!(scrape_response, *expected_response); } diff --git a/tests/http/requests/scrape.rs b/tests/http/requests/scrape.rs index 6198f1680..6ab46974b 100644 --- 
a/tests/http/requests/scrape.rs +++ b/tests/http/requests/scrape.rs @@ -54,6 +54,11 @@ impl QueryBuilder { self } + pub fn add_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.scrape_query.info_hash.push(info_hash.0); + self + } + pub fn query(self) -> Query { self.scrape_query } @@ -82,6 +87,12 @@ pub struct QueryParams { pub info_hash: Vec, } +impl QueryParams { + pub fn set_one_info_hash_param(&mut self, info_hash: &str) { + self.info_hash = vec![info_hash.to_string()]; + } +} + impl std::fmt::Display for QueryParams { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let query = self diff --git a/tests/http/responses/scrape.rs b/tests/http/responses/scrape.rs index c9081a10f..5bf938ebe 100644 --- a/tests/http/responses/scrape.rs +++ b/tests/http/responses/scrape.rs @@ -12,23 +12,31 @@ pub struct Response { } impl Response { - pub fn try_from_bytes(bytes: &[u8]) -> Result { - let scrape_response: DeserializedResponse = serde_bencode::from_bytes(bytes).unwrap(); - Self::try_from(scrape_response) + pub fn with_one_file(info_hash_bytes: ByteArray20, file: File) -> Self { + let mut files: HashMap = HashMap::new(); + files.insert(info_hash_bytes, file); + Self { files } } - pub fn empty() -> Self { - Self::default() + pub fn try_from_bencoded(bytes: &[u8]) -> Result { + let scrape_response: DeserializedResponse = serde_bencode::from_bytes(bytes).unwrap(); + Self::try_from(scrape_response) } } -#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Default)] pub struct File { pub complete: i64, // The number of active peers that have completed downloading pub downloaded: i64, // The number of peers that have ever completed downloading pub incomplete: i64, // The number of active peers that have not completed downloading } +impl File { + pub fn zeroed() -> Self { + Self::default() + } +} + impl TryFrom for Response { type Error = BencodeParseError; @@ -49,7 +57,7 @@ pub struct ResponseBuilder { 
impl ResponseBuilder { pub fn default() -> Self { Self { - response: Response::empty(), + response: Response::default(), } } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 888da393a..44bb8609d 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -9,6 +9,18 @@ mod http_tracker_server { mod for_all_config_modes { mod receiving_an_announce_request { + + // Announce request documentation: + // + // BEP 03. The BitTorrent Protocol Specification + // https://www.bittorrent.org/beps/bep_0003.html + // + // BEP 23. Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Announce + use std::net::{IpAddr, Ipv6Addr}; use std::str::FromStr; @@ -671,17 +683,19 @@ mod http_tracker_server { // Vuze (bittorrent client) docs: // https://wiki.vuze.com/w/Scrape + use std::net::IpAddr; use std::str::FromStr; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; - use crate::common::fixtures::PeerBuilder; + use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; use crate::http::asserts::{assert_internal_server_error_response, assert_scrape_response}; use crate::http::client::Client; use crate::http::requests; - use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::server::start_public_http_tracker; + use crate::http::requests::scrape::QueryBuilder; + use crate::http::responses::scrape::{self, File, ResponseBuilder}; + use crate::http::server::{start_ipv6_http_tracker, start_public_http_tracker}; #[tokio::test] async fn should_fail_when_the_request_is_empty() { @@ -692,7 +706,25 @@ mod http_tracker_server { } #[tokio::test] - async fn should_return_the_scrape_response() { + async fn should_fail_when_the_info_hash_param_is_invalid() { + let http_tracker_server = start_public_http_tracker().await; + + let mut params = QueryBuilder::default().query().params(); + + for invalid_value in 
&invalid_info_hashes() { + params.set_one_info_hash_param(invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + // code-review: it's not returning the invalid info hash error + assert_internal_server_error_response(response).await; + } + } + + #[tokio::test] + async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { let http_tracker = start_public_http_tracker().await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -728,6 +760,123 @@ mod http_tracker_server { assert_scrape_response(response, &expected_scrape_response).await; } + + #[tokio::test] + async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { + let http_tracker = start_public_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_no_bytes_pending_to_download() + .build(), + ) + .await; + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 1, + downloaded: 0, + incomplete: 0, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + #[tokio::test] + async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { + let http_tracker = start_public_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + 
&requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; + } + + #[tokio::test] + async fn should_accept_multiple_infohashes() { + let http_tracker = start_public_http_tracker().await; + + let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .add_info_hash(&info_hash1) + .add_info_hash(&info_hash2) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file(info_hash1.bytes(), File::zeroed()) + .add_file(info_hash2.bytes(), File::zeroed()) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + #[tokio::test] + async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { + let http_tracker = start_public_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let stats = http_tracker.tracker.get_stats().await; + + assert_eq!(stats.tcp4_scrapes_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { + let http_tracker = start_ipv6_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + Client::bind(http_tracker.get_connection_info(), IpAddr::from_str("::1").unwrap()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let stats = 
http_tracker.tracker.get_stats().await; + + assert_eq!(stats.tcp6_scrapes_handled, 1); + } } } @@ -777,7 +926,92 @@ mod http_tracker_server { } } - mod receiving_an_scrape_request {} + mod receiving_an_scrape_request { + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::PeerBuilder; + use crate::http::asserts::assert_scrape_response; + use crate::http::client::Client; + use crate::http::requests; + use crate::http::responses::scrape::{File, ResponseBuilder}; + use crate::http::server::start_whitelisted_http_tracker; + + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { + let http_tracker = start_whitelisted_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + #[tokio::test] + async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { + let http_tracker = start_whitelisted_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + http_tracker + .tracker + .add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the 
torrent to the whitelist"); + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + } } mod configured_as_private { @@ -798,7 +1032,7 @@ mod http_tracker_server { use crate::http::server::start_private_http_tracker; #[tokio::test] - async fn should_respond_to_peers_providing_a_valid_authentication_key() { + async fn should_respond_to_authenticated_peers() { let http_tracker_server = start_private_http_tracker().await; let key = http_tracker_server @@ -842,7 +1076,124 @@ mod http_tracker_server { } } - mod receiving_an_scrape_request {} + mod receiving_an_scrape_request { + + use std::str::FromStr; + use std::time::Duration; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::auth::KeyId; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::PeerBuilder; + use crate::http::asserts::assert_scrape_response; + use crate::http::client::Client; + use crate::http::requests; + use crate::http::responses::scrape::{File, ResponseBuilder}; + use crate::http::server::start_private_http_tracker; + + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { + let http_tracker = start_private_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + 
.with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + #[tokio::test] + async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { + let http_tracker = start_private_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let key = http_tracker.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + + let response = Client::authenticated(http_tracker.get_connection_info(), key.id()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { + // There is not authentication error + + let http_tracker = start_private_http_tracker().await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let false_key_id: KeyId = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); + + let response = Client::authenticated(http_tracker.get_connection_info(), false_key_id) + .scrape( + &requests::scrape::QueryBuilder::default() + 
.with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + } } mod configured_as_private_and_whitelisted { From b8793e751c59b35945ffe2e02409a80bea44f428 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 1 Feb 2023 18:01:05 +0000 Subject: [PATCH 296/435] feat: [#165] upgrade workflow action: actions/checkout@v3 --- .github/workflows/publish_crate.yml | 2 +- .github/workflows/publish_docker_image.yml | 2 +- .github/workflows/test_build_release.yml | 6 +++--- .github/workflows/test_docker.yml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/publish_crate.yml b/.github/workflows/publish_crate.yml index 0352064eb..40f332a8c 100644 --- a/.github/workflows/publish_crate.yml +++ b/.github/workflows/publish_crate.yml @@ -23,7 +23,7 @@ jobs: if: needs.check-secret.outputs.publish == 'true' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 with: profile: minimal diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml index 1587a0bd6..5983bf6a2 100644 --- a/.github/workflows/publish_docker_image.yml +++ b/.github/workflows/publish_docker_image.yml @@ -32,7 +32,7 @@ jobs: if: needs.check-secret.outputs.publish == 'true' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 with: profile: minimal diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index 3924eea4b..c86cf9994 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -9,7 +9,7 @@ jobs: env: CARGO_TERM_COLOR: always steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 
with: profile: minimal @@ -29,7 +29,7 @@ jobs: env: CARGO_TERM_COLOR: always steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 with: profile: minimal @@ -61,7 +61,7 @@ jobs: env: CARGO_TERM_COLOR: always steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: actions-rs/toolchain@v1 with: profile: minimal diff --git a/.github/workflows/test_docker.yml b/.github/workflows/test_docker.yml index 2cfa4de5c..0c3fc36d8 100644 --- a/.github/workflows/test_docker.yml +++ b/.github/workflows/test_docker.yml @@ -8,7 +8,7 @@ jobs: test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 From 3d1a12b46e7c2326495865ff5d19ed405970abd7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 1 Feb 2023 18:05:41 +0000 Subject: [PATCH 297/435] feat: [#165] upgrade workflow action: Swatinem/rust-cache@v2 --- .github/workflows/publish_crate.yml | 2 +- .github/workflows/publish_docker_image.yml | 2 +- .github/workflows/test_build_release.yml | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/publish_crate.yml b/.github/workflows/publish_crate.yml index 40f332a8c..644d8af6c 100644 --- a/.github/workflows/publish_crate.yml +++ b/.github/workflows/publish_crate.yml @@ -29,7 +29,7 @@ jobs: profile: minimal toolchain: stable components: llvm-tools-preview - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: Run Tests run: cargo test diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml index 5983bf6a2..fd82a499e 100644 --- a/.github/workflows/publish_docker_image.yml +++ b/.github/workflows/publish_docker_image.yml @@ -38,7 +38,7 @@ jobs: profile: minimal toolchain: stable components: llvm-tools-preview - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: Run Tests run: cargo test diff --git 
a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index c86cf9994..38760c747 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -16,7 +16,7 @@ jobs: toolchain: nightly override: true components: rustfmt, clippy - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: Check Rust Formatting uses: actions-rs/cargo@v1 with: @@ -35,7 +35,7 @@ jobs: profile: minimal toolchain: stable components: llvm-tools-preview - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: Check Rust Code uses: actions-rs/cargo@v1 with: @@ -66,7 +66,7 @@ jobs: with: profile: minimal toolchain: stable - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - name: Build Torrust Tracker run: cargo build --release - name: Upload Build Artifact From b14270b5d9666cd4b7260a882c91e007f85cc788 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 1 Feb 2023 18:39:52 +0000 Subject: [PATCH 298/435] feat: [#165] replace workflow action actions-rs/toolchain@v1 with dtolnay/rust-toolchain@stable --- .github/workflows/publish_crate.yml | 7 ++----- .github/workflows/publish_docker_image.yml | 3 +-- .github/workflows/test_build_release.yml | 10 +++------- 3 files changed, 6 insertions(+), 14 deletions(-) diff --git a/.github/workflows/publish_crate.yml b/.github/workflows/publish_crate.yml index 644d8af6c..c120a0fc5 100644 --- a/.github/workflows/publish_crate.yml +++ b/.github/workflows/publish_crate.yml @@ -24,9 +24,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 + - uses: dtolnay/rust-toolchain@stable with: - profile: minimal toolchain: stable components: llvm-tools-preview - uses: Swatinem/rust-cache@v2 @@ -43,11 +42,9 @@ jobs: uses: actions/checkout@v3 - name: Install stable toolchain - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@stable with: - profile: minimal toolchain: stable - override: true - 
run: cargo publish env: diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml index fd82a499e..20152a727 100644 --- a/.github/workflows/publish_docker_image.yml +++ b/.github/workflows/publish_docker_image.yml @@ -33,9 +33,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 + - uses: dtolnay/rust-toolchain@stable with: - profile: minimal toolchain: stable components: llvm-tools-preview - uses: Swatinem/rust-cache@v2 diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index 38760c747..6153ff77b 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -10,11 +10,9 @@ jobs: CARGO_TERM_COLOR: always steps: - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 + - uses: dtolnay/rust-toolchain@stable with: - profile: minimal toolchain: nightly - override: true components: rustfmt, clippy - uses: Swatinem/rust-cache@v2 - name: Check Rust Formatting @@ -30,9 +28,8 @@ jobs: CARGO_TERM_COLOR: always steps: - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 + - uses: dtolnay/rust-toolchain@stable with: - profile: minimal toolchain: stable components: llvm-tools-preview - uses: Swatinem/rust-cache@v2 @@ -62,9 +59,8 @@ jobs: CARGO_TERM_COLOR: always steps: - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 + - uses: dtolnay/rust-toolchain@stable with: - profile: minimal toolchain: stable - uses: Swatinem/rust-cache@v2 - name: Build Torrust Tracker From 42e7e64542a6c570333405c227ca89ba371e6c33 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 1 Feb 2023 18:45:01 +0000 Subject: [PATCH 299/435] refactor: [#165] remove unmantained workflow action to run cargo commmands Replaced with using the `cargo` command directly. 
--- .github/workflows/test_build_release.yml | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index 6153ff77b..3b9a9a44a 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -16,10 +16,7 @@ jobs: components: rustfmt, clippy - uses: Swatinem/rust-cache@v2 - name: Check Rust Formatting - uses: actions-rs/cargo@v1 - with: - command: fmt - args: --check + run: cargo fmt --check test: needs: format @@ -34,15 +31,9 @@ jobs: components: llvm-tools-preview - uses: Swatinem/rust-cache@v2 - name: Check Rust Code - uses: actions-rs/cargo@v1 - with: - command: check - args: --all-targets + run: cargo check --all-targets - name: Clippy Rust Code - uses: actions-rs/cargo@v1 - with: - command: clippy - args: --all-targets -- -D clippy::pedantic + run: cargo clippy --all-targets -- -D clippy::pedantic - uses: taiki-e/install-action@cargo-llvm-cov - uses: taiki-e/install-action@nextest - name: Run Tests From 95a2cd19ab26fae1e503a4f756d95c441769cbcc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 2 Feb 2023 13:34:58 +0000 Subject: [PATCH 300/435] refactor(udp): refactor tests to follow mod structure conventions Use the same dir/mod structure as in API and HTTP tracker integration tests. 
--- tests/common/fixtures.rs | 3 + tests/common/mod.rs | 1 + tests/common/udp.rs | 41 ++++++ tests/udp.rs | 310 --------------------------------------- tests/udp/asserts.rs | 23 +++ tests/udp/client.rs | 65 ++++++++ tests/udp/mod.rs | 3 + tests/udp/server.rs | 67 +++++++++ tests/udp_tracker.rs | 175 ++++++++++++++++++++++ 9 files changed, 378 insertions(+), 310 deletions(-) create mode 100644 tests/common/udp.rs delete mode 100644 tests/udp.rs create mode 100644 tests/udp/asserts.rs create mode 100644 tests/udp/client.rs create mode 100644 tests/udp/mod.rs create mode 100644 tests/udp/server.rs create mode 100644 tests/udp_tracker.rs diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index 2abaca244..1ead0db0c 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -9,6 +9,7 @@ pub struct PeerBuilder { } impl PeerBuilder { + #[allow(dead_code)] pub fn default() -> PeerBuilder { Self { peer: default_peer_for_testing(), @@ -44,6 +45,7 @@ impl PeerBuilder { } } +#[allow(dead_code)] fn default_peer_for_testing() -> Peer { Peer { peer_id: peer::Id(*b"-qB00000000000000000"), @@ -56,6 +58,7 @@ fn default_peer_for_testing() -> Peer { } } +#[allow(dead_code)] pub fn invalid_info_hashes() -> Vec { [ "0".to_string(), diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 810620359..b57996292 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,2 +1,3 @@ pub mod fixtures; pub mod http; +pub mod udp; diff --git a/tests/common/udp.rs b/tests/common/udp.rs new file mode 100644 index 000000000..3d84e2b97 --- /dev/null +++ b/tests/common/udp.rs @@ -0,0 +1,41 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use tokio::net::UdpSocket; + +/// A generic UDP client +pub struct Client { + pub socket: Arc, +} + +impl Client { + #[allow(dead_code)] + pub async fn connected(remote_socket_addr: &SocketAddr, local_socket_addr: &SocketAddr) -> Client { + let client = Client::bind(local_socket_addr).await; + 
client.connect(remote_socket_addr).await; + client + } + + pub async fn bind(local_socket_addr: &SocketAddr) -> Self { + let socket = UdpSocket::bind(local_socket_addr).await.unwrap(); + Self { + socket: Arc::new(socket), + } + } + + pub async fn connect(&self, remote_address: &SocketAddr) { + self.socket.connect(remote_address).await.unwrap(); + } + + #[allow(dead_code)] + pub async fn send(&self, bytes: &[u8]) -> usize { + self.socket.writable().await.unwrap(); + self.socket.send(bytes).await.unwrap() + } + + #[allow(dead_code)] + pub async fn receive(&self, bytes: &mut [u8]) -> usize { + self.socket.readable().await.unwrap(); + self.socket.recv(bytes).await.unwrap() + } +} diff --git a/tests/udp.rs b/tests/udp.rs deleted file mode 100644 index 408f4f795..000000000 --- a/tests/udp.rs +++ /dev/null @@ -1,310 +0,0 @@ -/// Integration tests for UDP tracker server -/// -/// cargo test `udp_tracker_server` -- --nocapture -extern crate rand; - -mod udp_tracker_server { - use core::panic; - use std::io::Cursor; - use std::net::Ipv4Addr; - use std::sync::atomic::{AtomicBool, Ordering}; - use std::sync::Arc; - - use aquatic_udp_protocol::{ - AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, - Port, Request, Response, ScrapeRequest, TransactionId, - }; - use rand::{thread_rng, Rng}; - use tokio::net::UdpSocket; - use tokio::task::JoinHandle; - use torrust_tracker::config::{ephemeral_configuration, Configuration}; - use torrust_tracker::jobs::udp_tracker; - use torrust_tracker::tracker::statistics::Keeper; - use torrust_tracker::udp::MAX_PACKET_SIZE; - use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; - - fn tracker_configuration() -> Arc { - Arc::new(ephemeral_configuration()) - } - - pub fn ephemeral_random_client_port() -> u16 { - // todo: this may produce random test failures because two tests can try to bind the same port. 
- // We could create a pool of available ports (with read/write lock) - let mut rng = thread_rng(); - rng.gen_range(49152..65535) - } - - pub struct UdpServer { - pub started: AtomicBool, - pub job: Option>, - pub bind_address: Option, - } - - impl UdpServer { - pub fn new() -> Self { - Self { - started: AtomicBool::new(false), - job: None, - bind_address: None, - } - } - - pub fn start(&mut self, configuration: &Arc) { - if !self.started.load(Ordering::Relaxed) { - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; - - // Initialize logging - logging::setup(configuration); - - let udp_tracker_config = &configuration.udp_trackers[0]; - - // Start the UDP tracker job - self.job = Some(udp_tracker::start_job(udp_tracker_config, tracker)); - - self.bind_address = Some(udp_tracker_config.bind_address.clone()); - - self.started.store(true, Ordering::Relaxed); - } - } - } - - fn new_running_udp_server(configuration: &Arc) -> UdpServer { - let mut udp_server = UdpServer::new(); - udp_server.start(configuration); - udp_server - } - - struct UdpClient { - socket: Arc, - } - - impl UdpClient { - async fn bind(local_address: &str) -> Self { - let socket = UdpSocket::bind(local_address).await.unwrap(); - Self { - socket: Arc::new(socket), - } - } - - async fn connect(&self, remote_address: &str) { - self.socket.connect(remote_address).await.unwrap(); - } - - async fn send(&self, bytes: &[u8]) -> usize { - self.socket.writable().await.unwrap(); - 
self.socket.send(bytes).await.unwrap() - } - - async fn receive(&self, bytes: &mut [u8]) -> usize { - self.socket.readable().await.unwrap(); - self.socket.recv(bytes).await.unwrap() - } - } - - /// Creates a new `UdpClient` connected to a Udp server - async fn new_connected_udp_client(remote_address: &str) -> UdpClient { - let client = UdpClient::bind(&source_address(ephemeral_random_client_port())).await; - client.connect(remote_address).await; - client - } - - struct UdpTrackerClient { - pub udp_client: UdpClient, - } - - impl UdpTrackerClient { - async fn send(&self, request: Request) -> usize { - // Write request into a buffer - let request_buffer = vec![0u8; MAX_PACKET_SIZE]; - let mut cursor = Cursor::new(request_buffer); - - let request_data = match request.write(&mut cursor) { - Ok(_) => { - #[allow(clippy::cast_possible_truncation)] - let position = cursor.position() as usize; - let inner_request_buffer = cursor.get_ref(); - // Return slice which contains written request data - &inner_request_buffer[..position] - } - Err(e) => panic!("could not write request to bytes: {e}."), - }; - - self.udp_client.send(request_data).await - } - - async fn receive(&self) -> Response { - let mut response_buffer = [0u8; MAX_PACKET_SIZE]; - - let payload_size = self.udp_client.receive(&mut response_buffer).await; - - Response::from_bytes(&response_buffer[..payload_size], true).unwrap() - } - } - - /// Creates a new `UdpTrackerClient` connected to a Udp Tracker server - async fn new_connected_udp_tracker_client(remote_address: &str) -> UdpTrackerClient { - let udp_client = new_connected_udp_client(remote_address).await; - UdpTrackerClient { udp_client } - } - - fn empty_udp_request() -> [u8; MAX_PACKET_SIZE] { - [0; MAX_PACKET_SIZE] - } - - fn empty_buffer() -> [u8; MAX_PACKET_SIZE] { - [0; MAX_PACKET_SIZE] - } - - /// Generates the source address for the UDP client - fn source_address(port: u16) -> String { - format!("127.0.0.1:{port}") - } - - fn 
is_error_response(response: &Response, error_message: &str) -> bool { - match response { - Response::Error(error_response) => error_response.message.starts_with(error_message), - _ => false, - } - } - - fn is_connect_response(response: &Response, transaction_id: TransactionId) -> bool { - match response { - Response::Connect(connect_response) => connect_response.transaction_id == transaction_id, - _ => false, - } - } - - fn is_ipv4_announce_response(response: &Response) -> bool { - matches!(response, Response::AnnounceIpv4(_)) - } - - fn is_scrape_response(response: &Response) -> bool { - matches!(response, Response::Scrape(_)) - } - - #[tokio::test] - async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { - let configuration = tracker_configuration(); - - let udp_server = new_running_udp_server(&configuration); - - let client = new_connected_udp_client(&udp_server.bind_address.unwrap()).await; - - client.send(&empty_udp_request()).await; - - let mut buffer = empty_buffer(); - client.receive(&mut buffer).await; - let response = Response::from_bytes(&buffer, true).unwrap(); - - assert!(is_error_response(&response, "bad request")); - } - - #[tokio::test] - async fn should_return_a_connect_response_when_the_client_sends_a_connection_request() { - let configuration = tracker_configuration(); - - let udp_server = new_running_udp_server(&configuration); - - let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; - - let connect_request = ConnectRequest { - transaction_id: TransactionId(123), - }; - - client.send(connect_request.into()).await; - - let response = client.receive().await; - - assert!(is_connect_response(&response, TransactionId(123))); - } - - async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { - let connect_request = ConnectRequest { transaction_id }; - - client.send(connect_request.into()).await; - - let response = 
client.receive().await; - - match response { - Response::Connect(connect_response) => connect_response.connection_id, - _ => panic!("error connecting to udp server {:?}", response), - } - } - - #[tokio::test] - async fn should_return_an_announce_response_when_the_client_sends_an_announce_request() { - let configuration = tracker_configuration(); - - let udp_server = new_running_udp_server(&configuration); - - let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; - - let connection_id = send_connection_request(TransactionId(123), &client).await; - - // Send announce request - - let announce_request = AnnounceRequest { - connection_id: ConnectionId(connection_id.0), - transaction_id: TransactionId(123i32), - info_hash: InfoHash([0u8; 20]), - peer_id: PeerId([255u8; 20]), - bytes_downloaded: NumberOfBytes(0i64), - bytes_uploaded: NumberOfBytes(0i64), - bytes_left: NumberOfBytes(0i64), - event: AnnounceEvent::Started, - ip_address: Some(Ipv4Addr::new(0, 0, 0, 0)), - key: PeerKey(0u32), - peers_wanted: NumberOfPeers(1i32), - port: Port(client.udp_client.socket.local_addr().unwrap().port()), - }; - - client.send(announce_request.into()).await; - - let response = client.receive().await; - - assert!(is_ipv4_announce_response(&response)); - } - - #[tokio::test] - async fn should_return_a_scrape_response_when_the_client_sends_a_scrape_request() { - let configuration = tracker_configuration(); - - let udp_server = new_running_udp_server(&configuration); - - let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; - - let connection_id = send_connection_request(TransactionId(123), &client).await; - - // Send scrape request - - // Full scrapes are not allowed so it will return "bad request" error with empty vector - let info_hashes = vec![InfoHash([0u8; 20])]; - - let scrape_request = ScrapeRequest { - connection_id: ConnectionId(connection_id.0), - transaction_id: TransactionId(123i32), - info_hashes, - }; - - 
client.send(scrape_request.into()).await; - - let response = client.receive().await; - - assert!(is_scrape_response(&response)); - } -} diff --git a/tests/udp/asserts.rs b/tests/udp/asserts.rs new file mode 100644 index 000000000..bf8fb6728 --- /dev/null +++ b/tests/udp/asserts.rs @@ -0,0 +1,23 @@ +use aquatic_udp_protocol::{Response, TransactionId}; + +pub fn is_error_response(response: &Response, error_message: &str) -> bool { + match response { + Response::Error(error_response) => error_response.message.starts_with(error_message), + _ => false, + } +} + +pub fn is_connect_response(response: &Response, transaction_id: TransactionId) -> bool { + match response { + Response::Connect(connect_response) => connect_response.transaction_id == transaction_id, + _ => false, + } +} + +pub fn is_ipv4_announce_response(response: &Response) -> bool { + matches!(response, Response::AnnounceIpv4(_)) +} + +pub fn is_scrape_response(response: &Response) -> bool { + matches!(response, Response::Scrape(_)) +} diff --git a/tests/udp/client.rs b/tests/udp/client.rs new file mode 100644 index 000000000..3cb4d6134 --- /dev/null +++ b/tests/udp/client.rs @@ -0,0 +1,65 @@ +use std::io::Cursor; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use aquatic_udp_protocol::{Request, Response}; +use rand::{thread_rng, Rng}; +use torrust_tracker::udp::MAX_PACKET_SIZE; + +use crate::common::udp::Client as UdpClient; + +/// Creates a new generic UDP client connected to a generic UDP server +pub async fn new_udp_client_connected(remote_address: &SocketAddr) -> UdpClient { + let local_address = loopback_socket_address(ephemeral_random_client_port()); + UdpClient::connected(remote_address, &local_address).await +} + +/// Creates a new UDP tracker client connected to a UDP tracker server +pub async fn new_udp_tracker_client_connected(remote_address: &SocketAddr) -> Client { + let udp_client = new_udp_client_connected(remote_address).await; + Client { udp_client } +} + +pub fn 
ephemeral_random_client_port() -> u16 { + // todo: this may produce random test failures because two tests can try to bind the same port. + // We could create a pool of available ports (with read/write lock) + let mut rng = thread_rng(); + rng.gen_range(49152..65535) +} + +fn loopback_socket_address(port: u16) -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), port) +} + +/// A UDP tracker client +pub struct Client { + pub udp_client: UdpClient, // A generic UDP client +} + +impl Client { + pub async fn send(&self, request: Request) -> usize { + // Write request into a buffer + let request_buffer = vec![0u8; MAX_PACKET_SIZE]; + let mut cursor = Cursor::new(request_buffer); + + let request_data = match request.write(&mut cursor) { + Ok(_) => { + #[allow(clippy::cast_possible_truncation)] + let position = cursor.position() as usize; + let inner_request_buffer = cursor.get_ref(); + // Return slice which contains written request data + &inner_request_buffer[..position] + } + Err(e) => panic!("could not write request to bytes: {e}."), + }; + + self.udp_client.send(request_data).await + } + + pub async fn receive(&self) -> Response { + let mut response_buffer = [0u8; MAX_PACKET_SIZE]; + + let payload_size = self.udp_client.receive(&mut response_buffer).await; + + Response::from_bytes(&response_buffer[..payload_size], true).unwrap() + } +} diff --git a/tests/udp/mod.rs b/tests/udp/mod.rs new file mode 100644 index 000000000..16a77bb99 --- /dev/null +++ b/tests/udp/mod.rs @@ -0,0 +1,3 @@ +pub mod asserts; +pub mod client; +pub mod server; diff --git a/tests/udp/server.rs b/tests/udp/server.rs new file mode 100644 index 000000000..401d4cf92 --- /dev/null +++ b/tests/udp/server.rs @@ -0,0 +1,67 @@ +use std::net::SocketAddr; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; + +use tokio::task::JoinHandle; +use torrust_tracker::config::{ephemeral_configuration, Configuration}; +use torrust_tracker::jobs::udp_tracker; +use 
torrust_tracker::tracker::statistics::Keeper; +use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; + +pub fn start_udp_tracker(configuration: &Arc) -> Server { + let mut udp_server = Server::new(); + udp_server.start(configuration); + udp_server +} + +pub fn tracker_configuration() -> Arc { + Arc::new(ephemeral_configuration()) +} +pub struct Server { + pub started: AtomicBool, + pub job: Option>, + pub bind_address: Option, +} + +impl Server { + pub fn new() -> Self { + Self { + started: AtomicBool::new(false), + job: None, + bind_address: None, + } + } + + pub fn start(&mut self, configuration: &Arc) { + if !self.started.load(Ordering::Relaxed) { + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; + + // Initialize logging + logging::setup(configuration); + + let udp_tracker_config = &configuration.udp_trackers[0]; + + // Start the UDP tracker job + self.job = Some(udp_tracker::start_job(udp_tracker_config, tracker)); + + self.bind_address = Some(udp_tracker_config.bind_address.parse().unwrap()); + + self.started.store(true, Ordering::Relaxed); + } + } +} diff --git a/tests/udp_tracker.rs b/tests/udp_tracker.rs new file mode 100644 index 000000000..0287d01b7 --- /dev/null +++ b/tests/udp_tracker.rs @@ -0,0 +1,175 @@ +/// Integration tests for UDP tracker server +/// +/// cargo test `udp_tracker_server` -- --nocapture +extern crate rand; + +mod common; +mod udp; + +mod udp_tracker_server { + + // UDP tracker documentation: + // + // 
BEP 15. UDP Tracker Protocol for BitTorrent + // https://www.bittorrent.org/beps/bep_0015.html + + use core::panic; + + use aquatic_udp_protocol::{ConnectRequest, ConnectionId, Response, TransactionId}; + use torrust_tracker::udp::MAX_PACKET_SIZE; + + use crate::udp::asserts::is_error_response; + use crate::udp::client::{new_udp_client_connected, Client}; + use crate::udp::server::{start_udp_tracker, tracker_configuration}; + + fn empty_udp_request() -> [u8; MAX_PACKET_SIZE] { + [0; MAX_PACKET_SIZE] + } + + fn empty_buffer() -> [u8; MAX_PACKET_SIZE] { + [0; MAX_PACKET_SIZE] + } + + async fn send_connection_request(transaction_id: TransactionId, client: &Client) -> ConnectionId { + let connect_request = ConnectRequest { transaction_id }; + + client.send(connect_request.into()).await; + + let response = client.receive().await; + + match response { + Response::Connect(connect_response) => connect_response.connection_id, + _ => panic!("error connecting to udp server {:?}", response), + } + } + + #[tokio::test] + async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { + let configuration = tracker_configuration(); + + let udp_server = start_udp_tracker(&configuration); + + let client = new_udp_client_connected(&udp_server.bind_address.unwrap()).await; + + client.send(&empty_udp_request()).await; + + let mut buffer = empty_buffer(); + client.receive(&mut buffer).await; + let response = Response::from_bytes(&buffer, true).unwrap(); + + assert!(is_error_response(&response, "bad request")); + } + + mod receiving_a_connection_request { + use aquatic_udp_protocol::{ConnectRequest, TransactionId}; + + use crate::udp::asserts::is_connect_response; + use crate::udp::client::new_udp_tracker_client_connected; + use crate::udp::server::{start_udp_tracker, tracker_configuration}; + + #[tokio::test] + async fn should_return_a_connect_response() { + let configuration = tracker_configuration(); + + let udp_server = start_udp_tracker(&configuration); + + 
let client = new_udp_tracker_client_connected(&udp_server.bind_address.unwrap()).await; + + let connect_request = ConnectRequest { + transaction_id: TransactionId(123), + }; + + client.send(connect_request.into()).await; + + let response = client.receive().await; + + assert!(is_connect_response(&response, TransactionId(123))); + } + } + + mod receiving_an_announce_request { + use std::net::Ipv4Addr; + + use aquatic_udp_protocol::{ + AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, + TransactionId, + }; + + use crate::udp::asserts::is_ipv4_announce_response; + use crate::udp::client::new_udp_tracker_client_connected; + use crate::udp::server::{start_udp_tracker, tracker_configuration}; + use crate::udp_tracker_server::send_connection_request; + + #[tokio::test] + async fn should_return_an_announce_response() { + let configuration = tracker_configuration(); + + let udp_server = start_udp_tracker(&configuration); + + let client = new_udp_tracker_client_connected(&udp_server.bind_address.unwrap()).await; + + let connection_id = send_connection_request(TransactionId(123), &client).await; + + // Send announce request + + let announce_request = AnnounceRequest { + connection_id: ConnectionId(connection_id.0), + transaction_id: TransactionId(123i32), + info_hash: InfoHash([0u8; 20]), + peer_id: PeerId([255u8; 20]), + bytes_downloaded: NumberOfBytes(0i64), + bytes_uploaded: NumberOfBytes(0i64), + bytes_left: NumberOfBytes(0i64), + event: AnnounceEvent::Started, + ip_address: Some(Ipv4Addr::new(0, 0, 0, 0)), + key: PeerKey(0u32), + peers_wanted: NumberOfPeers(1i32), + port: Port(client.udp_client.socket.local_addr().unwrap().port()), + }; + + client.send(announce_request.into()).await; + + let response = client.receive().await; + + assert!(is_ipv4_announce_response(&response)); + } + } + + mod receiving_an_scrape_request { + use aquatic_udp_protocol::{ConnectionId, InfoHash, ScrapeRequest, TransactionId}; + + use 
crate::udp::asserts::is_scrape_response; + use crate::udp::client::new_udp_tracker_client_connected; + use crate::udp::server::{start_udp_tracker, tracker_configuration}; + use crate::udp_tracker_server::send_connection_request; + + #[tokio::test] + async fn should_return_a_scrape_response() { + let configuration = tracker_configuration(); + + let udp_server = start_udp_tracker(&configuration); + + let client = new_udp_tracker_client_connected(&udp_server.bind_address.unwrap()).await; + + let connection_id = send_connection_request(TransactionId(123), &client).await; + + // Send scrape request + + // Full scrapes are not allowed you need to pass an array of info hashes otherwise + // it will return "bad request" error with empty vector + let info_hashes = vec![InfoHash([0u8; 20])]; + + let scrape_request = ScrapeRequest { + connection_id: ConnectionId(connection_id.0), + transaction_id: TransactionId(123i32), + info_hashes, + }; + + client.send(scrape_request.into()).await; + + let response = client.receive().await; + + assert!(is_scrape_response(&response)); + } + } +} From 895592795766a6151e45b6b80f29aa3293d36f59 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 7 Feb 2023 17:39:37 +0100 Subject: [PATCH 301/435] dev: check errors to contain responce --- cSpell.json | 2 + tests/api/asserts.rs | 26 +++++++--- tests/http/asserts.rs | 112 +++++++++++++++++------------------------- 3 files changed, 68 insertions(+), 72 deletions(-) diff --git a/cSpell.json b/cSpell.json index dc51c87c5..9f10d99e4 100644 --- a/cSpell.json +++ b/cSpell.json @@ -28,6 +28,7 @@ "Hydranode", "incompletei", "infohash", + "infohashes", "infoschema", "intervali", "leecher", @@ -58,6 +59,7 @@ "sharktorrent", "socketaddr", "sqllite", + "subsec", "Swatinem", "Swiftbit", "thiserror", diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index 5f9d39705..5a4abfb62 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -37,9 +37,20 @@ pub async fn assert_auth_key_utf8(response: 
Response) -> AuthKey { // OK response pub async fn assert_ok(response: Response) { - assert_eq!(response.status(), 200); - assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); - assert_eq!(response.text().await.unwrap(), "{\"status\":\"ok\"}"); + let response_status = response.status(); + let response_headers = response.headers().get("content-type").cloned().unwrap(); + let response_text = response.text().await.unwrap(); + + let details = format!( + r#" + status: Ā“{response_status}Ā“ + headers: Ā“{response_headers:?}Ā“ + text: Ā“"{response_text}"Ā“"# + ); + + assert_eq!(response_status, 200, "details:{details}."); + assert_eq!(response_headers, "application/json", "\ndetails:{details}."); + assert_eq!(response_text, "{\"status\":\"ok\"}", "\ndetails:{details}."); } // Error responses @@ -118,8 +129,11 @@ pub async fn assert_failed_to_reload_keys(response: Response) { async fn assert_unhandled_rejection(response: Response, reason: &str) { assert_eq!(response.status(), 500); assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); - assert_eq!( - response.text().await.unwrap(), - format!("Unhandled rejection: Err {{ reason: \"{reason}\" }}") + + let reason_text = format!("Unhandled rejection: Err {{ reason: \"{reason}"); + let response_text = response.text().await.unwrap(); + assert!( + response_text.contains(&reason_text), + ":\n response: `\"{response_text}\"`\n dose not contain: `\"{reason_text}\"`." 
); } diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index 59f4ed42a..cd6bcb499 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -1,9 +1,27 @@ +use std::panic::Location; + use reqwest::Response; use super::responses::announce::{Announce, Compact, DeserializedCompact}; use super::responses::scrape; use crate::http::responses::error::Error; +pub fn assert_error_bencoded(response_text: &String, expected_failure_reason: &str, location: &'static Location<'static>) { + let error_failure_reason = serde_bencode::from_str::(response_text) + .unwrap_or_else(|_| panic!( + "response body should be a valid bencoded string for the '{expected_failure_reason}' error, got \"{response_text}\"" + ) + ) + .failure_reason; + + assert!( + error_failure_reason.contains(expected_failure_reason), + r#": + response: `"{error_failure_reason}"` + dose not contain: `"{expected_failure_reason}"`, {location}"# + ); +} + pub async fn assert_empty_announce_response(response: Response) { assert_eq!(response.status(), 200); let announce_response: Announce = serde_bencode::from_str(&response.text().await.unwrap()).unwrap(); @@ -64,90 +82,52 @@ pub async fn assert_is_announce_response(response: Response) { pub async fn assert_internal_server_error_response(response: Response) { assert_eq!(response.status(), 200); - let body = response.text().await.unwrap(); - let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { - panic!( - "response body should be a valid bencoded string for the 'internal server' error, got \"{}\"", - &body - ) - }); - let expected_error_response = Error { - failure_reason: "internal server error".to_string(), - }; - assert_eq!(error_response, expected_error_response); + + assert_error_bencoded(&response.text().await.unwrap(), "internal server", Location::caller()); } pub async fn assert_invalid_info_hash_error_response(response: Response) { assert_eq!(response.status(), 200); - let body = response.text().await.unwrap(); - let 
error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { - panic!( - "response body should be a valid bencoded string for the 'invalid info_hash' error, got \"{}\"", - &body - ) - }); - let expected_error_response = Error { - failure_reason: "info_hash is either missing or invalid".to_string(), - }; - assert_eq!(error_response, expected_error_response); + + assert_error_bencoded( + &response.text().await.unwrap(), + "info_hash is either missing or invalid", + Location::caller(), + ); } pub async fn assert_invalid_peer_id_error_response(response: Response) { assert_eq!(response.status(), 200); - let body = response.text().await.unwrap(); - let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { - panic!( - "response body should be a valid bencoded string for the 'invalid peer id' error, got \"{}\"", - &body - ) - }); - let expected_error_response = Error { - failure_reason: "peer_id is either missing or invalid".to_string(), - }; - assert_eq!(error_response, expected_error_response); + + assert_error_bencoded( + &response.text().await.unwrap(), + "peer_id is either missing or invalid", + Location::caller(), + ); } pub async fn assert_torrent_not_in_whitelist_error_response(response: Response) { assert_eq!(response.status(), 200); - let body = response.text().await.unwrap(); - let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { - panic!( - "response body should be a valid bencoded string for the 'torrent not on whitelist' error, got \"{}\"", - &body - ) - }); - let expected_error_response = Error { - failure_reason: "torrent not on whitelist".to_string(), - }; - assert_eq!(error_response, expected_error_response); + + assert_error_bencoded( + &response.text().await.unwrap(), + "torrent not on whitelist", + Location::caller(), + ); } pub async fn assert_peer_not_authenticated_error_response(response: Response) { assert_eq!(response.status(), 200); - let body = response.text().await.unwrap(); - let 
error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { - panic!( - "response body should be a valid bencoded string for the 'peer not authenticated' error, got \"{}\"", - &body - ) - }); - let expected_error_response = Error { - failure_reason: "peer not authenticated".to_string(), - }; - assert_eq!(error_response, expected_error_response); + + assert_error_bencoded(&response.text().await.unwrap(), "peer not authenticated", Location::caller()); } pub async fn assert_invalid_authentication_key_error_response(response: Response) { assert_eq!(response.status(), 200); - let body = response.text().await.unwrap(); - let error_response: Error = serde_bencode::from_str(&body).unwrap_or_else(|_| { - panic!( - "response body should be a valid bencoded string for the 'invalid authentication key' error, got \"{}\"", - &body - ) - }); - let expected_error_response = Error { - failure_reason: "invalid authentication key".to_string(), - }; - assert_eq!(error_response, expected_error_response); + + assert_error_bencoded( + &response.text().await.unwrap(), + "invalid authentication key", + Location::caller(), + ); } From ff9d379d80f90d9778deca8daae91c177b4158da Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 7 Feb 2023 20:28:34 +0100 Subject: [PATCH 302/435] dev: edit api to pass-through underlying error --- src/apis/handlers.rs | 20 ++++++++++---------- src/apis/responses.rs | 26 ++++++++++++++------------ 2 files changed, 24 insertions(+), 22 deletions(-) diff --git a/src/apis/handlers.rs b/src/apis/handlers.rs index 8d9689025..38959edbe 100644 --- a/src/apis/handlers.rs +++ b/src/apis/handlers.rs @@ -66,8 +66,8 @@ pub async fn add_torrent_to_whitelist_handler( match InfoHash::from_str(&info_hash.0) { Err(_) => invalid_info_hash_param_response(&info_hash.0), Ok(info_hash) => match tracker.add_torrent_to_whitelist(&info_hash).await { - Ok(..) => ok_response(), - Err(..) 
=> failed_to_whitelist_torrent_response(), + Ok(_) => ok_response(), + Err(e) => failed_to_whitelist_torrent_response(e), }, } } @@ -79,16 +79,16 @@ pub async fn remove_torrent_from_whitelist_handler( match InfoHash::from_str(&info_hash.0) { Err(_) => invalid_info_hash_param_response(&info_hash.0), Ok(info_hash) => match tracker.remove_torrent_from_whitelist(&info_hash).await { - Ok(..) => ok_response(), - Err(..) => failed_to_remove_torrent_from_whitelist_response(), + Ok(_) => ok_response(), + Err(e) => failed_to_remove_torrent_from_whitelist_response(e), }, } } pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { match tracker.load_whitelist().await { - Ok(..) => ok_response(), - Err(..) => failed_to_reload_whitelist_response(), + Ok(_) => ok_response(), + Err(e) => failed_to_reload_whitelist_response(e), } } @@ -96,7 +96,7 @@ pub async fn generate_auth_key_handler(State(tracker): State>, Path let seconds_valid = seconds_valid_or_key; match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), - Err(_) => failed_to_generate_key_response(), + Err(e) => failed_to_generate_key_response(e), } } @@ -111,15 +111,15 @@ pub async fn delete_auth_key_handler( Err(_) => invalid_auth_key_param_response(&seconds_valid_or_key.0), Ok(key_id) => match tracker.remove_auth_key(&key_id.to_string()).await { Ok(_) => ok_response(), - Err(_) => failed_to_delete_key_response(), + Err(e) => failed_to_delete_key_response(e), }, } } pub async fn reload_keys_handler(State(tracker): State>) -> Response { match tracker.load_keys().await { - Ok(..) => ok_response(), - Err(..) 
=> failed_to_reload_keys_response(), + Ok(_) => ok_response(), + Err(e) => failed_to_reload_keys_response(e), } } diff --git a/src/apis/responses.rs b/src/apis/responses.rs index b150b4bff..3b0946396 100644 --- a/src/apis/responses.rs +++ b/src/apis/responses.rs @@ -1,3 +1,5 @@ +use std::error::Error; + use axum::http::{header, StatusCode}; use axum::response::{IntoResponse, Json, Response}; use serde::Serialize; @@ -110,33 +112,33 @@ pub fn torrent_not_known_response() -> Response { } #[must_use] -pub fn failed_to_remove_torrent_from_whitelist_response() -> Response { - unhandled_rejection_response("failed to remove torrent from whitelist".to_string()) +pub fn failed_to_remove_torrent_from_whitelist_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to remove torrent from whitelist: {e}")) } #[must_use] -pub fn failed_to_whitelist_torrent_response() -> Response { - unhandled_rejection_response("failed to whitelist torrent".to_string()) +pub fn failed_to_whitelist_torrent_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to whitelist torrent: {e}")) } #[must_use] -pub fn failed_to_reload_whitelist_response() -> Response { - unhandled_rejection_response("failed to reload whitelist".to_string()) +pub fn failed_to_reload_whitelist_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to reload whitelist: {e}")) } #[must_use] -pub fn failed_to_generate_key_response() -> Response { - unhandled_rejection_response("failed to generate key".to_string()) +pub fn failed_to_generate_key_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to generate key: {e}")) } #[must_use] -pub fn failed_to_delete_key_response() -> Response { - unhandled_rejection_response("failed to delete key".to_string()) +pub fn failed_to_delete_key_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to delete key: {e}")) } #[must_use] -pub fn failed_to_reload_keys_response() -> 
Response { - unhandled_rejection_response("failed to reload keys".to_string()) +pub fn failed_to_reload_keys_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to reload keys: {e}")) } /// This error response is to keep backward compatibility with the old Warp API. From aa0f371d5b1b91770ce4c75634a2087abb82327f Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 7 Feb 2023 20:12:50 +0100 Subject: [PATCH 303/435] dev: add located error module --- src/lib.rs | 1 + src/located_error.rs | 103 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 104 insertions(+) create mode 100644 src/located_error.rs diff --git a/src/lib.rs b/src/lib.rs index e8cf53045..cbda2854c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,6 +3,7 @@ pub mod config; pub mod databases; pub mod http; pub mod jobs; +pub mod located_error; pub mod logging; pub mod protocol; pub mod setup; diff --git a/src/located_error.rs b/src/located_error.rs new file mode 100644 index 000000000..d45517e5a --- /dev/null +++ b/src/located_error.rs @@ -0,0 +1,103 @@ +// https://stackoverflow.com/questions/74336993/getting-line-numbers-with-when-using-boxdyn-stderrorerror + +use std::error::Error; +use std::panic::Location; +use std::sync::Arc; + +pub struct Located(pub E); + +#[derive(Debug)] +pub struct LocatedError<'a, E> +where + E: Error + ?Sized + Send + Sync, +{ + source: Arc, + location: Box>, +} + +impl<'a, E> std::fmt::Display for LocatedError<'a, E> +where + E: Error + ?Sized + Send + Sync, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}, {}", self.source, self.location) + } +} + +impl<'a, E> Error for LocatedError<'a, E> +where + E: Error + ?Sized + Send + Sync + 'static, +{ + fn source(&self) -> Option<&(dyn Error + 'static)> { + Some(&self.source) + } +} + +impl<'a, E> Clone for LocatedError<'a, E> +where + E: Error + ?Sized + Send + Sync, +{ + fn clone(&self) -> Self { + LocatedError { + source: self.source.clone(), + location: 
self.location.clone(), + } + } +} + +#[allow(clippy::from_over_into)] +impl<'a, E> Into> for Located +where + E: Error + Send + Sync, + Arc: Clone, +{ + #[track_caller] + fn into(self) -> LocatedError<'a, E> { + let e = LocatedError { + source: Arc::new(self.0), + location: Box::new(*std::panic::Location::caller()), + }; + log::debug!("{e}"); + e + } +} + +#[allow(clippy::from_over_into)] +impl<'a> Into> for Arc { + #[track_caller] + fn into(self) -> LocatedError<'a, dyn std::error::Error + Send + Sync> { + LocatedError { + source: self, + location: Box::new(*std::panic::Location::caller()), + } + } +} + +#[cfg(test)] +mod tests { + use std::panic::Location; + + use super::LocatedError; + use crate::located_error::Located; + + #[derive(thiserror::Error, Debug)] + enum TestError { + #[error("Test")] + Test, + } + + #[track_caller] + fn get_caller_location() -> Location<'static> { + *Location::caller() + } + + #[test] + fn error_should_include_location() { + let e = TestError::Test; + + let b: LocatedError = Located(e).into(); + let l = get_caller_location(); + + assert_eq!(b.location.file(), l.file()); + } +} From 41501397b7c05a24ad03f6a7ac7191292388cd51 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 7 Feb 2023 20:16:04 +0100 Subject: [PATCH 304/435] dev: located error for database --- src/databases/driver.rs | 25 ++++- src/databases/error.rs | 100 ++++++++++++++++--- src/databases/mod.rs | 70 ++++++++------ src/databases/mysql.rs | 186 +++++++++++++++-------------------- src/databases/sqlite.rs | 210 +++++++++++++++++++--------------------- src/tracker/mod.rs | 5 +- 6 files changed, 329 insertions(+), 267 deletions(-) diff --git a/src/databases/driver.rs b/src/databases/driver.rs index 7eaa9064e..c601f1866 100644 --- a/src/databases/driver.rs +++ b/src/databases/driver.rs @@ -1,7 +1,30 @@ use serde::{Deserialize, Serialize}; -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] +use super::error::Error; +use super::mysql::Mysql; +use 
super::sqlite::Sqlite; +use super::{Builder, Database}; + +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, derive_more::Display, Clone)] pub enum Driver { Sqlite3, MySQL, } + +impl Driver { + /// . + /// + /// # Errors + /// + /// This function will return an error if unable to connect to the database. + pub fn build(&self, db_path: &str) -> Result, Error> { + let database = match self { + Driver::Sqlite3 => Builder::::build(db_path), + Driver::MySQL => Builder::::build(db_path), + }?; + + database.create_database_tables().expect("Could not create database tables."); + + Ok(database) + } +} diff --git a/src/databases/error.rs b/src/databases/error.rs index 467db407f..4bee82f19 100644 --- a/src/databases/error.rs +++ b/src/databases/error.rs @@ -1,21 +1,95 @@ -use derive_more::{Display, Error}; +use std::panic::Location; +use std::sync::Arc; -#[derive(Debug, Display, PartialEq, Eq, Error)] -#[allow(dead_code)] +use r2d2_mysql::mysql::UrlError; + +use super::driver::Driver; +use crate::located_error::{Located, LocatedError}; + +#[derive(thiserror::Error, Debug, Clone)] pub enum Error { - #[display(fmt = "Query returned no rows.")] - QueryReturnedNoRows, - #[display(fmt = "Invalid query.")] - InvalidQuery, - #[display(fmt = "Database error.")] - DatabaseError, + #[error("The {driver} query unexpectedly returned nothing: {source}")] + QueryReturnedNoRows { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + driver: Driver, + }, + + #[error("The {driver} query was malformed: {source}")] + InvalidQuery { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + driver: Driver, + }, + + #[error("Unable to insert record into {driver} database, {location}")] + InsertFailed { + location: &'static Location<'static>, + driver: Driver, + }, + + #[error("Failed to remove record from {driver} database, error-code: {error_code}, {location}")] + DeleteFailed { + location: &'static Location<'static>, + error_code: usize, + driver: Driver, + 
}, + + #[error("Failed to connect to {driver} database: {source}")] + ConnectionError { + source: LocatedError<'static, UrlError>, + driver: Driver, + }, + + #[error("Failed to create r2d2 {driver} connection pool: {source}")] + ConnectionPool { + source: LocatedError<'static, r2d2::Error>, + driver: Driver, + }, } impl From for Error { - fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { - match e { - r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => Error::QueryReturnedNoRows, - _ => Error::InvalidQuery, + #[track_caller] + fn from(err: r2d2_sqlite::rusqlite::Error) -> Self { + match err { + r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => Error::QueryReturnedNoRows { + source: (Arc::new(err) as Arc).into(), + driver: Driver::Sqlite3, + }, + _ => Error::InvalidQuery { + source: (Arc::new(err) as Arc).into(), + driver: Driver::Sqlite3, + }, + } + } +} + +impl From for Error { + #[track_caller] + fn from(err: r2d2_mysql::mysql::Error) -> Self { + let e: Arc = Arc::new(err); + Error::InvalidQuery { + source: e.into(), + driver: Driver::MySQL, + } + } +} + +impl From for Error { + #[track_caller] + fn from(err: UrlError) -> Self { + Self::ConnectionError { + source: Located(err).into(), + driver: Driver::MySQL, + } + } +} + +impl From<(r2d2::Error, Driver)> for Error { + #[track_caller] + fn from(e: (r2d2::Error, Driver)) -> Self { + let (err, driver) = e; + Self::ConnectionPool { + source: Located(err).into(), + driver, } } } diff --git a/src/databases/mod.rs b/src/databases/mod.rs index 873dd70eb..809decc2c 100644 --- a/src/databases/mod.rs +++ b/src/databases/mod.rs @@ -3,37 +3,48 @@ pub mod error; pub mod mysql; pub mod sqlite; +use std::marker::PhantomData; + use async_trait::async_trait; -use self::driver::Driver; use self::error::Error; -use crate::databases::mysql::Mysql; -use crate::databases::sqlite::Sqlite; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth; -/// # Errors -/// -/// Will return `r2d2::Error` if `db_path` is not able 
to create a database. -pub fn connect(db_driver: &Driver, db_path: &str) -> Result, r2d2::Error> { - let database: Box = match db_driver { - Driver::Sqlite3 => { - let db = Sqlite::new(db_path)?; - Box::new(db) - } - Driver::MySQL => { - let db = Mysql::new(db_path)?; - Box::new(db) - } - }; - - database.create_database_tables().expect("Could not create database tables."); - - Ok(database) +pub(self) struct Builder +where + T: Database, +{ + phantom: PhantomData, +} + +impl Builder +where + T: Database + 'static, +{ + /// . + /// + /// # Errors + /// + /// Will return `r2d2::Error` if `db_path` is not able to create a database. + pub(self) fn build(db_path: &str) -> Result, Error> { + Ok(Box::new(T::new(db_path)?)) + } } #[async_trait] pub trait Database: Sync + Send { + /// . + /// + /// # Errors + /// + /// Will return `r2d2::Error` if `db_path` is not able to create a database. + fn new(db_path: &str) -> Result + where + Self: std::marker::Sized; + + /// . + /// /// # Errors /// /// Will return `Error` if unable to create own tables. 
@@ -52,27 +63,22 @@ pub trait Database: Sync + Send { async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error>; - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result; + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result, Error>; async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; - async fn get_key_from_keys(&self, key: &str) -> Result; + async fn get_key_from_keys(&self, key: &str) -> Result, Error>; async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result; async fn remove_key_from_keys(&self, key: &str) -> Result; async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { - self.get_info_hash_from_whitelist(&info_hash.clone().to_string()) - .await - .map_or_else( - |e| match e { - Error::QueryReturnedNoRows => Ok(false), - e => Err(e), - }, - |_| Ok(true), - ) + Ok(self + .get_info_hash_from_whitelist(&info_hash.clone().to_string()) + .await? + .is_some()) } } diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 71b06378c..ac54ebb82 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -8,33 +8,32 @@ use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MysqlConnectionManager; +use super::driver::Driver; use crate::databases::{Database, Error}; use crate::protocol::common::AUTH_KEY_LENGTH; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth; +const DRIVER: Driver = Driver::MySQL; + pub struct Mysql { pool: Pool, } -impl Mysql { +#[async_trait] +impl Database for Mysql { /// # Errors /// /// Will return `r2d2::Error` if `db_path` is not able to create `MySQL` database. 
- pub fn new(db_path: &str) -> Result { - let opts = Opts::from_url(db_path).expect("Failed to connect to MySQL database."); + fn new(db_path: &str) -> Result { + let opts = Opts::from_url(db_path)?; let builder = OptsBuilder::from_opts(opts); let manager = MysqlConnectionManager::new(builder); - let pool = r2d2::Pool::builder() - .build(manager) - .expect("Failed to create r2d2 MySQL connection pool."); + let pool = r2d2::Pool::builder().build(manager).map_err(|e| (e, DRIVER))?; Ok(Self { pool }) } -} -#[async_trait] -impl Database for Mysql { fn create_database_tables(&self) -> Result<(), Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( @@ -63,7 +62,7 @@ impl Database for Mysql { i8::try_from(AUTH_KEY_LENGTH).expect("auth::Auth Key Length Should fit within a i8!") ); - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; conn.query_drop(&create_torrents_table) .expect("Could not create torrents table."); @@ -87,7 +86,7 @@ impl Database for Mysql { DROP TABLE `keys`;" .to_string(); - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; conn.query_drop(&drop_whitelist_table) .expect("Could not drop `whitelist` table."); @@ -99,155 +98,124 @@ impl Database for Mysql { } async fn load_persistent_torrents(&self) -> Result, Error> { - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; - - let torrents: Vec<(InfoHash, u32)> = conn - .query_map( - "SELECT info_hash, completed FROM torrents", - |(info_hash_string, completed): (String, u32)| { - let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); - (info_hash, completed) - }, - ) - .map_err(|_| Error::QueryReturnedNoRows)?; + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let torrents = conn.query_map( + "SELECT info_hash, completed FROM torrents", + |(info_hash_string, completed): (String, u32)| { + let 
info_hash = InfoHash::from_str(&info_hash_string).unwrap(); + (info_hash, completed) + }, + )?; Ok(torrents) } async fn load_keys(&self) -> Result, Error> { - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; - - let keys: Vec = conn - .query_map( - "SELECT `key`, valid_until FROM `keys`", - |(key, valid_until): (String, i64)| auth::Key { - key, - valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), - }, - ) - .map_err(|_| Error::QueryReturnedNoRows)?; + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let keys = conn.query_map( + "SELECT `key`, valid_until FROM `keys`", + |(key, valid_until): (String, i64)| auth::Key { + key, + valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), + }, + )?; Ok(keys) } async fn load_whitelist(&self) -> Result, Error> { - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; - let info_hashes: Vec = conn - .query_map("SELECT info_hash FROM whitelist", |info_hash: String| { - InfoHash::from_str(&info_hash).unwrap() - }) - .map_err(|_| Error::QueryReturnedNoRows)?; + let info_hashes = conn.query_map("SELECT info_hash FROM whitelist", |info_hash: String| { + InfoHash::from_str(&info_hash).unwrap() + })?; Ok(info_hashes) } async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + const COMMAND : &str = "INSERT INTO torrents (info_hash, completed) VALUES (:info_hash_str, :completed) ON DUPLICATE KEY UPDATE completed = VALUES(completed)"; + + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let info_hash_str = info_hash.to_string(); debug!("{}", info_hash_str); - match conn.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (:info_hash_str, :completed) ON DUPLICATE KEY UPDATE completed = VALUES(completed)", params! 
{ info_hash_str, completed }) { - Ok(_) => { - Ok(()) - } - Err(e) => { - debug!("{:?}", e); - Err(Error::InvalidQuery) - } - } + Ok(conn.exec_drop(COMMAND, params! { info_hash_str, completed })?) } - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; - - match conn - .exec_first::( - "SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", - params! { info_hash }, - ) - .map_err(|_| Error::DatabaseError)? - { - Some(info_hash) => Ok(InfoHash::from_str(&info_hash).unwrap()), - None => Err(Error::QueryReturnedNoRows), - } + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result, Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let select = conn.exec_first::( + "SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", + params! { info_hash }, + )?; + + let info_hash = select.map(|f| InfoHash::from_str(&f).expect("Failed to decode InfoHash String from DB!")); + + Ok(info_hash) } async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let info_hash_str = info_hash.to_string(); - match conn.exec_drop( + conn.exec_drop( "INSERT INTO whitelist (info_hash) VALUES (:info_hash_str)", params! { info_hash_str }, - ) { - Ok(_) => Ok(1), - Err(e) => { - debug!("{:?}", e); - Err(Error::InvalidQuery) - } - } + )?; + + Ok(1) } async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let info_hash = info_hash.to_string(); - match conn.exec_drop("DELETE FROM whitelist WHERE info_hash = :info_hash", params! 
{ info_hash }) { - Ok(_) => Ok(1), - Err(e) => { - debug!("{:?}", e); - Err(Error::InvalidQuery) - } - } + conn.exec_drop("DELETE FROM whitelist WHERE info_hash = :info_hash", params! { info_hash })?; + + Ok(1) } - async fn get_key_from_keys(&self, key: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + async fn get_key_from_keys(&self, key: &str) -> Result, Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; - match conn - .exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }) - .map_err(|_| Error::QueryReturnedNoRows)? - { - Some((key, valid_until)) => Ok(auth::Key { - key, - valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), - }), - None => Err(Error::InvalidQuery), - } + let query = + conn.exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }); + + let key = query?; + + Ok(key.map(|(key, expiry)| auth::Key { + key, + valid_until: Some(Duration::from_secs(expiry.unsigned_abs())), + })) } async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let key = auth_key.key.to_string(); let valid_until = auth_key.valid_until.unwrap_or(Duration::ZERO).as_secs().to_string(); - match conn.exec_drop( + conn.exec_drop( "INSERT INTO `keys` (`key`, valid_until) VALUES (:key, :valid_until)", params! { key, valid_until }, - ) { - Ok(_) => Ok(1), - Err(e) => { - debug!("{:?}", e); - Err(Error::InvalidQuery) - } - } + )?; + + Ok(1) } async fn remove_key_from_keys(&self, key: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; - - match conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! 
{ key }) { - Ok(_) => Ok(1), - Err(e) => { - debug!("{:?}", e); - Err(Error::InvalidQuery) - } - } + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! { key })?; + + Ok(1) } } diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 1d7caf052..3425b15c8 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -1,32 +1,32 @@ +use std::panic::Location; use std::str::FromStr; use async_trait::async_trait; -use log::debug; use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; +use super::driver::Driver; use crate::databases::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth; +const DRIVER: Driver = Driver::Sqlite3; + pub struct Sqlite { pool: Pool, } -impl Sqlite { +#[async_trait] +impl Database for Sqlite { /// # Errors /// /// Will return `r2d2::Error` if `db_path` is not able to create `SqLite` database. 
- pub fn new(db_path: &str) -> Result { + fn new(db_path: &str) -> Result { let cm = SqliteConnectionManager::file(db_path); - let pool = Pool::new(cm).expect("Failed to create r2d2 SQLite connection pool."); - Ok(Sqlite { pool }) + Pool::new(cm).map_or_else(|err| Err((err, Driver::Sqlite3).into()), |pool| Ok(Sqlite { pool })) } -} -#[async_trait] -impl Database for Sqlite { fn create_database_tables(&self) -> Result<(), Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( @@ -51,13 +51,13 @@ impl Database for Sqlite { );" .to_string(); - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + conn.execute(&create_whitelist_table, [])?; + conn.execute(&create_keys_table, [])?; + conn.execute(&create_torrents_table, [])?; - conn.execute(&create_whitelist_table, []) - .and_then(|_| conn.execute(&create_keys_table, [])) - .and_then(|_| conn.execute(&create_torrents_table, [])) - .map_err(|_| Error::InvalidQuery) - .map(|_| ()) + Ok(()) } fn drop_database_tables(&self) -> Result<(), Error> { @@ -73,17 +73,17 @@ impl Database for Sqlite { DROP TABLE keys;" .to_string(); - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; conn.execute(&drop_whitelist_table, []) .and_then(|_| conn.execute(&drop_torrents_table, [])) - .and_then(|_| conn.execute(&drop_keys_table, [])) - .map_err(|_| Error::InvalidQuery) - .map(|_| ()) + .and_then(|_| conn.execute(&drop_keys_table, []))?; + + Ok(()) } async fn load_persistent_torrents(&self) -> Result, Error> { - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; @@ -94,13 +94,16 @@ impl Database for Sqlite { Ok((info_hash, completed)) })?; + //torrent_iter?; + //let torrent_iter = torrent_iter.unwrap(); + let torrents: Vec<(InfoHash, u32)> = 
torrent_iter.filter_map(std::result::Result::ok).collect(); Ok(torrents) } async fn load_keys(&self) -> Result, Error> { - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?; @@ -120,7 +123,7 @@ impl Database for Sqlite { } async fn load_whitelist(&self) -> Result, Error> { - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash FROM whitelist")?; @@ -136,130 +139,117 @@ impl Database for Sqlite { } async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; - match conn.execute( + let insert = conn.execute( "INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2", [info_hash.to_string(), completed.to_string()], - ) { - Ok(updated) => { - if updated > 0 { - return Ok(()); - } - Err(Error::QueryReturnedNoRows) - } - Err(e) => { - debug!("{:?}", e); - Err(Error::InvalidQuery) - } + )?; + + if insert == 0 { + Err(Error::InsertFailed { + location: Location::caller(), + driver: DRIVER, + }) + } else { + Ok(()) } } - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result, Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; + let mut rows = stmt.query([info_hash])?; - match rows.next() { - Ok(row) => match row { - Some(row) => Ok(InfoHash::from_str(&row.get_unwrap::<_, String>(0)).unwrap()), - None => Err(Error::QueryReturnedNoRows), - }, - Err(e) => { - 
debug!("{:?}", e); - Err(Error::InvalidQuery) - } - } + let query = rows.next()?; + + Ok(query.map(|f| InfoHash::from_str(&f.get_unwrap::<_, String>(0)).unwrap())) } async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; - - match conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", [info_hash.to_string()]) { - Ok(updated) => { - if updated > 0 { - return Ok(updated); - } - Err(Error::QueryReturnedNoRows) - } - Err(e) => { - debug!("{:?}", e); - Err(Error::InvalidQuery) - } + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let insert = conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", [info_hash.to_string()])?; + + if insert == 0 { + Err(Error::InsertFailed { + location: Location::caller(), + driver: DRIVER, + }) + } else { + Ok(insert) } } async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; - - match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", [info_hash.to_string()]) { - Ok(updated) => { - if updated > 0 { - return Ok(updated); - } - Err(Error::QueryReturnedNoRows) - } - Err(e) => { - debug!("{:?}", e); - Err(Error::InvalidQuery) - } + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let deleted = conn.execute("DELETE FROM whitelist WHERE info_hash = ?", [info_hash.to_string()])?; + + if deleted == 1 { + // should only remove a single record. 
+ Ok(deleted) + } else { + Err(Error::DeleteFailed { + location: Location::caller(), + error_code: deleted, + driver: DRIVER, + }) } } - async fn get_key_from_keys(&self, key: &str) -> Result { - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + async fn get_key_from_keys(&self, key: &str) -> Result, Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; + let mut rows = stmt.query([key.to_string()])?; - if let Some(row) = rows.next()? { - let key: String = row.get(0).unwrap(); - let valid_until: i64 = row.get(1).unwrap(); + let key = rows.next()?; - Ok(auth::Key { - key, - valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), - }) - } else { - Err(Error::QueryReturnedNoRows) - } + Ok(key.map(|f| { + let expiry: i64 = f.get(1).unwrap(); + auth::Key { + key: f.get(0).unwrap(), + valid_until: Some(DurationSinceUnixEpoch::from_secs(expiry.unsigned_abs())), + } + })) } async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; - match conn.execute( + let insert = conn.execute( "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", [auth_key.key.to_string(), auth_key.valid_until.unwrap().as_secs().to_string()], - ) { - Ok(updated) => { - if updated > 0 { - return Ok(updated); - } - Err(Error::QueryReturnedNoRows) - } - Err(e) => { - debug!("{:?}", e); - Err(Error::InvalidQuery) - } + )?; + + if insert == 0 { + Err(Error::InsertFailed { + location: Location::caller(), + driver: DRIVER, + }) + } else { + Ok(insert) } } async fn remove_key_from_keys(&self, key: &str) -> Result { - let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; - - match conn.execute("DELETE FROM keys WHERE key = ?", [key]) { - Ok(updated) => { - if updated > 0 { - return Ok(updated); - } - Err(Error::QueryReturnedNoRows) - } - Err(e) 
=> { - debug!("{:?}", e); - Err(Error::InvalidQuery) - } + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let deleted = conn.execute("DELETE FROM keys WHERE key = ?", [key])?; + + if deleted == 1 { + // should only remove a single record. + Ok(deleted) + } else { + Err(Error::DeleteFailed { + location: Location::caller(), + error_code: deleted, + driver: DRIVER, + }) } } } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 4f1dab49b..e3b3cf88b 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -15,6 +15,7 @@ use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; use crate::config::Configuration; +use crate::databases::driver::Driver; use crate::databases::{self, Database}; use crate::protocol::info_hash::InfoHash; @@ -45,8 +46,8 @@ impl Tracker { config: &Arc, stats_event_sender: Option>, stats_repository: statistics::Repo, - ) -> Result { - let database = databases::connect(&config.db_driver, &config.db_path)?; + ) -> Result { + let database = Driver::build(&config.db_driver, &config.db_path)?; Ok(Tracker { config: config.clone(), From 14e1c8f8de737321b116b70c6777952597c3c133 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 7 Feb 2023 20:35:02 +0100 Subject: [PATCH 305/435] dev: located config errors --- src/config.rs | 87 +++++++++++++++++++++++++-------------------------- 1 file changed, 42 insertions(+), 45 deletions(-) diff --git a/src/config.rs b/src/config.rs index 3ca4b37d8..7ed0f9fa7 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,16 +1,21 @@ use std::collections::{HashMap, HashSet}; use std::net::IpAddr; +use std::panic::Location; use std::path::Path; use std::str::FromStr; +use std::sync::Arc; use std::{env, fs}; use config::{Config, ConfigError, File, FileFormat}; +use log::warn; use rand::{thread_rng, Rng}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, NoneAsEmptyString}; +use thiserror::Error; use {std, toml}; use crate::databases::driver::Driver; +use 
crate::located_error::{Located, LocatedError}; use crate::tracker::mode; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] @@ -74,13 +79,30 @@ pub struct Configuration { pub http_api: HttpApi, } -#[derive(Debug)] +#[derive(Error, Debug)] pub enum Error { - Message(String), - ConfigError(ConfigError), - IOError(std::io::Error), - ParseError(toml::de::Error), - TrackerModeIncompatible, + #[error("Unable to load from Environmental Variable: {source}")] + UnableToLoadFromEnvironmentVariable { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + + #[error("Default configuration created at: `{path}`, please review and reload tracker, {location}")] + CreatedNewConfigHalt { + location: &'static Location<'static>, + path: String, + }, + + #[error("Failed processing the configuration: {source}")] + ConfigError { source: LocatedError<'static, ConfigError> }, +} + +impl From for Error { + #[track_caller] + fn from(err: ConfigError) -> Self { + Self::ConfigError { + source: Located(err).into(), + } + } } /// This configuration is used for testing. 
It generates random config values so they do not collide @@ -129,20 +151,6 @@ fn random_port() -> u16 { rng.gen_range(49152..65535) } -impl std::fmt::Display for Error { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - Error::Message(e) => e.fmt(f), - Error::ConfigError(e) => e.fmt(f), - Error::IOError(e) => e.fmt(f), - Error::ParseError(e) => e.fmt(f), - Error::TrackerModeIncompatible => write!(f, "{self:?}"), - } - } -} - -impl std::error::Error for Error {} - impl Default for Configuration { fn default() -> Self { let mut configuration = Configuration { @@ -210,21 +218,19 @@ impl Configuration { let mut config = Config::default(); if Path::new(path).exists() { - config = config_builder - .add_source(File::with_name(path)) - .build() - .map_err(Error::ConfigError)?; + config = config_builder.add_source(File::with_name(path)).build()?; } else { - eprintln!("No config file found."); - eprintln!("Creating config file.."); + warn!("No config file found."); + warn!("Creating config file.."); let config = Configuration::default(); config.save_to_file(path)?; - return Err(Error::Message( - "Please edit the config.TOML and restart the tracker.".to_string(), - )); + return Err(Error::CreatedNewConfigHalt { + location: Location::caller(), + path: path.to_string(), + }); } - let torrust_config: Configuration = config.try_deserialize().map_err(Error::ConfigError)?; + let torrust_config: Configuration = config.try_deserialize()?; Ok(torrust_config) } @@ -237,15 +243,13 @@ impl Configuration { Ok(config_toml) => { let config_builder = Config::builder() .add_source(File::from_str(&config_toml, FileFormat::Toml)) - .build() - .map_err(Error::ConfigError)?; - let config = config_builder.try_deserialize().map_err(Error::ConfigError)?; + .build()?; + let config = config_builder.try_deserialize()?; Ok(config) } - Err(_) => Err(Error::Message(format!( - "No environment variable for configuration found: {}", - &config_env_var_name - ))), + Err(e) => 
Err(Error::UnableToLoadFromEnvironmentVariable { + source: (Arc::new(e) as Arc).into(), + }), } } @@ -262,7 +266,7 @@ impl Configuration { #[cfg(test)] mod tests { - use crate::config::{Configuration, Error}; + use crate::config::Configuration; #[cfg(test)] fn default_config_toml() -> String { @@ -381,13 +385,6 @@ mod tests { assert_eq!(configuration, Configuration::default()); } - #[test] - fn configuration_error_could_be_displayed() { - let error = Error::TrackerModeIncompatible; - - assert_eq!(format!("{error}"), "TrackerModeIncompatible"); - } - #[test] fn http_api_configuration_should_check_if_it_contains_a_token() { let configuration = Configuration::default(); From 4e0c99314a619ccf67b0993b2ef78079016b48dc Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 7 Feb 2023 20:35:54 +0100 Subject: [PATCH 306/435] dev: located tracker and auth errors --- src/http/handlers.rs | 9 ++---- src/tracker/auth.rs | 67 +++++++++++++++++++++++++++++++++--------- src/tracker/error.rs | 20 +++++++++++++ src/tracker/mod.rs | 28 +++++++++++++----- src/tracker/torrent.rs | 10 ------- src/udp/error.rs | 18 ++++++------ 6 files changed, 106 insertions(+), 46 deletions(-) create mode 100644 src/tracker/error.rs diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 1170b7188..129e0d9ea 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -23,12 +23,9 @@ pub async fn authenticate( tracker: Arc, ) -> Result<(), Error> { tracker.authenticate_request(info_hash, auth_key).await.map_err(|e| match e { - torrent::Error::TorrentNotWhitelisted => Error::TorrentNotWhitelisted, - torrent::Error::PeerNotAuthenticated => Error::PeerNotAuthenticated, - torrent::Error::PeerKeyNotValid => Error::PeerKeyNotValid, - torrent::Error::NoPeersFound => Error::NoPeersFound, - torrent::Error::CouldNotSendResponse => Error::InternalServer, - torrent::Error::InvalidInfoHash => Error::InvalidInfo, + tracker::error::Error::TorrentNotWhitelisted { info_hash, location } => 
Error::TorrentNotWhitelisted, + tracker::error::Error::PeerNotAuthenticated { location } => Error::PeerNotAuthenticated, + tracker::error::Error::PeerKeyNotValid { key, source } => Error::PeerKeyNotValid, }) } diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 3b8af96a1..197e0dc37 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -1,12 +1,17 @@ +use std::panic::Location; use std::str::FromStr; +use std::sync::Arc; use std::time::Duration; -use derive_more::{Display, Error}; +use chrono::{DateTime, NaiveDateTime, Utc}; +use derive_more::Display; use log::debug; use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use serde::{Deserialize, Serialize}; +use thiserror::Error; +use crate::located_error::LocatedError; use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time, TimeNow}; use crate::protocol::common::AUTH_KEY_LENGTH; @@ -38,14 +43,19 @@ pub fn verify(auth_key: &Key) -> Result<(), Error> { let current_time: DurationSinceUnixEpoch = Current::now(); match auth_key.valid_until { - Some(valid_untill) => { - if valid_untill < current_time { - Err(Error::KeyExpired) + Some(valid_until) => { + if valid_until < current_time { + Err(Error::KeyExpired { + location: Location::caller(), + }) } else { Ok(()) } } - None => Err(Error::KeyInvalid), + None => Err(Error::UnableToReadKey { + location: Location::caller(), + key: Box::new(auth_key.clone()), + }), } } @@ -57,6 +67,29 @@ pub struct Key { pub valid_until: Option, } +impl std::fmt::Display for Key { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "key: `{}`, valid until `{}`", + self.key, + match self.valid_until { + Some(duration) => format!( + "{}", + DateTime::::from_utc( + NaiveDateTime::from_timestamp( + i64::try_from(duration.as_secs()).expect("Overflow of i64 seconds, very future!"), + duration.subsec_nanos(), + ), + Utc + ) + ), + None => "Empty!?".to_string(), + } + ) + } +} + impl Key { #[must_use] pub fn 
from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { @@ -108,21 +141,27 @@ impl FromStr for KeyId { } } -#[derive(Debug, Display, PartialEq, Eq, Error)] +#[derive(Debug, Error)] #[allow(dead_code)] pub enum Error { - #[display(fmt = "Key could not be verified.")] - KeyVerificationError, - #[display(fmt = "Key is invalid.")] - KeyInvalid, - #[display(fmt = "Key has expired.")] - KeyExpired, + #[error("Key could not be verified: {source}")] + KeyVerificationError { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + #[error("Failed to read key: {key}, {location}")] + UnableToReadKey { + location: &'static Location<'static>, + key: Box, + }, + #[error("Key has expired, {location}")] + KeyExpired { location: &'static Location<'static> }, } impl From for Error { fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { - eprintln!("{e}"); - Error::KeyVerificationError + Error::KeyVerificationError { + source: (Arc::new(e) as Arc).into(), + } } } diff --git a/src/tracker/error.rs b/src/tracker/error.rs new file mode 100644 index 000000000..51bcbf3bb --- /dev/null +++ b/src/tracker/error.rs @@ -0,0 +1,20 @@ +use std::panic::Location; + +use crate::located_error::LocatedError; + +#[derive(thiserror::Error, Debug, Clone)] +pub enum Error { + #[error("The supplied key: {key:?}, is not valid: {source}")] + PeerKeyNotValid { + key: super::auth::Key, + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + #[error("The peer is not authenticated, {location}")] + PeerNotAuthenticated { location: &'static Location<'static> }, + + #[error("The torrent: {info_hash}, is not whitelisted, {location}")] + TorrentNotWhitelisted { + info_hash: crate::protocol::info_hash::InfoHash, + location: &'static Location<'static>, + }, +} diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index e3b3cf88b..acbf7d536 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -1,4 +1,5 @@ pub mod auth; +pub mod error; pub mod mode; pub mod peer; pub mod 
services; @@ -8,12 +9,14 @@ pub mod torrent; use std::collections::btree_map::Entry; use std::collections::BTreeMap; use std::net::SocketAddr; +use std::panic::Location; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; +use self::error::Error; use crate::config::Configuration; use crate::databases::driver::Driver; use crate::databases::{self, Database}; @@ -41,7 +44,7 @@ pub struct TorrentsMetrics { impl Tracker { /// # Errors /// - /// Will return a `r2d2::Error` if unable to connect to database. + /// Will return a `databases::error::Error` if unable to connect to database. pub fn new( config: &Arc, stats_event_sender: Option>, @@ -98,7 +101,10 @@ impl Tracker { pub async fn verify_auth_key(&self, auth_key: &auth::Key) -> Result<(), auth::Error> { // todo: use auth::KeyId for the function argument `auth_key` match self.keys.read().await.get(&auth_key.key) { - None => Err(auth::Error::KeyInvalid), + None => Err(auth::Error::UnableToReadKey { + location: Location::caller(), + key: Box::new(auth_key.clone()), + }), Some(key) => auth::verify(key), } } @@ -204,7 +210,7 @@ impl Tracker { /// Will return a `torrent::Error::PeerNotAuthenticated` if the `key` is `None`. /// /// Will return a `torrent::Error::TorrentNotWhitelisted` if the the Tracker is in listed mode and the `info_hash` is not whitelisted. 
- pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), torrent::Error> { + pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), Error> { // no authentication needed in public mode if self.is_public() { return Ok(()); @@ -214,19 +220,27 @@ impl Tracker { if self.is_private() { match key { Some(key) => { - if self.verify_auth_key(key).await.is_err() { - return Err(torrent::Error::PeerKeyNotValid); + if let Err(e) = self.verify_auth_key(key).await { + return Err(Error::PeerKeyNotValid { + key: key.clone(), + source: (Arc::new(e) as Arc).into(), + }); } } None => { - return Err(torrent::Error::PeerNotAuthenticated); + return Err(Error::PeerNotAuthenticated { + location: Location::caller(), + }); } } } // check if info_hash is whitelisted if self.is_whitelisted() && !self.is_info_hash_whitelisted(info_hash).await { - return Err(torrent::Error::TorrentNotWhitelisted); + return Err(Error::TorrentNotWhitelisted { + info_hash: *info_hash, + location: Location::caller(), + }); } Ok(()) diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index e292dff54..b5535a932 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -99,16 +99,6 @@ pub struct SwamStats { pub leechers: u32, } -#[derive(Debug)] -pub enum Error { - TorrentNotWhitelisted, - PeerNotAuthenticated, - PeerKeyNotValid, - NoPeersFound, - CouldNotSendResponse, - InvalidInfoHash, -} - #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; diff --git a/src/udp/error.rs b/src/udp/error.rs index c5fbb3929..2fbaaf984 100644 --- a/src/udp/error.rs +++ b/src/udp/error.rs @@ -1,6 +1,6 @@ use thiserror::Error; -use crate::tracker::torrent; +use crate::tracker; #[derive(Error, Debug)] pub enum Error { @@ -35,15 +35,15 @@ pub enum Error { BadRequest, } -impl From for Error { - fn from(e: torrent::Error) -> Self { +impl From for Error { + fn from(e: tracker::error::Error) -> Self { match e { - 
torrent::Error::TorrentNotWhitelisted => Error::TorrentNotWhitelisted, - torrent::Error::PeerNotAuthenticated => Error::PeerNotAuthenticated, - torrent::Error::PeerKeyNotValid => Error::PeerKeyNotValid, - torrent::Error::NoPeersFound => Error::NoPeersFound, - torrent::Error::CouldNotSendResponse => Error::InternalServer, - torrent::Error::InvalidInfoHash => Error::InvalidInfoHash, + tracker::error::Error::TorrentNotWhitelisted { + info_hash: _, + location: _, + } => Error::TorrentNotWhitelisted, + tracker::error::Error::PeerNotAuthenticated { location: _ } => Error::PeerNotAuthenticated, + tracker::error::Error::PeerKeyNotValid { key: _, source: _ } => Error::PeerKeyNotValid, } } } From 662123bbf82a5107729d26539e1f050852d8e59e Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 7 Feb 2023 21:12:58 +0100 Subject: [PATCH 307/435] dev: located udp errors --- src/udp/connection_cookie.rs | 5 +++- src/udp/error.rs | 56 +++++++++++------------------------- src/udp/handlers.rs | 18 ++++++++++-- 3 files changed, 36 insertions(+), 43 deletions(-) diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs index 3daa3e0f6..ef2a8b219 100644 --- a/src/udp/connection_cookie.rs +++ b/src/udp/connection_cookie.rs @@ -1,4 +1,5 @@ use std::net::SocketAddr; +use std::panic::Location; use aquatic_udp_protocol::ConnectionId; @@ -49,7 +50,9 @@ pub fn check(remote_address: &SocketAddr, connection_cookie: &Cookie) -> Result< return Ok(checking_time_extent); } } - Err(Error::InvalidConnectionId) + Err(Error::InvalidConnectionId { + location: Location::caller(), + }) } mod cookie_builder { diff --git a/src/udp/error.rs b/src/udp/error.rs index 2fbaaf984..de66eb2bf 100644 --- a/src/udp/error.rs +++ b/src/udp/error.rs @@ -1,49 +1,27 @@ +use std::panic::Location; + use thiserror::Error; -use crate::tracker; +use crate::located_error::LocatedError; #[derive(Error, Debug)] pub enum Error { - #[error("internal server error")] - InternalServer, + #[error("tracker server error: 
{source}")] + TrackerError { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, - #[error("info_hash is either missing or invalid")] - InvalidInfoHash, + #[error("internal server error: {message}, {location}")] + InternalServer { + location: &'static Location<'static>, + message: String, + }, #[error("connection id could not be verified")] - InvalidConnectionId, - - #[error("could not find remote address")] - AddressNotFound, - - #[error("torrent has no peers")] - NoPeersFound, - - #[error("torrent not on whitelist")] - TorrentNotWhitelisted, - - #[error("peer not authenticated")] - PeerNotAuthenticated, - - #[error("invalid authentication key")] - PeerKeyNotValid, - - #[error("exceeded info_hash limit")] - ExceededInfoHashLimit, - - #[error("bad request")] - BadRequest, -} + InvalidConnectionId { location: &'static Location<'static> }, -impl From for Error { - fn from(e: tracker::error::Error) -> Self { - match e { - tracker::error::Error::TorrentNotWhitelisted { - info_hash: _, - location: _, - } => Error::TorrentNotWhitelisted, - tracker::error::Error::PeerNotAuthenticated { location: _ } => Error::PeerNotAuthenticated, - tracker::error::Error::PeerKeyNotValid { key: _, source: _ } => Error::PeerKeyNotValid, - } - } + #[error("bad request: {source}")] + BadRequest { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 076710fb6..b36399f89 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -1,4 +1,5 @@ use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; +use std::panic::Location; use std::sync::Arc; use aquatic_udp_protocol::{ @@ -14,7 +15,10 @@ use crate::udp::error::Error; use crate::udp::request::AnnounceWrapper; pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: Arc) -> Response { - match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|_| Error::InternalServer) { + match 
Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|e| Error::InternalServer { + message: format!("{e:?}"), + location: Location::caller(), + }) { Ok(request) => { let transaction_id = match &request { Request::Connect(connect_request) => connect_request.transaction_id, @@ -28,7 +32,12 @@ pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: A } } // bad request - Err(_) => handle_error(&Error::BadRequest, TransactionId(0)), + Err(e) => handle_error( + &Error::BadRequest { + source: (Arc::new(e) as Arc).into(), + }, + TransactionId(0), + ), } } @@ -90,7 +99,10 @@ pub async fn handle_announce( tracker .authenticate_request(&wrapped_announce_request.info_hash, &None) - .await?; + .await + .map_err(|e| Error::TrackerError { + source: (Arc::new(e) as Arc).into(), + })?; let peer = peer::Peer::from_udp_announce_request( &wrapped_announce_request.announce_request, From 08a712dfbf5c86afc8ec49df0ed5c4e80c35b149 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 7 Feb 2023 21:22:41 +0100 Subject: [PATCH 308/435] dev: located error for http --- src/http/error.rs | 58 ++++++++++++++++++++++++------------------- src/http/filters.rs | 37 +++++++++++++++++++++------ src/http/handlers.rs | 42 ++++++++++++++++++++----------- tests/http/asserts.rs | 20 ++++++--------- 4 files changed, 97 insertions(+), 60 deletions(-) diff --git a/src/http/error.rs b/src/http/error.rs index b6c08a8ba..f07c32f6d 100644 --- a/src/http/error.rs +++ b/src/http/error.rs @@ -1,34 +1,40 @@ +use std::panic::Location; + use thiserror::Error; use warp::reject::Reject; +use crate::located_error::LocatedError; + #[derive(Error, Debug)] pub enum Error { - #[error("internal server error")] - InternalServer, - - #[error("info_hash is either missing or invalid")] - InvalidInfo, - - #[error("peer_id is either missing or invalid")] - InvalidPeerId, - - #[error("could not find remote address")] - AddressNotFound, - - #[error("torrent has no peers")] - NoPeersFound, 
- - #[error("torrent not on whitelist")] - TorrentNotWhitelisted, - - #[error("peer not authenticated")] - PeerNotAuthenticated, - - #[error("invalid authentication key")] - PeerKeyNotValid, - - #[error("exceeded info_hash limit")] - ExceededInfoHashLimit, + #[error("tracker server error: {source}")] + TrackerError { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + + #[error("internal server error: {message}, {location}")] + InternalServer { + location: &'static Location<'static>, + message: String, + }, + + #[error("no valid infohashes found, {location}")] + EmptyInfoHash { location: &'static Location<'static> }, + + #[error("peer_id is either missing or invalid, {location}")] + InvalidPeerId { location: &'static Location<'static> }, + + #[error("could not find remote address: {message}, {location}")] + AddressNotFound { + location: &'static Location<'static>, + message: String, + }, + + #[error("too many infohashes: {message}, {location}")] + TwoManyInfoHashes { + location: &'static Location<'static>, + message: String, + }, } impl Reject for Error {} diff --git a/src/http/filters.rs b/src/http/filters.rs index 0fe369eba..2760c995c 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -1,5 +1,6 @@ use std::convert::Infallible; use std::net::{IpAddr, SocketAddr}; +use std::panic::Location; use std::str::FromStr; use std::sync::Arc; @@ -87,9 +88,14 @@ fn info_hashes(raw_query: &String) -> WebResult> { } if info_hashes.len() > MAX_SCRAPE_TORRENTS as usize { - Err(reject::custom(Error::ExceededInfoHashLimit)) + Err(reject::custom(Error::TwoManyInfoHashes { + location: Location::caller(), + message: format! 
{"found: {}, but limit is: {}",info_hashes.len(), MAX_SCRAPE_TORRENTS}, + })) } else if info_hashes.is_empty() { - Err(reject::custom(Error::InvalidInfo)) + Err(reject::custom(Error::EmptyInfoHash { + location: Location::caller(), + })) } else { Ok(info_hashes) } @@ -114,7 +120,9 @@ fn peer_id(raw_query: &String) -> WebResult { // peer_id must be 20 bytes if peer_id_bytes.len() != 20 { - return Err(reject::custom(Error::InvalidPeerId)); + return Err(reject::custom(Error::InvalidPeerId { + location: Location::caller(), + })); } // clone peer_id_bytes into fixed length array @@ -128,18 +136,26 @@ fn peer_id(raw_query: &String) -> WebResult { match peer_id { Some(id) => Ok(id), - None => Err(reject::custom(Error::InvalidPeerId)), + None => Err(reject::custom(Error::InvalidPeerId { + location: Location::caller(), + })), } } /// Get `PeerAddress` from `RemoteAddress` or Forwarded fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option)) -> WebResult { if !on_reverse_proxy && remote_addr.is_none() { - return Err(reject::custom(Error::AddressNotFound)); + return Err(reject::custom(Error::AddressNotFound { + location: Location::caller(), + message: "neither on have remote address or on a reverse proxy".to_string(), + })); } if on_reverse_proxy && x_forwarded_for.is_none() { - return Err(reject::custom(Error::AddressNotFound)); + return Err(reject::custom(Error::AddressNotFound { + location: Location::caller(), + message: "must have a x-forwarded-for when using a reverse proxy".to_string(), + })); } if on_reverse_proxy { @@ -151,7 +167,14 @@ fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, tracker: Arc, ) -> Result<(), Error> { - tracker.authenticate_request(info_hash, auth_key).await.map_err(|e| match e { - tracker::error::Error::TorrentNotWhitelisted { info_hash, location } => Error::TorrentNotWhitelisted, - tracker::error::Error::PeerNotAuthenticated { location } => Error::PeerNotAuthenticated, - 
tracker::error::Error::PeerKeyNotValid { key, source } => Error::PeerKeyNotValid, - }) + tracker + .authenticate_request(info_hash, auth_key) + .await + .map_err(|e| Error::TrackerError { + source: (Arc::new(e) as Arc).into(), + }) } /// Handle announce request @@ -39,9 +41,7 @@ pub async fn handle_announce( auth_key: Option, tracker: Arc, ) -> WebResult { - authenticate(&announce_request.info_hash, &auth_key, tracker.clone()) - .await - .map_err(reject::custom)?; + authenticate(&announce_request.info_hash, &auth_key, tracker.clone()).await?; debug!("{:?}", announce_request); @@ -158,7 +158,10 @@ fn send_announce_response( if let Some(1) = announce_request.compact { match res.write_compact() { Ok(body) => Ok(Response::new(body)), - Err(_) => Err(reject::custom(Error::InternalServer)), + Err(e) => Err(reject::custom(Error::InternalServer { + message: e.to_string(), + location: Location::caller(), + })), } } else { Ok(Response::new(res.write().into())) @@ -171,7 +174,10 @@ fn send_scrape_response(files: HashMap) -> WebR match res.write() { Ok(body) => Ok(Response::new(body)), - Err(_) => Err(reject::custom(Error::InternalServer)), + Err(e) => Err(reject::custom(Error::InternalServer { + message: e.to_string(), + location: Location::caller(), + })), } } @@ -181,15 +187,21 @@ fn send_scrape_response(files: HashMap) -> WebR /// /// Will not return a error, `Infallible`, but instead convert the `ServerError` into a `Response`. 
pub fn send_error(r: &Rejection) -> std::result::Result { - let body = if let Some(server_error) = r.find::() { - debug!("{:?}", server_error); + let warp_reject_error = r.find::(); + + let body = if let Some(error) = warp_reject_error { + debug!("{:?}", error); response::Error { - failure_reason: server_error.to_string(), + failure_reason: error.to_string(), } .write() } else { response::Error { - failure_reason: Error::InternalServer.to_string(), + failure_reason: Error::InternalServer { + message: "Undefined".to_string(), + location: Location::caller(), + } + .to_string(), } .write() }; diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index cd6bcb499..211a7bb33 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -91,7 +91,7 @@ pub async fn assert_invalid_info_hash_error_response(response: Response) { assert_error_bencoded( &response.text().await.unwrap(), - "info_hash is either missing or invalid", + "no valid infohashes found", Location::caller(), ); } @@ -109,25 +109,21 @@ pub async fn assert_invalid_peer_id_error_response(response: Response) { pub async fn assert_torrent_not_in_whitelist_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded( - &response.text().await.unwrap(), - "torrent not on whitelist", - Location::caller(), - ); + assert_error_bencoded(&response.text().await.unwrap(), "is not whitelisted", Location::caller()); } pub async fn assert_peer_not_authenticated_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded(&response.text().await.unwrap(), "peer not authenticated", Location::caller()); + assert_error_bencoded( + &response.text().await.unwrap(), + "The peer is not authenticated", + Location::caller(), + ); } pub async fn assert_invalid_authentication_key_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded( - &response.text().await.unwrap(), - "invalid authentication key", - 
Location::caller(), - ); + assert_error_bencoded(&response.text().await.unwrap(), "is not valid", Location::caller()); } From 0dc305023772ef5d65d1bf1ae6c41daff44f797e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 6 Feb 2023 15:37:38 +0000 Subject: [PATCH 309/435] feat(http): [#160] scaffolding for HTTP tracker using Axum We are going to migrate the HTTP tracker from Warp to Axum. This is the basic scaffolding for Axum. Tests have been duplicated to test the new Axum implementation. The setup allows executing both versions: the Warp version on production and both versions (Warp and Axum) on testing env. --- src/http/axum/handlers.rs | 9 + src/http/axum/mod.rs | 5 + src/http/axum/resources/mod.rs | 1 + src/http/axum/resources/ok.rs | 4 + src/http/axum/responses.rs | 10 + src/http/axum/routes.rs | 13 + src/http/axum/server.rs | 43 + src/http/mod.rs | 9 + src/jobs/http_tracker.rs | 68 +- src/setup.rs | 3 +- tests/http/server.rs | 33 +- tests/http_tracker.rs | 1376 ++++++++++++++++++++++++++++++-- 12 files changed, 1507 insertions(+), 67 deletions(-) create mode 100644 src/http/axum/handlers.rs create mode 100644 src/http/axum/mod.rs create mode 100644 src/http/axum/resources/mod.rs create mode 100644 src/http/axum/resources/ok.rs create mode 100644 src/http/axum/responses.rs create mode 100644 src/http/axum/routes.rs create mode 100644 src/http/axum/server.rs diff --git a/src/http/axum/handlers.rs b/src/http/axum/handlers.rs new file mode 100644 index 000000000..b2f20786b --- /dev/null +++ b/src/http/axum/handlers.rs @@ -0,0 +1,9 @@ +use axum::response::Json; + +use super::resources::ok::Ok; +use super::responses::ok_response; + +#[allow(clippy::unused_async)] +pub async fn get_status_handler() -> Json { + ok_response() +} diff --git a/src/http/axum/mod.rs b/src/http/axum/mod.rs new file mode 100644 index 000000000..57773d810 --- /dev/null +++ b/src/http/axum/mod.rs @@ -0,0 +1,5 @@ +pub mod handlers; +pub mod resources; +pub mod responses; +pub mod routes; +pub 
mod server; diff --git a/src/http/axum/resources/mod.rs b/src/http/axum/resources/mod.rs new file mode 100644 index 000000000..a493c2ac2 --- /dev/null +++ b/src/http/axum/resources/mod.rs @@ -0,0 +1 @@ +pub mod ok; diff --git a/src/http/axum/resources/ok.rs b/src/http/axum/resources/ok.rs new file mode 100644 index 000000000..adc56e6ea --- /dev/null +++ b/src/http/axum/resources/ok.rs @@ -0,0 +1,4 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Ok {} diff --git a/src/http/axum/responses.rs b/src/http/axum/responses.rs new file mode 100644 index 000000000..9c5896b35 --- /dev/null +++ b/src/http/axum/responses.rs @@ -0,0 +1,10 @@ +// Resource responses + +use axum::Json; + +use super::resources::ok::Ok; + +#[must_use] +pub fn ok_response() -> Json { + Json(Ok {}) +} diff --git a/src/http/axum/routes.rs b/src/http/axum/routes.rs new file mode 100644 index 000000000..9ab58938f --- /dev/null +++ b/src/http/axum/routes.rs @@ -0,0 +1,13 @@ +use std::sync::Arc; + +use axum::routing::get; +use axum::Router; + +use super::handlers::get_status_handler; +use crate::tracker::Tracker; + +pub fn router(_tracker: &Arc) -> Router { + Router::new() + // Status + .route("/status", get(get_status_handler)) +} diff --git a/src/http/axum/server.rs b/src/http/axum/server.rs new file mode 100644 index 000000000..541dda33e --- /dev/null +++ b/src/http/axum/server.rs @@ -0,0 +1,43 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use axum_server::tls_rustls::RustlsConfig; +use axum_server::Handle; +use futures::Future; +use log::info; +use warp::hyper; + +use super::routes::router; +use crate::tracker::Tracker; + +pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { + let app = router(tracker); + + let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); + + server.with_graceful_shutdown(async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + 
info!("Stopping Torrust HTTP tracker server on http://{} ...", socket_addr); + }) +} + +pub fn start_tls( + socket_addr: SocketAddr, + ssl_config: RustlsConfig, + tracker: &Arc, +) -> impl Future> { + let app = router(tracker); + + let handle = Handle::new(); + let shutdown_handle = handle.clone(); + + tokio::spawn(async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + info!("Stopping Torrust HTTP tracker server on https://{} ...", socket_addr); + shutdown_handle.shutdown(); + }); + + axum_server::bind_rustls(socket_addr, ssl_config) + .handle(handle) + .serve(app.into_make_service()) +} diff --git a/src/http/mod.rs b/src/http/mod.rs index fa4c263b5..9cd21aab5 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -9,6 +9,9 @@ //! - //! - //! + +use serde::{Deserialize, Serialize}; +pub mod axum; pub mod error; pub mod filters; pub mod handlers; @@ -19,3 +22,9 @@ pub mod server; pub type Bytes = u64; pub type WebResult = std::result::Result; + +#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] +pub enum Version { + Warp, + Axum, +} diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index 8e38039b7..6b069301d 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -1,21 +1,31 @@ use std::net::SocketAddr; use std::sync::Arc; +use axum_server::tls_rustls::RustlsConfig; use log::{info, warn}; use tokio::sync::oneshot; use tokio::task::JoinHandle; use crate::config::HttpTracker; +use crate::http::axum::server; use crate::http::server::Http; +use crate::http::Version; use crate::tracker; #[derive(Debug)] pub struct ServerJobStarted(); +pub async fn start_job(config: &HttpTracker, tracker: Arc, version: Version) -> JoinHandle<()> { + match version { + Version::Warp => start_warp(config, tracker.clone()).await, + Version::Axum => start_axum(config, tracker.clone()).await, + } +} + /// # Panics /// -/// It would panic if the `config::HttpTracker` struct would contain an inappropriate 
values. -pub async fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { +/// It would panic if the `config::HttpTracker` struct would contain inappropriate values. +async fn start_warp(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { let bind_addr = config .bind_address .parse::() @@ -68,3 +78,57 @@ pub async fn start_job(config: &HttpTracker, tracker: Arc) -> join_handle } + +/// # Panics +/// +/// It would panic if the `config::HttpTracker` struct would contain inappropriate values. +async fn start_axum(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { + let bind_addr = config + .bind_address + .parse::() + .expect("Tracker API bind_address invalid."); + let ssl_enabled = config.ssl_enabled; + let ssl_cert_path = config.ssl_cert_path.clone(); + let ssl_key_path = config.ssl_key_path.clone(); + + let (tx, rx) = oneshot::channel::(); + + // Run the API server + let join_handle = tokio::spawn(async move { + if !ssl_enabled { + info!("Starting Torrust HTTP tracker server on: http://{}", bind_addr); + + let handle = server::start(bind_addr, &tracker); + + tx.send(ServerJobStarted()) + .expect("the HTTP tracker server should not be dropped"); + + if let Ok(()) = handle.await { + info!("Torrust HTTP tracker server on http://{} stopped", bind_addr); + } + } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { + info!("Starting Torrust HTTP tracker server on: https://{}", bind_addr); + + let ssl_config = RustlsConfig::from_pem_file(ssl_cert_path.unwrap(), ssl_key_path.unwrap()) + .await + .unwrap(); + + let handle = server::start_tls(bind_addr, ssl_config, &tracker); + + tx.send(ServerJobStarted()) + .expect("the HTTP tracker server should not be dropped"); + + if let Ok(()) = handle.await { + info!("Torrust HTTP tracker server on https://{} stopped", bind_addr); + } + } + }); + + // Wait until the HTTP tracker server job is running + match rx.await { + Ok(_msg) => info!("Torrust HTTP tracker server started"), + Err(e) => 
panic!("the HTTP tracker server was dropped: {e}"), + } + + join_handle +} diff --git a/src/setup.rs b/src/setup.rs index 31be3baac..3461667cc 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -4,6 +4,7 @@ use log::warn; use tokio::task::JoinHandle; use crate::config::Configuration; +use crate::http::Version; use crate::jobs::{http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; use crate::tracker; @@ -47,7 +48,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Ve if !http_tracker_config.enabled { continue; } - jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone()).await); + jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone(), Version::Warp).await); } // Start HTTP API diff --git a/tests/http/server.rs b/tests/http/server.rs index e48ecd88d..e5266eee5 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -3,6 +3,7 @@ use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; use torrust_tracker::config::{ephemeral_configuration, Configuration}; +use torrust_tracker::http::Version; use torrust_tracker::jobs::http_tracker; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::mode::Mode; @@ -13,24 +14,24 @@ use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use super::connection_info::ConnectionInfo; /// Starts a HTTP tracker with mode "public" in settings -pub async fn start_public_http_tracker() -> Server { +pub async fn start_public_http_tracker(version: Version) -> Server { let mut configuration = ephemeral_configuration(); configuration.mode = Mode::Public; - start_custom_http_tracker(Arc::new(configuration)).await + start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with mode "listed" in settings -pub async fn start_whitelisted_http_tracker() -> Server { +pub async fn start_whitelisted_http_tracker(version: Version) -> Server { let mut configuration = ephemeral_configuration(); configuration.mode = 
Mode::Listed; - start_custom_http_tracker(Arc::new(configuration)).await + start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with mode "private" in settings -pub async fn start_private_http_tracker() -> Server { +pub async fn start_private_http_tracker(version: Version) -> Server { let mut configuration = ephemeral_configuration(); configuration.mode = Mode::Private; - start_custom_http_tracker(Arc::new(configuration)).await + start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with a wildcard IPV6 address. @@ -40,7 +41,7 @@ pub async fn start_private_http_tracker() -> Server { /// [[http_trackers]] /// bind_address = "[::]:7070" /// ``` -pub async fn start_ipv6_http_tracker() -> Server { +pub async fn start_ipv6_http_tracker(version: Version) -> Server { let mut configuration = ephemeral_configuration(); // Change socket address to "wildcard address" (unspecified address which means any IP address) @@ -49,7 +50,7 @@ pub async fn start_ipv6_http_tracker() -> Server { let new_ipv6_socket_address = format!("[::]:{}", socket_addr.port()); configuration.http_trackers[0].bind_address = new_ipv6_socket_address; - start_custom_http_tracker(Arc::new(configuration)).await + start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with an specific `external_ip`. @@ -58,10 +59,10 @@ pub async fn start_ipv6_http_tracker() -> Server { /// ```text /// external_ip = "2.137.87.41" /// ``` -pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr) -> Server { +pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr, version: Version) -> Server { let mut configuration = ephemeral_configuration(); configuration.external_ip = Some(external_ip.to_string()); - start_custom_http_tracker(Arc::new(configuration)).await + start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker `on_reverse_proxy`. 
@@ -70,24 +71,24 @@ pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr) -> Server /// ```text /// on_reverse_proxy = true /// ``` -pub async fn start_http_tracker_on_reverse_proxy() -> Server { +pub async fn start_http_tracker_on_reverse_proxy(version: Version) -> Server { let mut configuration = ephemeral_configuration(); configuration.on_reverse_proxy = true; - start_custom_http_tracker(Arc::new(configuration)).await + start_custom_http_tracker(Arc::new(configuration), version).await } -pub async fn start_default_http_tracker() -> Server { +pub async fn start_default_http_tracker(version: Version) -> Server { let configuration = tracker_configuration(); - start_custom_http_tracker(configuration.clone()).await + start_custom_http_tracker(configuration.clone(), version).await } pub fn tracker_configuration() -> Arc { Arc::new(ephemeral_configuration()) } -pub async fn start_custom_http_tracker(configuration: Arc) -> Server { +pub async fn start_custom_http_tracker(configuration: Arc, version: Version) -> Server { let server = start(&configuration); - http_tracker::start_job(&configuration.http_trackers[0], server.tracker.clone()).await; + http_tracker::start_job(&configuration.http_trackers[0], server.tracker.clone(), version).await; server } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 44bb8609d..201f8e705 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -4,7 +4,7 @@ mod common; mod http; -mod http_tracker_server { +mod warp_http_tracker_server { mod for_all_config_modes { @@ -26,6 +26,7 @@ mod http_tracker_server { use local_ip_address::local_ip; use reqwest::Response; + use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -46,7 +47,7 @@ mod http_tracker_server { #[tokio::test] async fn should_respond_if_only_the_mandatory_fields_are_provided() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server 
= start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -61,7 +62,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_request_is_empty() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; @@ -70,7 +71,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_a_mandatory_field_is_missing() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; // Without `info_hash` param @@ -111,7 +112,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -133,7 +134,7 @@ mod http_tracker_server { // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP if there. // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request header is tracker is running `on_reverse_proxy`. 
- let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -148,7 +149,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_downloaded_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -167,7 +168,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_uploaded_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -186,7 +187,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_peer_id_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -212,7 +213,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_port_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -231,7 +232,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_left_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -252,7 +253,7 @@ mod http_tracker_server { async fn should_not_fail_when_the_event_param_is_invalid() { // All invalid values are ignored as if the `event` param were empty - let http_tracker_server 
= start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -279,7 +280,7 @@ mod http_tracker_server { #[tokio::test] async fn should_not_fail_when_the_compact_param_is_invalid() { - let http_tracker_server = start_default_http_tracker().await; + let http_tracker_server = start_default_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -298,7 +299,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let response = Client::new(http_tracker_server.get_connection_info()) .announce( @@ -323,7 +324,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -361,7 +362,7 @@ mod http_tracker_server { #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let peer = PeerBuilder::default().build(); @@ -388,7 +389,7 @@ mod http_tracker_server { // Tracker Returns Compact Peer Lists // https://www.bittorrent.org/beps/bep_0023.html - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -427,7 +428,7 @@ mod http_tracker_server { // code-review: the HTTP tracker does not return the compact response by default if the "compact" // param is not provided in the announce URL. The BEP 23 suggest to do so. - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -463,7 +464,7 @@ mod http_tracker_server { #[tokio::test] async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; Client::new(http_tracker_server.get_connection_info()) .announce(&QueryBuilder::default().query()) @@ -476,7 +477,7 @@ mod http_tracker_server { #[tokio::test] async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { - let http_tracker_server = start_ipv6_http_tracker().await; + let http_tracker_server = start_ipv6_http_tracker(Version::Warp).await; Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) @@ -491,7 +492,7 @@ mod http_tracker_server { async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; Client::new(http_tracker_server.get_connection_info()) .announce( @@ -508,7 +509,7 @@ mod http_tracker_server { #[tokio::test] async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; Client::new(http_tracker_server.get_connection_info()) .announce(&QueryBuilder::default().query()) @@ -521,7 +522,7 @@ mod http_tracker_server { #[tokio::test] async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { - let http_tracker_server = start_ipv6_http_tracker().await; + let http_tracker_server = start_ipv6_http_tracker(Version::Warp).await; Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) @@ -536,7 +537,7 @@ mod http_tracker_server { async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; Client::new(http_tracker_server.get_connection_info()) .announce( @@ -553,7 +554,7 @@ mod http_tracker_server { #[tokio::test] async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let client_ip = local_ip().unwrap(); @@ -583,7 +584,8 @@ mod http_tracker_server { 127.0.0.1 external_ip = "2.137.87.41" */ - let http_tracker_server = start_http_tracker_with_external_ip(&IpAddr::from_str("2.137.87.41").unwrap()).await; + let http_tracker_server = + start_http_tracker_with_external_ip(&IpAddr::from_str("2.137.87.41").unwrap(), Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); @@ -614,9 +616,11 @@ mod http_tracker_server { ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" */ - let http_tracker_server = - start_http_tracker_with_external_ip(&IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()) - .await; + let http_tracker_server = start_http_tracker_with_external_ip( + &IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), + Version::Warp, + ) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); @@ -647,7 +651,7 @@ mod http_tracker_server { 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 */ - let http_tracker_server = start_http_tracker_on_reverse_proxy().await; + let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Warp).await; let 
info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -686,6 +690,7 @@ mod http_tracker_server { use std::net::IpAddr; use std::str::FromStr; + use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -699,7 +704,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_request_is_empty() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; assert_internal_server_error_response(response).await; @@ -707,7 +712,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let http_tracker_server = start_public_http_tracker().await; + let http_tracker_server = start_public_http_tracker(Version::Warp).await; let mut params = QueryBuilder::default().query().params(); @@ -725,7 +730,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let http_tracker = start_public_http_tracker().await; + let http_tracker = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -763,7 +768,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { - let http_tracker = start_public_http_tracker().await; + let http_tracker = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -801,7 +806,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let http_tracker = start_public_http_tracker().await; + let 
http_tracker = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -818,7 +823,7 @@ mod http_tracker_server { #[tokio::test] async fn should_accept_multiple_infohashes() { - let http_tracker = start_public_http_tracker().await; + let http_tracker = start_public_http_tracker(Version::Warp).await; let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); @@ -842,7 +847,7 @@ mod http_tracker_server { #[tokio::test] async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let http_tracker = start_public_http_tracker().await; + let http_tracker = start_public_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -861,7 +866,7 @@ mod http_tracker_server { #[tokio::test] async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { - let http_tracker = start_ipv6_http_tracker().await; + let http_tracker = start_ipv6_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -885,6 +890,7 @@ mod http_tracker_server { mod and_receiving_an_announce_request { use std::str::FromStr; + use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; @@ -894,7 +900,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let http_tracker_server = start_whitelisted_http_tracker().await; + let http_tracker_server = start_whitelisted_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -908,7 +914,7 @@ mod http_tracker_server { 
#[tokio::test] async fn should_allow_announcing_a_whitelisted_torrent() { - let http_tracker_server = start_whitelisted_http_tracker().await; + let http_tracker_server = start_whitelisted_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -929,6 +935,7 @@ mod http_tracker_server { mod receiving_an_scrape_request { use std::str::FromStr; + use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -941,7 +948,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let http_tracker = start_whitelisted_http_tracker().await; + let http_tracker = start_whitelisted_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -970,7 +977,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let http_tracker = start_whitelisted_http_tracker().await; + let http_tracker = start_whitelisted_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1020,6 +1027,7 @@ mod http_tracker_server { use std::str::FromStr; use std::time::Duration; + use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::KeyId; @@ -1033,7 +1041,7 @@ mod http_tracker_server { #[tokio::test] async fn should_respond_to_authenticated_peers() { - let http_tracker_server = start_private_http_tracker().await; + let http_tracker_server = start_private_http_tracker(Version::Warp).await; let key = http_tracker_server .tracker @@ -1050,7 +1058,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let http_tracker_server = 
start_private_http_tracker().await; + let http_tracker_server = start_private_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1063,7 +1071,7 @@ mod http_tracker_server { #[tokio::test] async fn should_fail_if_the_peer_authentication_key_is_not_valid() { - let http_tracker_server = start_private_http_tracker().await; + let http_tracker_server = start_private_http_tracker(Version::Warp).await; // The tracker does not have this key let unregistered_key_id = KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); @@ -1081,6 +1089,7 @@ mod http_tracker_server { use std::str::FromStr; use std::time::Duration; + use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::KeyId; use torrust_tracker::tracker::peer; @@ -1094,7 +1103,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let http_tracker = start_private_http_tracker().await; + let http_tracker = start_private_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1123,7 +1132,7 @@ mod http_tracker_server { #[tokio::test] async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let http_tracker = start_private_http_tracker().await; + let http_tracker = start_private_http_tracker(Version::Warp).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1165,7 +1174,1278 @@ mod http_tracker_server { async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { // There is not authentication error - let http_tracker = start_private_http_tracker().await; + let http_tracker = start_private_http_tracker(Version::Warp).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + 
+ http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let false_key_id: KeyId = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); + + let response = Client::authenticated(http_tracker.get_connection_info(), false_key_id) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + } + } + + mod configured_as_private_and_whitelisted { + + mod and_receiving_an_announce_request {} + + mod receiving_an_scrape_request {} + } +} + +mod axum_http_tracker_server { + + // WIP: migration HTTP from Warp to Axum + + use torrust_tracker::http::Version; + + use crate::http::client::Client; + use crate::http::server::start_default_http_tracker; + + #[tokio::test] + async fn should_return_the_status() { + // This is a temporary test to test the new Axum HTTP tracker server scaffolding + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let response = Client::new(http_tracker_server.get_connection_info()).get("status").await; + + assert_eq!(response.status(), 200); + assert_eq!(response.text().await.unwrap(), "{}"); + } + + mod for_all_config_modes { + + mod receiving_an_announce_request { + + // Announce request documentation: + // + // BEP 03. The BitTorrent Protocol Specification + // https://www.bittorrent.org/beps/bep_0003.html + // + // BEP 23. 
Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Announce + + use std::net::{IpAddr, Ipv6Addr}; + use std::str::FromStr; + + use local_ip_address::local_ip; + use reqwest::Response; + use torrust_tracker::http::Version; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; + use crate::http::asserts::{ + assert_announce_response, assert_compact_announce_response, assert_empty_announce_response, + assert_internal_server_error_response, assert_invalid_info_hash_error_response, + assert_invalid_peer_id_error_response, assert_is_announce_response, + }; + use crate::http::client::Client; + use crate::http::requests::announce::{Compact, QueryBuilder}; + use crate::http::responses; + use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; + use crate::http::server::{ + start_default_http_tracker, start_http_tracker_on_reverse_proxy, start_http_tracker_with_external_ip, + start_ipv6_http_tracker, start_public_http_tracker, + }; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_respond_if_only_the_mandatory_fields_are_provided() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + params.remove_optional_params(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_is_announce_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_request_is_empty() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; + + assert_internal_server_error_response(response).await; + } + + 
//#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_a_mandatory_field_is_missing() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + // Without `info_hash` param + + let mut params = QueryBuilder::default().query().params(); + + params.info_hash = None; + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_invalid_info_hash_error_response(response).await; + + // Without `peer_id` param + + let mut params = QueryBuilder::default().query().params(); + + params.peer_id = None; + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_invalid_peer_id_error_response(response).await; + + // Without `port` param + + let mut params = QueryBuilder::default().query().params(); + + params.port = None; + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_info_hash_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + for invalid_value in &invalid_info_hashes() { + params.set("info_hash", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_invalid_info_hash_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_not_fail_when_the_peer_address_param_is_invalid() { + // AnnounceQuery does not even contain the `peer_addr` + // The peer IP is obtained in two ways: + // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP if there. + // 2. 
If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request header is tracker is running `on_reverse_proxy`. + + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_is_announce_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_downloaded_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("downloaded", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_uploaded_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("uploaded", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_peer_id_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "-qB0000000000000000", // 19 bytes + "-qB000000000000000000", // 
21 bytes + ]; + + for invalid_value in invalid_values { + params.set("peer_id", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_invalid_peer_id_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_port_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("port", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_left_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("left", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_not_fail_when_the_event_param_is_invalid() { + // All invalid values are ignored as if the `event` param were empty + + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "Started", // It should be lowercase + "Stopped", // It should be lowercase + "Completed", // It should be lowercase + ]; + + for invalid_value in invalid_values { + params.set("event", invalid_value); + + let response = 
Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_is_announce_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_not_fail_when_the_compact_param_is_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("compact", invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_internal_server_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &QueryBuilder::default() + .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) + .query(), + ) + .await; + + assert_announce_response( + response, + &Announce { + complete: 1, // the peer for this test + incomplete: 0, + interval: http_tracker_server.tracker.config.announce_interval, + min_interval: http_tracker_server.tracker.config.min_announce_interval, + peers: vec![], + }, + ) + .await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_list_of_previously_announced_peers() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + + // Add the Peer 1 + http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2. 
This new peer is non included on the response peer list + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .query(), + ) + .await; + + // It should only contain teh previously announced peer + assert_announce_response( + response, + &Announce { + complete: 2, + incomplete: 0, + interval: http_tracker_server.tracker.config.announce_interval, + min_interval: http_tracker_server.tracker.config.min_announce_interval, + peers: vec![DictionaryPeer::from(previously_announced_peer)], + }, + ) + .await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let peer = PeerBuilder::default().build(); + + // Add a peer + http_tracker_server.add_torrent(&info_hash, &peer).await; + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer.peer_id) + .query(); + + assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(&announce_query) + .await; + + assert_empty_announce_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_compact_response() { + // Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html + + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + + // Add the Peer 1 + 
http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2 accepting compact responses + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_compact(Compact::Accepted) + .query(), + ) + .await; + + let expected_response = responses::announce::Compact { + complete: 2, + incomplete: 0, + interval: 120, + min_interval: 120, + peers: CompactPeerList::new([CompactPeer::new(&previously_announced_peer.peer_addr)].to_vec()), + }; + + assert_compact_announce_response(response, &expected_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_not_return_the_compact_response_by_default() { + // code-review: the HTTP tracker does not return the compact response by default if the "compact" + // param is not provided in the announce URL. The BEP 23 suggest to do so. + + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + + // Add the Peer 1 + http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2 without passing the "compact" param + // By default it should respond with the compact peer list + // https://www.bittorrent.org/beps/bep_0023.html + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .without_compact() + .query(), + ) + .await; + + assert!(!is_a_compact_announce_response(response).await); + } + + async fn is_a_compact_announce_response(response: Response) -> bool { + let bytes = 
response.bytes().await.unwrap(); + let compact_announce = serde_bencode::from_bytes::(&bytes); + compact_announce.is_ok() + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + Client::new(http_tracker_server.get_connection_info()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { + let http_tracker_server = start_ipv6_http_tracker(Version::Axum).await; + + Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 1); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { + // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
+ + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + Client::new(http_tracker_server.get_connection_info()) + .announce( + &QueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 0); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + Client::new(http_tracker_server.get_connection_info()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, 1); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { + let http_tracker_server = start_ipv6_http_tracker(Version::Axum).await; + + Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 1); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { + // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
+ + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + Client::new(http_tracker_server.get_connection_info()) + .announce( + &QueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; + + let stats = http_tracker_server.tracker.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 0); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let client_ip = local_ip().unwrap(); + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), client_ip); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + /* We assume that both the client and tracker share the same public IP. 
+ + client <-> tracker <-> Internet + 127.0.0.1 external_ip = "2.137.87.41" + */ + + let http_tracker_server = + start_http_tracker_with_external_ip(&IpAddr::from_str("2.137.87.41").unwrap(), Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), http_tracker_server.tracker.config.get_ext_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + /* We assume that both the client and tracker share the same public IP. 
+ + client <-> tracker <-> Internet + ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" + */ + + let http_tracker_server = start_http_tracker_with_external_ip( + &IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), + Version::Axum, + ) + .await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), http_tracker_server.tracker.config.get_ext_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( + ) { + /* + client <-> http proxy <-> tracker <-> Internet + ip: header: config: peer addr: + 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 + */ + + let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let client = Client::new(http_tracker_server.get_connection_info()); + + let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); + + // todo: shouldn't be the the leftmost IP address? + // THe application is taken the the rightmost IP address. 
See function http::filters::peer_addr + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For + client + .announce_with_header( + &announce_query, + "X-Forwarded-For", + "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", + ) + .await; + + let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); + } + } + + mod receiving_an_scrape_request { + + // Scrape documentation: + // + // BEP 48. Tracker Protocol Extension: Scrape + // https://www.bittorrent.org/beps/bep_0048.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Scrape + + use std::net::IpAddr; + use std::str::FromStr; + + use torrust_tracker::http::Version; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; + use crate::http::asserts::{assert_internal_server_error_response, assert_scrape_response}; + use crate::http::client::Client; + use crate::http::requests; + use crate::http::requests::scrape::QueryBuilder; + use crate::http::responses::scrape::{self, File, ResponseBuilder}; + use crate::http::server::{start_ipv6_http_tracker, start_public_http_tracker}; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_request_is_empty() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; + + assert_internal_server_error_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_info_hash_param_is_invalid() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let mut params = QueryBuilder::default().query().params(); + + for invalid_value in &invalid_info_hashes() { + 
params.set_one_info_hash_param(invalid_value); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + // code-review: it's not returning the invalid info hash error + assert_internal_server_error_response(response).await; + } + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { + let http_tracker = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { + let http_tracker = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_no_bytes_pending_to_download() + .build(), + ) + .await; + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = 
ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 1, + downloaded: 0, + incomplete: 0, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { + let http_tracker = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_accept_multiple_infohashes() { + let http_tracker = start_public_http_tracker(Version::Axum).await; + + let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .add_info_hash(&info_hash1) + .add_info_hash(&info_hash2) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file(info_hash1.bytes(), File::zeroed()) + .add_file(info_hash2.bytes(), File::zeroed()) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { + let http_tracker = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + Client::new(http_tracker.get_connection_info()) + .scrape( + 
&requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let stats = http_tracker.tracker.get_stats().await; + + assert_eq!(stats.tcp4_scrapes_handled, 1); + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { + let http_tracker = start_ipv6_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + Client::bind(http_tracker.get_connection_info(), IpAddr::from_str("::1").unwrap()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let stats = http_tracker.tracker.get_stats().await; + + assert_eq!(stats.tcp6_scrapes_handled, 1); + } + } + } + + mod configured_as_whitelisted { + + mod and_receiving_an_announce_request { + use std::str::FromStr; + + use torrust_tracker::http::Version; + use torrust_tracker::protocol::info_hash::InfoHash; + + use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; + use crate::http::client::Client; + use crate::http::requests::announce::QueryBuilder; + use crate::http::server::start_whitelisted_http_tracker; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { + let http_tracker_server = start_whitelisted_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_torrent_not_in_whitelist_error_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_allow_announcing_a_whitelisted_torrent() { + let http_tracker_server = start_whitelisted_http_tracker(Version::Axum).await; + + let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker_server + .tracker + .add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the torrent to the whitelist"); + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_is_announce_response(response).await; + } + } + + mod receiving_an_scrape_request { + use std::str::FromStr; + + use torrust_tracker::http::Version; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::PeerBuilder; + use crate::http::asserts::assert_scrape_response; + use crate::http::client::Client; + use crate::http::requests; + use crate::http::responses::scrape::{File, ResponseBuilder}; + use crate::http::server::start_whitelisted_http_tracker; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { + let http_tracker = start_whitelisted_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { + let http_tracker = start_whitelisted_http_tracker(Version::Axum).await; + + let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + http_tracker + .tracker + .add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the torrent to the whitelist"); + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + } + } + + mod configured_as_private { + + mod and_receiving_an_announce_request { + use std::str::FromStr; + use std::time::Duration; + + use torrust_tracker::http::Version; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::auth::KeyId; + + use crate::http::asserts::{ + assert_invalid_authentication_key_error_response, assert_is_announce_response, + assert_peer_not_authenticated_error_response, + }; + use crate::http::client::Client; + use crate::http::requests::announce::QueryBuilder; + use crate::http::server::start_private_http_tracker; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_respond_to_authenticated_peers() { + let http_tracker_server = start_private_http_tracker(Version::Axum).await; + + let key = http_tracker_server + .tracker + .generate_auth_key(Duration::from_secs(60)) + .await + .unwrap(); + + let response = Client::authenticated(http_tracker_server.get_connection_info(), key.id()) + .announce(&QueryBuilder::default().query()) + .await; + + assert_is_announce_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn 
should_fail_if_the_peer_has_not_provided_the_authentication_key() { + let http_tracker_server = start_private_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_peer_not_authenticated_error_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_if_the_peer_authentication_key_is_not_valid() { + let http_tracker_server = start_private_http_tracker(Version::Axum).await; + + // The tracker does not have this key + let unregistered_key_id = KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key_id) + .announce(&QueryBuilder::default().query()) + .await; + + assert_invalid_authentication_key_error_response(response).await; + } + } + + mod receiving_an_scrape_request { + + use std::str::FromStr; + use std::time::Duration; + + use torrust_tracker::http::Version; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::auth::KeyId; + use torrust_tracker::tracker::peer; + + use crate::common::fixtures::PeerBuilder; + use crate::http::asserts::assert_scrape_response; + use crate::http::client::Client; + use crate::http::requests; + use crate::http::responses::scrape::{File, ResponseBuilder}; + use crate::http::server::start_private_http_tracker; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { + let http_tracker = start_private_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + 
.with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(http_tracker.get_connection_info()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { + let http_tracker = start_private_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + http_tracker + .add_torrent( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let key = http_tracker.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + + let response = Client::authenticated(http_tracker.get_connection_info(), key.id()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { + // There is not authentication error + + let http_tracker = start_private_http_tracker(Version::Axum).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); From 7dc48387920510427f63e964e4dbf29b56d3cf87 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Feb 2023 10:08:28 +0000 Subject: [PATCH 310/435] refactor(http): [#160] extract functions 
for percent decoding --- src/http/filters.rs | 22 +++--- src/http/mod.rs | 1 + src/http/percent_encoding.rs | 66 ++++++++++++++++ src/protocol/info_hash.rs | 71 +++++++++++++++++ src/tracker/peer.rs | 134 ++++++++++++++++++++++++++++++++ tests/http/bencode.rs | 15 ---- tests/http/mod.rs | 23 +++++- tests/http/requests/announce.rs | 7 +- tests/http/requests/scrape.rs | 5 +- tests/http/responses/scrape.rs | 2 +- tests/http_tracker.rs | 67 +++------------- 11 files changed, 318 insertions(+), 95 deletions(-) create mode 100644 src/http/percent_encoding.rs delete mode 100644 tests/http/bencode.rs diff --git a/src/http/filters.rs b/src/http/filters.rs index 2760c995c..e02eac523 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -7,6 +7,7 @@ use std::sync::Arc; use warp::{reject, Filter, Rejection}; use super::error::Error; +use super::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use super::{request, WebResult}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; @@ -78,9 +79,11 @@ fn info_hashes(raw_query: &String) -> WebResult> { for v in split_raw_query { if v.contains("info_hash") { + // get raw percent encoded infohash let raw_info_hash = v.split('=').collect::>()[1]; - let info_hash_bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); - let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes)); + + let info_hash = percent_decode_info_hash(raw_info_hash); + if let Ok(ih) = info_hash { info_hashes.push(ih); } @@ -112,24 +115,17 @@ fn peer_id(raw_query: &String) -> WebResult { for v in split_raw_query { // look for the peer_id param if v.contains("peer_id") { - // get raw percent_encoded peer_id + // get raw percent encoded peer id let raw_peer_id = v.split('=').collect::>()[1]; - // decode peer_id - let peer_id_bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); - - // peer_id must be 20 bytes - if peer_id_bytes.len() != 20 { + if let Ok(id) = 
percent_decode_peer_id(raw_peer_id) { + peer_id = Some(id); + } else { return Err(reject::custom(Error::InvalidPeerId { location: Location::caller(), })); } - // clone peer_id_bytes into fixed length array - let mut byte_arr: [u8; 20] = Default::default(); - byte_arr.clone_from_slice(peer_id_bytes.as_slice()); - - peer_id = Some(peer::Id(byte_arr)); break; } } diff --git a/src/http/mod.rs b/src/http/mod.rs index 9cd21aab5..15f7abb52 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -15,6 +15,7 @@ pub mod axum; pub mod error; pub mod filters; pub mod handlers; +pub mod percent_encoding; pub mod request; pub mod response; pub mod routes; diff --git a/src/http/percent_encoding.rs b/src/http/percent_encoding.rs new file mode 100644 index 000000000..9b5b79ed7 --- /dev/null +++ b/src/http/percent_encoding.rs @@ -0,0 +1,66 @@ +use crate::protocol::info_hash::{ConversionError, InfoHash}; +use crate::tracker::peer::{self, IdConversionError}; + +/// # Errors +/// +/// Will return `Err` if if the decoded bytes do not represent a valid `InfoHash`. +pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result { + let bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); + InfoHash::try_from(bytes) +} + +/// # Errors +/// +/// Will return `Err` if if the decoded bytes do not represent a valid `peer::Id`. 
+pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result { + let bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); + peer::Id::try_from(bytes) +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::peer; + + #[test] + fn it_should_decode_a_percent_encoded_info_hash() { + let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; + + let info_hash = percent_decode_info_hash(encoded_infohash).unwrap(); + + assert_eq!( + info_hash, + InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap() + ); + } + + #[test] + fn it_should_fail_decoding_an_invalid_percent_encoded_info_hash() { + let invalid_encoded_infohash = "invalid percent-encoded infohash"; + + let info_hash = percent_decode_info_hash(invalid_encoded_infohash); + + assert!(info_hash.is_err()); + } + + #[test] + fn it_should_decode_a_percent_encoded_peer_id() { + let encoded_peer_id = "%2DqB00000000000000000"; + + let peer_id = percent_decode_peer_id(encoded_peer_id).unwrap(); + + assert_eq!(peer_id, peer::Id(*b"-qB00000000000000000")); + } + + #[test] + fn it_should_fail_decoding_an_invalid_percent_encoded_peer_id() { + let invalid_encoded_peer_id = "invalid percent-encoded peer id"; + + let peer_id = percent_decode_peer_id(invalid_encoded_peer_id); + + assert!(peer_id.is_err()); + } +} diff --git a/src/protocol/info_hash.rs b/src/protocol/info_hash.rs index 83a595c1f..320636725 100644 --- a/src/protocol/info_hash.rs +++ b/src/protocol/info_hash.rs @@ -1,7 +1,24 @@ +use std::panic::Location; + +use thiserror::Error; + #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] pub struct InfoHash(pub [u8; 20]); +const INFO_HASH_BYTES_LEN: usize = 20; + impl InfoHash { + /// # Panics + /// + /// Will panic if byte slice does not contains the exact amount of bytes need for the `InfoHash`. 
+ #[must_use] + pub fn from_bytes(bytes: &[u8]) -> Self { + assert_eq!(bytes.len(), INFO_HASH_BYTES_LEN); + let mut ret = Self([0u8; INFO_HASH_BYTES_LEN]); + ret.0.clone_from_slice(bytes); + ret + } + /// For readability, when accessing the bytes array #[must_use] pub fn bytes(&self) -> [u8; 20] { @@ -57,6 +74,40 @@ impl std::convert::From<[u8; 20]> for InfoHash { } } +#[derive(Error, Debug)] +pub enum ConversionError { + #[error("not enough bytes for infohash: {message} {location}")] + NotEnoughBytes { + location: &'static Location<'static>, + message: String, + }, + #[error("too many bytes for infohash: {message} {location}")] + TooManyBytes { + location: &'static Location<'static>, + message: String, + }, +} + +impl TryFrom> for InfoHash { + type Error = ConversionError; + + fn try_from(bytes: Vec) -> Result { + if bytes.len() < INFO_HASH_BYTES_LEN { + return Err(ConversionError::NotEnoughBytes { + location: Location::caller(), + message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, + }); + } + if bytes.len() > INFO_HASH_BYTES_LEN { + return Err(ConversionError::TooManyBytes { + location: Location::caller(), + message: format! 
{"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, + }); + } + Ok(Self::from_bytes(&bytes)) + } +} + impl serde::ser::Serialize for InfoHash { fn serialize(&self, serializer: S) -> Result { let mut buffer = [0u8; 40]; @@ -166,6 +217,26 @@ mod tests { ); } + #[test] + fn an_info_hash_can_be_created_from_a_byte_vector() { + let info_hash: InfoHash = [255u8; 20].to_vec().try_into().unwrap(); + + assert_eq!( + info_hash, + InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + ); + } + + #[test] + fn it_should_fail_trying_to_create_an_info_hash_from_a_byte_vector_with_less_than_20_bytes() { + assert!(InfoHash::try_from([255u8; 19].to_vec()).is_err()); + } + + #[test] + fn it_should_fail_trying_to_create_an_info_hash_from_a_byte_vector_with_more_than_20_bytes() { + assert!(InfoHash::try_from([255u8; 21].to_vec()).is_err()); + } + #[test] fn an_info_hash_can_be_serialized() { let s = ContainingInfoHash { diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 3f639f970..16c96e04b 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -1,8 +1,10 @@ use std::net::{IpAddr, SocketAddr}; +use std::panic::Location; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde; use serde::Serialize; +use thiserror::Error; use crate::http::request::Announce; use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time}; @@ -91,6 +93,69 @@ impl Peer { #[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] pub struct Id(pub [u8; 20]); +const PEER_ID_BYTES_LEN: usize = 20; + +#[derive(Error, Debug)] +pub enum IdConversionError { + #[error("not enough bytes for peer id: {message} {location}")] + NotEnoughBytes { + location: &'static Location<'static>, + message: String, + }, + #[error("too many bytes for peer id: {message} {location}")] + TooManyBytes { + location: &'static Location<'static>, + message: String, + }, +} + +impl Id { + /// # Panics + /// + /// Will panic if byte slice does not contains the exact 
amount of bytes needed for the `Id`. + #[must_use] + pub fn from_bytes(bytes: &[u8]) -> Self { + assert_eq!(bytes.len(), PEER_ID_BYTES_LEN); + let mut ret = Self([0u8; PEER_ID_BYTES_LEN]); + ret.0.clone_from_slice(bytes); + ret + } +} + +impl From<[u8; 20]> for Id { + fn from(bytes: [u8; 20]) -> Self { + Id(bytes) + } +} + +impl TryFrom> for Id { + type Error = IdConversionError; + + fn try_from(bytes: Vec) -> Result { + if bytes.len() < PEER_ID_BYTES_LEN { + return Err(IdConversionError::NotEnoughBytes { + location: Location::caller(), + message: format! {"got {} bytes, expected {}", bytes.len(), PEER_ID_BYTES_LEN}, + }); + } + if bytes.len() > PEER_ID_BYTES_LEN { + return Err(IdConversionError::TooManyBytes { + location: Location::caller(), + message: format! {"got {} bytes, expected {}", bytes.len(), PEER_ID_BYTES_LEN}, + }); + } + Ok(Self::from_bytes(&bytes)) + } +} + +impl std::str::FromStr for Id { + type Err = IdConversionError; + + fn from_str(s: &str) -> Result { + Self::try_from(s.as_bytes().to_vec()) + } +} + impl std::fmt::Display for Id { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.to_hex_string() { @@ -239,6 +304,75 @@ mod test { mod torrent_peer_id { use crate::tracker::peer; + #[test] + fn should_be_instantiated_from_a_byte_slice() { + let id = peer::Id::from_bytes(&[ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + + let expected_id = peer::Id([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + + assert_eq!(id, expected_id); + } + + #[test] + #[should_panic] + fn should_fail_trying_to_instantiate_from_a_byte_slice_with_less_than_20_bytes() { + let less_than_20_bytes = [0; 19]; + let _ = peer::Id::from_bytes(&less_than_20_bytes); + } + + #[test] + #[should_panic] + fn should_fail_trying_to_instantiate_from_a_byte_slice_with_more_than_20_bytes() { + let more_than_20_bytes = [0; 21]; + let _ =
peer::Id::from_bytes(&more_than_20_bytes); + } + + #[test] + fn should_be_converted_from_a_20_byte_array() { + let id = peer::Id::from([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + + let expected_id = peer::Id([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + + assert_eq!(id, expected_id); + } + + #[test] + fn should_be_converted_from_a_byte_vector() { + let id = peer::Id::try_from( + [ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ] + .to_vec(), + ) + .unwrap(); + + let expected_id = peer::Id([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + + assert_eq!(id, expected_id); + } + + #[test] + #[should_panic] + fn should_fail_trying_to_convert_from_a_byte_vector_with_less_than_20_bytes() { + let _ = peer::Id::try_from([0; 19].to_vec()).unwrap(); + } + + #[test] + #[should_panic] + fn should_fail_trying_to_convert_from_a_byte_vector_with_more_than_20_bytes() { + let _ = peer::Id::try_from([0; 21].to_vec()).unwrap(); + } + #[test] fn should_be_converted_to_hex_string() { let id = peer::Id(*b"-qB00000000000000000"); diff --git a/tests/http/bencode.rs b/tests/http/bencode.rs deleted file mode 100644 index d107089cf..000000000 --- a/tests/http/bencode.rs +++ /dev/null @@ -1,15 +0,0 @@ -pub type ByteArray20 = [u8; 20]; - -pub struct InfoHash(ByteArray20); - -impl InfoHash { - pub fn new(vec: &[u8]) -> Self { - let mut byte_array_20: ByteArray20 = Default::default(); - byte_array_20.clone_from_slice(vec); - Self(byte_array_20) - } - - pub fn bytes(&self) -> ByteArray20 { - self.0 - } -} diff --git a/tests/http/mod.rs b/tests/http/mod.rs index 87087026f..8c1e3c995 100644 --- a/tests/http/mod.rs +++ b/tests/http/mod.rs @@ -1,7 +1,28 @@ pub mod asserts; -pub mod bencode; pub mod client; pub mod connection_info; pub mod requests; pub mod responses; pub mod server; + +use 
percent_encoding::NON_ALPHANUMERIC; + +pub type ByteArray20 = [u8; 20]; + +pub fn percent_encode_byte_array(bytes: &ByteArray20) -> String { + percent_encoding::percent_encode(bytes, NON_ALPHANUMERIC).to_string() +} + +pub struct InfoHash(ByteArray20); + +impl InfoHash { + pub fn new(vec: &[u8]) -> Self { + let mut byte_array_20: ByteArray20 = Default::default(); + byte_array_20.clone_from_slice(vec); + Self(byte_array_20) + } + + pub fn bytes(&self) -> ByteArray20 { + self.0 + } +} diff --git a/tests/http/requests/announce.rs b/tests/http/requests/announce.rs index a8ebc95f8..87aa3425f 100644 --- a/tests/http/requests/announce.rs +++ b/tests/http/requests/announce.rs @@ -2,12 +2,11 @@ use std::fmt; use std::net::{IpAddr, Ipv4Addr}; use std::str::FromStr; -use percent_encoding::NON_ALPHANUMERIC; use serde_repr::Serialize_repr; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Id; -use crate::http::bencode::ByteArray20; +use crate::http::{percent_encode_byte_array, ByteArray20}; pub struct Query { pub info_hash: ByteArray20, @@ -211,11 +210,11 @@ impl QueryParams { let compact = announce_query.compact.as_ref().map(std::string::ToString::to_string); Self { - info_hash: Some(percent_encoding::percent_encode(&announce_query.info_hash, NON_ALPHANUMERIC).to_string()), + info_hash: Some(percent_encode_byte_array(&announce_query.info_hash)), peer_addr: Some(announce_query.peer_addr.to_string()), downloaded: Some(announce_query.downloaded.to_string()), uploaded: Some(announce_query.uploaded.to_string()), - peer_id: Some(percent_encoding::percent_encode(&announce_query.peer_id, NON_ALPHANUMERIC).to_string()), + peer_id: Some(percent_encode_byte_array(&announce_query.peer_id)), port: Some(announce_query.port.to_string()), left: Some(announce_query.left.to_string()), event, diff --git a/tests/http/requests/scrape.rs b/tests/http/requests/scrape.rs index 6ab46974b..979dad540 100644 --- a/tests/http/requests/scrape.rs +++ 
b/tests/http/requests/scrape.rs @@ -1,10 +1,9 @@ use std::fmt; use std::str::FromStr; -use percent_encoding::NON_ALPHANUMERIC; use torrust_tracker::protocol::info_hash::InfoHash; -use crate::http::bencode::ByteArray20; +use crate::http::{percent_encode_byte_array, ByteArray20}; pub struct Query { pub info_hash: Vec, @@ -111,7 +110,7 @@ impl QueryParams { let info_hashes = scrape_query .info_hash .iter() - .map(|info_hash_bytes| percent_encoding::percent_encode(info_hash_bytes, NON_ALPHANUMERIC).to_string()) + .map(percent_encode_byte_array) .collect::>(); Self { info_hash: info_hashes } diff --git a/tests/http/responses/scrape.rs b/tests/http/responses/scrape.rs index 5bf938ebe..1aea517cf 100644 --- a/tests/http/responses/scrape.rs +++ b/tests/http/responses/scrape.rs @@ -4,7 +4,7 @@ use std::str; use serde::{self, Deserialize, Serialize}; use serde_bencode::value::Value; -use crate::http::bencode::{ByteArray20, InfoHash}; +use crate::http::{ByteArray20, InfoHash}; #[derive(Debug, PartialEq, Default)] pub struct Response { diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 201f8e705..60219d9fe 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1,6 +1,14 @@ /// Integration tests for HTTP tracker server /// -/// cargo test `http_tracker_server` -- --nocapture +/// Warp version: +/// ```text +/// cargo test `warp_http_tracker_server` -- --nocapture +/// ``` +/// +/// Axum version (WIP): +/// ```text +/// cargo test `axum_http_tracker_server` -- --nocapture +/// ``` mod common; mod http; @@ -2483,60 +2491,3 @@ mod axum_http_tracker_server { mod receiving_an_scrape_request {} } } - -mod percent_encoding { - // todo: these operations are used in the HTTP tracker but they have not been extracted into independent functions. - // These tests document the operations. This behavior could be move to some functions int he future if they are extracted.
- - use std::str::FromStr; - - use percent_encoding::NON_ALPHANUMERIC; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - - #[test] - fn how_to_encode_an_info_hash() { - let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); - - let encoded_info_hash = percent_encoding::percent_encode(&info_hash.0, NON_ALPHANUMERIC).to_string(); - - assert_eq!(encoded_info_hash, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"); - } - - #[test] - fn how_to_decode_an_info_hash() { - let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; - - let info_hash_bytes = percent_encoding::percent_decode_str(encoded_infohash).collect::>(); - let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes)).unwrap(); - - assert_eq!( - info_hash, - InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap() - ); - } - - #[test] - fn how_to_encode_a_peer_id() { - let peer_id = peer::Id(*b"-qB00000000000000000"); - - let encoded_peer_id = percent_encoding::percent_encode(&peer_id.0, NON_ALPHANUMERIC).to_string(); - - assert_eq!(encoded_peer_id, "%2DqB00000000000000000"); - } - - #[test] - fn how_to_decode_a_peer_id() { - let encoded_peer_id = "%2DqB00000000000000000"; - - let bytes_vec = percent_encoding::percent_decode_str(encoded_peer_id).collect::>(); - - // Clone peer_id_bytes into fixed length array - let mut peer_id_bytes: [u8; 20] = Default::default(); - peer_id_bytes.clone_from_slice(bytes_vec.as_slice()); - - let peer_id = peer::Id(peer_id_bytes); - - assert_eq!(peer_id, peer::Id(*b"-qB00000000000000000")); - } -} From 9c25febf41652f23fc21144369e1e8b0b0f1d40a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 9 Feb 2023 09:33:32 +0000 Subject: [PATCH 311/435] feat(http): [#160] Axum extractor to extract announce request params from url query WIP: only for mandatory params. 
--- cSpell.json | 1 + src/http/axum/extractors.rs | 159 ++++++++++++++++++++++++++++++++++++ src/http/axum/handlers.rs | 16 ++++ src/http/axum/mod.rs | 2 + src/http/axum/query.rs | 138 +++++++++++++++++++++++++++++++ src/http/axum/routes.rs | 6 +- src/tracker/peer.rs | 11 +++ 7 files changed, 331 insertions(+), 2 deletions(-) create mode 100644 src/http/axum/extractors.rs create mode 100644 src/http/axum/query.rs diff --git a/cSpell.json b/cSpell.json index 9f10d99e4..a451d18dc 100644 --- a/cSpell.json +++ b/cSpell.json @@ -73,6 +73,7 @@ "uroot", "Vagaa", "Vuze", + "whitespaces", "Xtorrent", "Xunlei", "xxxxxxxxxxxxxxxxxxxxd", diff --git a/src/http/axum/extractors.rs b/src/http/axum/extractors.rs new file mode 100644 index 000000000..260c3e705 --- /dev/null +++ b/src/http/axum/extractors.rs @@ -0,0 +1,159 @@ +use std::panic::Location; +use std::str::FromStr; + +use axum::async_trait; +use axum::extract::FromRequestParts; +use axum::http::request::Parts; +use axum::http::StatusCode; +use thiserror::Error; + +use super::query::Query; +use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; +use crate::protocol::info_hash::{ConversionError, InfoHash}; +use crate::tracker::peer::{self, IdConversionError}; + +pub struct ExtractAnnounceParams(pub AnnounceParams); + +#[derive(Debug, PartialEq)] +pub struct AnnounceParams { + pub info_hash: InfoHash, + pub peer_id: peer::Id, + pub port: u16, +} + +#[derive(Error, Debug)] +pub enum ParseAnnounceQueryError { + #[error("missing infohash {location}")] + MissingInfoHash { location: &'static Location<'static> }, + #[error("invalid infohash {location}")] + InvalidInfoHash { location: &'static Location<'static> }, + #[error("missing peer id {location}")] + MissingPeerId { location: &'static Location<'static> }, + #[error("invalid peer id {location}")] + InvalidPeerId { location: &'static Location<'static> }, + #[error("missing port {location}")] + MissingPort { location: &'static Location<'static> }, 
+ #[error("invalid port {location}")] + InvalidPort { location: &'static Location<'static> }, +} + +impl From for ParseAnnounceQueryError { + #[track_caller] + fn from(_err: IdConversionError) -> Self { + Self::InvalidPeerId { + location: Location::caller(), + } + } +} + +impl From for ParseAnnounceQueryError { + #[track_caller] + fn from(_err: ConversionError) -> Self { + Self::InvalidPeerId { + location: Location::caller(), + } + } +} + +impl TryFrom for AnnounceParams { + type Error = ParseAnnounceQueryError; + + fn try_from(query: Query) -> Result { + Ok(Self { + info_hash: extract_info_hash(&query)?, + peer_id: extract_peer_id(&query)?, + port: extract_port(&query)?, + }) + } +} + +fn extract_info_hash(query: &Query) -> Result { + match query.get_param("info_hash") { + Some(raw_info_hash) => Ok(percent_decode_info_hash(&raw_info_hash)?), + None => { + return Err(ParseAnnounceQueryError::MissingInfoHash { + location: Location::caller(), + }) + } + } +} + +fn extract_peer_id(query: &Query) -> Result { + match query.get_param("peer_id") { + Some(raw_peer_id) => Ok(percent_decode_peer_id(&raw_peer_id)?), + None => { + return Err(ParseAnnounceQueryError::MissingPeerId { + location: Location::caller(), + }) + } + } +} + +fn extract_port(query: &Query) -> Result { + match query.get_param("port") { + Some(raw_port) => Ok(u16::from_str(&raw_port).map_err(|_e| ParseAnnounceQueryError::InvalidPort { + location: Location::caller(), + })?), + None => { + return Err(ParseAnnounceQueryError::MissingPort { + location: Location::caller(), + }) + } + } +} + +#[async_trait] +impl FromRequestParts for ExtractAnnounceParams +where + S: Send + Sync, +{ + type Rejection = (StatusCode, &'static str); + + async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { + let raw_query = parts.uri.query(); + + if raw_query.is_none() { + return Err((StatusCode::BAD_REQUEST, "missing query params")); + } + + let query = raw_query.unwrap().parse::(); + + if query.is_err() { + return 
Err((StatusCode::BAD_REQUEST, "can't parse query params")); + } + + let announce_params = AnnounceParams::try_from(query.unwrap()); + + if announce_params.is_err() { + return Err((StatusCode::BAD_REQUEST, "can't parse query params for announce request")); + } + + Ok(ExtractAnnounceParams(announce_params.unwrap())) + } +} + +#[cfg(test)] +mod tests { + use super::AnnounceParams; + use crate::http::axum::query::Query; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::peer; + + #[test] + fn announce_request_params_should_be_extracted_from_url_query_params() { + let raw_query = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548"; + + let query = raw_query.parse::().unwrap(); + + let announce_params = AnnounceParams::try_from(query).unwrap(); + + assert_eq!( + announce_params, + AnnounceParams { + info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + peer_id: "-qB00000000000000001".parse::().unwrap(), + port: 17548, + } + ); + } +} diff --git a/src/http/axum/handlers.rs b/src/http/axum/handlers.rs index b2f20786b..050fa8e69 100644 --- a/src/http/axum/handlers.rs +++ b/src/http/axum/handlers.rs @@ -1,9 +1,25 @@ +use std::sync::Arc; + +use axum::extract::State; use axum::response::Json; +use super::extractors::ExtractAnnounceParams; use super::resources::ok::Ok; use super::responses::ok_response; +use crate::tracker::Tracker; #[allow(clippy::unused_async)] pub async fn get_status_handler() -> Json { ok_response() } + +/// # Panics +/// +/// todo +#[allow(clippy::unused_async)] +pub async fn announce_handler( + State(_tracker): State>, + ExtractAnnounceParams(_announce_params): ExtractAnnounceParams, +) -> Json { + todo!() +} diff --git a/src/http/axum/mod.rs b/src/http/axum/mod.rs index 57773d810..9d96362df 100644 --- a/src/http/axum/mod.rs +++ b/src/http/axum/mod.rs @@ -1,4 +1,6 @@ +pub mod extractors; pub mod handlers; +pub mod query; pub mod resources; pub mod responses; pub mod 
routes; diff --git a/src/http/axum/query.rs b/src/http/axum/query.rs new file mode 100644 index 000000000..5037d5d0e --- /dev/null +++ b/src/http/axum/query.rs @@ -0,0 +1,138 @@ +use std::collections::HashMap; +use std::panic::Location; +use std::str::FromStr; + +use thiserror::Error; +pub struct Query { + params: HashMap, +} + +#[derive(Error, Debug)] +pub enum ParseQueryError { + #[error("invalid param {raw_param} in {location}")] + InvalidParam { + location: &'static Location<'static>, + raw_param: String, + }, +} + +impl FromStr for Query { + type Err = ParseQueryError; + + fn from_str(raw_query: &str) -> Result { + let mut params: HashMap = HashMap::new(); + + let raw_params = raw_query.trim().trim_start_matches('?').split('&').collect::>(); + + for raw_param in raw_params { + let param: Param = raw_param.parse()?; + params.insert(param.name, param.value); + } + + Ok(Self { params }) + } +} + +#[derive(Debug, PartialEq)] +struct Param { + name: String, + value: String, +} + +impl FromStr for Param { + type Err = ParseQueryError; + + fn from_str(raw_param: &str) -> Result { + let pair = raw_param.split('=').collect::>(); + + if pair.len() > 2 { + return Err(ParseQueryError::InvalidParam { + location: Location::caller(), + raw_param: raw_param.to_owned(), + }); + } + + Ok(Self { + name: pair[0].to_owned(), + value: pair[1].to_owned(), + }) + } +} + +impl Query { + #[must_use] + pub fn get_param(&self, name: &str) -> Option { + self.params.get(name).map(std::string::ToString::to_string) + } +} + +#[cfg(test)] +mod tests { + use super::Query; + use crate::http::axum::query::Param; + + #[test] + fn it_should_parse_the_query_params_from_an_url_query_string() { + let raw_query = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548"; + + let query = raw_query.parse::().unwrap(); + + assert_eq!( + query.get_param("info_hash").unwrap(), + "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" + ); + 
assert_eq!(query.get_param("peer_id").unwrap(), "-qB00000000000000001"); + assert_eq!(query.get_param("port").unwrap(), "17548"); + } + + #[test] + fn it_should_fail_parsing_an_invalid_query_string() { + let invalid_raw_query = "name=value=value"; + + let query = invalid_raw_query.parse::(); + + assert!(query.is_err()); + } + + #[test] + fn it_should_ignore_the_preceding_question_mark_if_it_exists() { + let raw_query = "?name=value"; + + let query = raw_query.parse::().unwrap(); + + assert_eq!(query.get_param("name").unwrap(), "value"); + } + + #[test] + fn it_should_trim_whitespaces() { + let raw_query = " name=value "; + + let query = raw_query.parse::().unwrap(); + + assert_eq!(query.get_param("name").unwrap(), "value"); + } + + #[test] + fn it_should_parse_a_single_query_param() { + let raw_param = "name=value"; + + let param = raw_param.parse::().unwrap(); + + assert_eq!( + param, + Param { + name: "name".to_string(), + value: "value".to_string(), + } + ); + } + + #[test] + fn it_should_fail_parsing_an_invalid_query_param() { + let invalid_raw_param = "name=value=value"; + + let query = invalid_raw_param.parse::(); + + assert!(query.is_err()); + } +} diff --git a/src/http/axum/routes.rs b/src/http/axum/routes.rs index 9ab58938f..8e4980682 100644 --- a/src/http/axum/routes.rs +++ b/src/http/axum/routes.rs @@ -3,11 +3,13 @@ use std::sync::Arc; use axum::routing::get; use axum::Router; -use super::handlers::get_status_handler; +use super::handlers::{announce_handler, get_status_handler}; use crate::tracker::Tracker; -pub fn router(_tracker: &Arc) -> Router { +pub fn router(tracker: &Arc) -> Router { Router::new() // Status .route("/status", get(get_status_handler)) + // Announce request + .route("/announce", get(announce_handler).with_state(tracker.clone())) } diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 16c96e04b..c132d1e2c 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -331,6 +331,17 @@ mod test { let _ = 
peer::Id::from_bytes(&more_than_20_bytes); } + #[test] + fn should_be_instantiated_from_a_string() { + let id = "-qB00000000000000001".parse::().unwrap(); + + let expected_id = peer::Id([ + 45, 113, 66, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 49, + ]); + + assert_eq!(id, expected_id); + } + #[test] fn should_be_converted_from_a_20_byte_array() { let id = peer::Id::from([ From 995397eae4a4b96820f52bcd203b2e0bd67be745 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 9 Feb 2023 12:56:42 +0000 Subject: [PATCH 312/435] refactor(http): [#160] reorganize dirs for Axum and Warp HTTP tracker implementations We are going to start sharing code between both implementations (Warp and Axum). We need to keep common code separated because Warp implementation will be removed when Axum implementation is finished. --- .../{axum => axum_implementation}/extractors.rs | 2 +- src/http/{axum => axum_implementation}/handlers.rs | 0 src/http/{axum => axum_implementation}/mod.rs | 0 src/http/{axum => axum_implementation}/query.rs | 2 +- .../{axum => axum_implementation}/resources/mod.rs | 0 .../{axum => axum_implementation}/resources/ok.rs | 0 .../{axum => axum_implementation}/responses.rs | 0 src/http/{axum => axum_implementation}/routes.rs | 0 src/http/{axum => axum_implementation}/server.rs | 0 src/http/mod.rs | 14 +++----------- src/http/{ => warp_implementation}/error.rs | 0 src/http/{ => warp_implementation}/filters.rs | 2 +- src/http/{ => warp_implementation}/handlers.rs | 0 src/http/warp_implementation/mod.rs | 12 ++++++++++++ src/http/{ => warp_implementation}/request.rs | 2 +- src/http/{ => warp_implementation}/response.rs | 0 src/http/{ => warp_implementation}/routes.rs | 0 src/http/{ => warp_implementation}/server.rs | 0 src/jobs/http_tracker.rs | 4 ++-- src/tracker/peer.rs | 4 ++-- 20 files changed, 23 insertions(+), 19 deletions(-) rename src/http/{axum =>
axum_implementation}/handlers.rs (100%) rename src/http/{axum => axum_implementation}/mod.rs (100%) rename src/http/{axum => axum_implementation}/query.rs (98%) rename src/http/{axum => axum_implementation}/resources/mod.rs (100%) rename src/http/{axum => axum_implementation}/resources/ok.rs (100%) rename src/http/{axum => axum_implementation}/responses.rs (100%) rename src/http/{axum => axum_implementation}/routes.rs (100%) rename src/http/{axum => axum_implementation}/server.rs (100%) rename src/http/{ => warp_implementation}/error.rs (100%) rename src/http/{ => warp_implementation}/filters.rs (98%) rename src/http/{ => warp_implementation}/handlers.rs (100%) create mode 100644 src/http/warp_implementation/mod.rs rename src/http/{ => warp_implementation}/request.rs (94%) rename src/http/{ => warp_implementation}/response.rs (100%) rename src/http/{ => warp_implementation}/routes.rs (100%) rename src/http/{ => warp_implementation}/server.rs (100%) diff --git a/src/http/axum/extractors.rs b/src/http/axum_implementation/extractors.rs similarity index 98% rename from src/http/axum/extractors.rs rename to src/http/axum_implementation/extractors.rs index 260c3e705..a1f3fad1e 100644 --- a/src/http/axum/extractors.rs +++ b/src/http/axum_implementation/extractors.rs @@ -135,7 +135,7 @@ where #[cfg(test)] mod tests { use super::AnnounceParams; - use crate::http::axum::query::Query; + use crate::http::axum_implementation::query::Query; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; diff --git a/src/http/axum/handlers.rs b/src/http/axum_implementation/handlers.rs similarity index 100% rename from src/http/axum/handlers.rs rename to src/http/axum_implementation/handlers.rs diff --git a/src/http/axum/mod.rs b/src/http/axum_implementation/mod.rs similarity index 100% rename from src/http/axum/mod.rs rename to src/http/axum_implementation/mod.rs diff --git a/src/http/axum/query.rs b/src/http/axum_implementation/query.rs similarity index 98% rename from 
src/http/axum/query.rs rename to src/http/axum_implementation/query.rs index 5037d5d0e..c7c20b22d 100644 --- a/src/http/axum/query.rs +++ b/src/http/axum_implementation/query.rs @@ -69,7 +69,7 @@ impl Query { #[cfg(test)] mod tests { use super::Query; - use crate::http::axum::query::Param; + use crate::http::axum_implementation::query::Param; #[test] fn it_should_parse_the_query_params_from_an_url_query_string() { diff --git a/src/http/axum/resources/mod.rs b/src/http/axum_implementation/resources/mod.rs similarity index 100% rename from src/http/axum/resources/mod.rs rename to src/http/axum_implementation/resources/mod.rs diff --git a/src/http/axum/resources/ok.rs b/src/http/axum_implementation/resources/ok.rs similarity index 100% rename from src/http/axum/resources/ok.rs rename to src/http/axum_implementation/resources/ok.rs diff --git a/src/http/axum/responses.rs b/src/http/axum_implementation/responses.rs similarity index 100% rename from src/http/axum/responses.rs rename to src/http/axum_implementation/responses.rs diff --git a/src/http/axum/routes.rs b/src/http/axum_implementation/routes.rs similarity index 100% rename from src/http/axum/routes.rs rename to src/http/axum_implementation/routes.rs diff --git a/src/http/axum/server.rs b/src/http/axum_implementation/server.rs similarity index 100% rename from src/http/axum/server.rs rename to src/http/axum_implementation/server.rs diff --git a/src/http/mod.rs b/src/http/mod.rs index 15f7abb52..039a2067b 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -11,18 +11,10 @@ //! 
use serde::{Deserialize, Serialize}; -pub mod axum; -pub mod error; -pub mod filters; -pub mod handlers; -pub mod percent_encoding; -pub mod request; -pub mod response; -pub mod routes; -pub mod server; -pub type Bytes = u64; -pub type WebResult = std::result::Result; +pub mod axum_implementation; +pub mod percent_encoding; +pub mod warp_implementation; #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] pub enum Version { diff --git a/src/http/error.rs b/src/http/warp_implementation/error.rs similarity index 100% rename from src/http/error.rs rename to src/http/warp_implementation/error.rs diff --git a/src/http/filters.rs b/src/http/warp_implementation/filters.rs similarity index 98% rename from src/http/filters.rs rename to src/http/warp_implementation/filters.rs index e02eac523..176170330 100644 --- a/src/http/filters.rs +++ b/src/http/warp_implementation/filters.rs @@ -7,8 +7,8 @@ use std::sync::Arc; use warp::{reject, Filter, Rejection}; use super::error::Error; -use super::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use super::{request, WebResult}; +use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; use crate::tracker::{self, auth, peer}; diff --git a/src/http/handlers.rs b/src/http/warp_implementation/handlers.rs similarity index 100% rename from src/http/handlers.rs rename to src/http/warp_implementation/handlers.rs diff --git a/src/http/warp_implementation/mod.rs b/src/http/warp_implementation/mod.rs new file mode 100644 index 000000000..4fbfb48fb --- /dev/null +++ b/src/http/warp_implementation/mod.rs @@ -0,0 +1,12 @@ +use warp::Rejection; + +pub mod error; +pub mod filters; +pub mod handlers; +pub mod request; +pub mod response; +pub mod routes; +pub mod server; + +pub type Bytes = u64; +pub type WebResult = std::result::Result; diff --git a/src/http/request.rs 
b/src/http/warp_implementation/request.rs similarity index 94% rename from src/http/request.rs rename to src/http/warp_implementation/request.rs index bc549b698..f666b48c5 100644 --- a/src/http/request.rs +++ b/src/http/warp_implementation/request.rs @@ -2,7 +2,7 @@ use std::net::IpAddr; use serde::Deserialize; -use crate::http::Bytes; +use crate::http::warp_implementation::Bytes; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; diff --git a/src/http/response.rs b/src/http/warp_implementation/response.rs similarity index 100% rename from src/http/response.rs rename to src/http/warp_implementation/response.rs diff --git a/src/http/routes.rs b/src/http/warp_implementation/routes.rs similarity index 100% rename from src/http/routes.rs rename to src/http/warp_implementation/routes.rs diff --git a/src/http/server.rs b/src/http/warp_implementation/server.rs similarity index 100% rename from src/http/server.rs rename to src/http/warp_implementation/server.rs diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index 6b069301d..aa96af884 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -7,8 +7,8 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use crate::config::HttpTracker; -use crate::http::axum::server; -use crate::http::server::Http; +use crate::http::axum_implementation::server; +use crate::http::warp_implementation::server::Http; use crate::http::Version; use crate::tracker; diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index c132d1e2c..04e4cdb45 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -6,7 +6,7 @@ use serde; use serde::Serialize; use thiserror::Error; -use crate::http::request::Announce; +use crate::http::warp_implementation::request::Announce; use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time}; use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; use crate::protocol::utils::ser_unix_time_value; @@ -599,7 +599,7 @@ mod test { mod 
torrent_peer_constructor_from_for_http_requests { use std::net::{IpAddr, Ipv4Addr}; - use crate::http::request::Announce; + use crate::http::warp_implementation::request::Announce; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer::{self, Peer}; From 4b8fbfbbc9dc33e72c7e592dbd74d3f1f206e36d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Feb 2023 14:23:17 +0000 Subject: [PATCH 313/435] refactor: the tracker is responsible for assigning the IP to peers --- src/http/warp_implementation/handlers.rs | 10 +- src/tracker/mod.rs | 113 +++++++++++++++++- src/tracker/peer.rs | 144 ++++------------------- src/udp/handlers.rs | 14 +-- 4 files changed, 151 insertions(+), 130 deletions(-) diff --git a/src/http/warp_implementation/handlers.rs b/src/http/warp_implementation/handlers.rs index 229cb4587..0fd332cae 100644 --- a/src/http/warp_implementation/handlers.rs +++ b/src/http/warp_implementation/handlers.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; use std::convert::Infallible; -use std::net::IpAddr; +use std::net::{IpAddr, SocketAddr}; use std::panic::Location; use std::sync::Arc; @@ -41,11 +41,15 @@ pub async fn handle_announce( auth_key: Option, tracker: Arc, ) -> WebResult { + debug!("http announce request: {:#?}", announce_request); + authenticate(&announce_request.info_hash, &auth_key, tracker.clone()).await?; - debug!("{:?}", announce_request); + // build the peer + let peer_ip = tracker.assign_ip_address_to_peer(&announce_request.peer_addr); + let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port); + let peer = peer::Peer::from_http_announce_request(&announce_request, &peer_socket_address); - let peer = peer::Peer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); let torrent_stats = tracker .update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer) .await; diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index acbf7d536..f31a71fbb 100644 --- 
a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -8,7 +8,7 @@ pub mod torrent; use std::collections::btree_map::Entry; use std::collections::BTreeMap; -use std::net::SocketAddr; +use std::net::{IpAddr, SocketAddr}; use std::panic::Location; use std::sync::Arc; use std::time::Duration; @@ -76,6 +76,12 @@ impl Tracker { self.mode == mode::Mode::Listed || self.mode == mode::Mode::PrivateListed } + /// It assigns a socket address to the peer + #[must_use] + pub fn assign_ip_address_to_peer(&self, remote_client_ip: &IpAddr) -> IpAddr { + assign_ip_address_to_peer(remote_client_ip, self.config.get_ext_ip()) + } + /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. @@ -378,6 +384,15 @@ impl Tracker { } } +#[must_use] +pub fn assign_ip_address_to_peer(remote_client_ip: &IpAddr, tracker_external_ip: Option) -> IpAddr { + if let Some(host_ip) = tracker_external_ip.filter(|_| remote_client_ip.is_loopback()) { + host_ip + } else { + *remote_client_ip + } +} + #[cfg(test)] mod tests { use std::sync::Arc; @@ -424,4 +439,100 @@ mod tests { } ); } + + mod the_tracker_assigning_the_ip_to_the_peer { + + use std::net::{IpAddr, Ipv4Addr}; + + use crate::tracker::assign_ip_address_to_peer; + + #[test] + fn should_use_the_source_ip_instead_of_the_ip_in_the_announce_request() { + let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + assert_eq!(peer_ip, remote_ip); + } + + mod when_the_client_ip_is_a_ipv4_loopback_ip { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::str::FromStr; + + use crate::tracker::assign_ip_address_to_peer; + + #[test] + fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + assert_eq!(peer_ip, remote_ip); + } + + #[test] + fn 
it_should_use_the_external_tracker_ip_in_tracker_configuration_if_it_is_defined() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv6_ip() + { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + } + + mod when_client_ip_is_a_ipv6_loopback_ip { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::str::FromStr; + + use crate::tracker::assign_ip_address_to_peer; + + #[test] + fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + assert_eq!(peer_ip, remote_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_tracker_configuration_if_it_is_defined() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv4_ip() + { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, 
tracker_external_ip); + } + } + } } diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 04e4cdb45..e824a0cbc 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -1,4 +1,4 @@ -use std::net::{IpAddr, SocketAddr}; +use std::net::SocketAddr; use std::panic::Location; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; @@ -29,16 +29,10 @@ pub struct Peer { impl Peer { #[must_use] - pub fn from_udp_announce_request( - announce_request: &aquatic_udp_protocol::AnnounceRequest, - remote_ip: IpAddr, - host_opt_ip: Option, - ) -> Self { - let peer_addr = Peer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); - + pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, peer_addr: &SocketAddr) -> Self { Peer { peer_id: Id(announce_request.peer_id.0), - peer_addr, + peer_addr: *peer_addr, updated: Current::now(), uploaded: announce_request.bytes_uploaded, downloaded: announce_request.bytes_downloaded, @@ -48,9 +42,7 @@ impl Peer { } #[must_use] - pub fn from_http_announce_request(announce_request: &Announce, remote_ip: IpAddr, host_opt_ip: Option) -> Self { - let peer_addr = Peer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); - + pub fn from_http_announce_request(announce_request: &Announce, peer_addr: &SocketAddr) -> Self { let event: AnnounceEvent = if let Some(event) = &announce_request.event { match event.as_ref() { "started" => AnnounceEvent::Started, @@ -65,7 +57,7 @@ impl Peer { #[allow(clippy::cast_possible_truncation)] Peer { peer_id: announce_request.peer_id, - peer_addr, + peer_addr: *peer_addr, updated: Current::now(), uploaded: NumberOfBytes(i128::from(announce_request.uploaded) as i64), downloaded: NumberOfBytes(i128::from(announce_request.downloaded) as i64), @@ -74,16 +66,6 @@ impl Peer { } } - // potentially substitute localhost ip with external ip - #[must_use] - pub fn 
peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip: IpAddr, host_opt_ip: Option, port: u16) -> SocketAddr { - if let Some(host_ip) = host_opt_ip.filter(|_| remote_ip.is_loopback()) { - SocketAddr::new(host_ip, port) - } else { - SocketAddr::new(remote_ip, port) - } - } - #[must_use] pub fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped @@ -446,6 +428,7 @@ mod test { AnnounceEvent, AnnounceRequest, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, TransactionId, }; + use crate::tracker::assign_ip_address_to_peer; use crate::tracker::peer::Peer; use crate::udp::connection_cookie::{into_connection_id, make}; @@ -498,7 +481,10 @@ mod test { let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); let announce_request = AnnounceRequestBuilder::default().into(); - let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, None); + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port.0); + + let torrent_peer = Peer::from_udp_announce_request(&announce_request, &peer_socket_address); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); } @@ -508,99 +494,21 @@ mod test { let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); let announce_request = AnnounceRequestBuilder::default().into(); - let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, None); - - assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); - } - - mod when_source_udp_ip_is_a_ipv_4_loopback_ip { - - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use std::str::FromStr; - - use crate::tracker::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; - use crate::tracker::peer::Peer; - - #[test] - fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { - let remote_ip = 
IpAddr::V4(Ipv4Addr::LOCALHOST); - let announce_request = AnnounceRequestBuilder::default().into(); - - let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, None); - - assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); - } - - #[test] - fn it_should_use_the_external_host_ip_in_tracker_configuration_if_defined() { - let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); - let announce_request = AnnounceRequestBuilder::default().into(); - - let host_opt_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); - let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); - - assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); - } - - #[test] - fn it_should_use_the_external_ip_in_tracker_configuration_if_defined_even_if_the_external_ip_is_an_ipv6_ip() { - let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); - let announce_request = AnnounceRequestBuilder::default().into(); + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port.0); - let host_opt_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); - let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, &peer_socket_address); - assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); - } - } - - mod when_source_udp_ip_is_a_ipv6_loopback_ip { - - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use std::str::FromStr; - - use crate::tracker::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; - use crate::tracker::peer::Peer; - - #[test] - fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { - let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); - 
let announce_request = AnnounceRequestBuilder::default().into(); - - let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, None); - - assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); - } - - #[test] - fn it_should_use_the_external_host_ip_in_tracker_configuration_if_defined() { - let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); - let announce_request = AnnounceRequestBuilder::default().into(); - - let host_opt_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); - let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); - - assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); - } - - #[test] - fn it_should_use_the_external_ip_in_tracker_configuration_if_defined_even_if_the_external_ip_is_an_ipv4_ip() { - let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); - let announce_request = AnnounceRequestBuilder::default().into(); - - let host_opt_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); - let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); - - assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); - } + assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); } } mod torrent_peer_constructor_from_for_http_requests { - use std::net::{IpAddr, Ipv4Addr}; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use crate::http::warp_implementation::request::Announce; use crate::protocol::info_hash::InfoHash; + use crate::tracker::assign_ip_address_to_peer; use crate::tracker::peer::{self, Peer}; fn sample_http_announce_request(peer_addr: IpAddr, port: u16) -> Announce { @@ -618,13 +526,16 @@ mod test { } #[test] - fn it_should_use_the_source_ip_in_the_udp_heder_as_the_peer_ip_address_ignoring_the_peer_ip_in_the_announce_request() { + fn 
it_should_use_the_source_ip_in_the_udp_header_as_the_peer_ip_address_ignoring_the_peer_ip_in_the_announce_request() { let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); let ip_in_announce_request = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); let announce_request = sample_http_announce_request(ip_in_announce_request, 8080); - let torrent_peer = Peer::from_http_announce_request(&announce_request, remote_ip, None); + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port); + + let torrent_peer = Peer::from_http_announce_request(&announce_request, &peer_socket_address); assert_eq!(torrent_peer.peer_addr.ip(), remote_ip); assert_ne!(torrent_peer.peer_addr.ip(), ip_in_announce_request); @@ -639,18 +550,13 @@ mod test { let announce_request = sample_http_announce_request(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), port_in_announce_request); - let torrent_peer = Peer::from_http_announce_request(&announce_request, remote_ip, None); + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port); + + let torrent_peer = Peer::from_http_announce_request(&announce_request, &peer_socket_address); assert_eq!(torrent_peer.peer_addr.port(), announce_request.port); assert_ne!(torrent_peer.peer_addr.port(), remote_port); } - - // todo: other cases are already covered by UDP cases. - // Code review: - // We should extract the method "peer_addr_from_ip_and_port_and_opt_host_ip" from TorrentPeer. - // It could be another service responsible for assigning the IP to the peer. - // So we can test that behavior independently from where you use it. - // We could also build the peer with the IP in the announce request and let the tracker decide - // wether it has to change it or not depending on tracker configuration. 
} } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index b36399f89..b6d4bed7b 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -6,6 +6,7 @@ use aquatic_udp_protocol::{ AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; +use log::debug; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; @@ -93,6 +94,8 @@ pub async fn handle_announce( announce_request: &AnnounceRequest, tracker: Arc, ) -> Result { + debug!("udp announce request: {:#?}", announce_request); + check(&remote_addr, &from_connection_id(&announce_request.connection_id))?; let wrapped_announce_request = AnnounceWrapper::new(announce_request); @@ -104,13 +107,10 @@ pub async fn handle_announce( source: (Arc::new(e) as Arc).into(), })?; - let peer = peer::Peer::from_udp_announce_request( - &wrapped_announce_request.announce_request, - remote_addr.ip(), - tracker.config.get_ext_ip(), - ); - - //let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer).await; + // build the peer + let peer_ip = tracker.assign_ip_address_to_peer(&remote_addr.ip()); + let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port.0); + let peer = peer::Peer::from_udp_announce_request(&wrapped_announce_request.announce_request, &peer_socket_address); let torrent_stats = tracker .update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer) From 05ea74177dd033aed5704131869b8ae60a223432 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Feb 2023 16:38:46 +0000 Subject: [PATCH 314/435] refactor: move code from domain to delivery layer --- src/http/warp_implementation/handlers.rs | 19 +- src/http/warp_implementation/mod.rs | 1 + 
src/http/warp_implementation/peer_builder.rs | 32 ++++ src/tracker/peer.rs | 181 +------------------ src/udp/handlers.rs | 9 +- src/udp/mod.rs | 1 + src/udp/peer_builder.rs | 18 ++ src/udp/request.rs | 15 -- 8 files changed, 63 insertions(+), 213 deletions(-) create mode 100644 src/http/warp_implementation/peer_builder.rs create mode 100644 src/udp/peer_builder.rs diff --git a/src/http/warp_implementation/handlers.rs b/src/http/warp_implementation/handlers.rs index 0fd332cae..f914e7555 100644 --- a/src/http/warp_implementation/handlers.rs +++ b/src/http/warp_implementation/handlers.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; use std::convert::Infallible; -use std::net::{IpAddr, SocketAddr}; +use std::net::IpAddr; use std::panic::Location; use std::sync::Arc; @@ -10,6 +10,7 @@ use warp::{reject, Rejection, Reply}; use super::error::Error; use super::{request, response, WebResult}; +use crate::http::warp_implementation::peer_builder; use crate::protocol::info_hash::InfoHash; use crate::tracker::{self, auth, peer, statistics, torrent}; @@ -31,11 +32,9 @@ pub async fn authenticate( }) } -/// Handle announce request -/// /// # Errors /// -/// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. +/// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_announce_response`. 
pub async fn handle_announce( announce_request: request::Announce, auth_key: Option, @@ -45,10 +44,9 @@ pub async fn handle_announce( authenticate(&announce_request.info_hash, &auth_key, tracker.clone()).await?; - // build the peer let peer_ip = tracker.assign_ip_address_to_peer(&announce_request.peer_addr); - let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port); - let peer = peer::Peer::from_http_announce_request(&announce_request, &peer_socket_address); + + let peer = peer_builder::from_request(&announce_request, &peer_ip); let torrent_stats = tracker .update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer) @@ -57,9 +55,6 @@ pub async fn handle_announce( // get all torrent peers excluding the peer_addr let peers = tracker.get_torrent_peers(&announce_request.info_hash, &peer.peer_addr).await; - let announce_interval = tracker.config.announce_interval; - - // send stats event match announce_request.peer_addr { IpAddr::V4(_) => { tracker.send_stats_event(statistics::Event::Tcp4Announce).await; @@ -73,13 +68,11 @@ pub async fn handle_announce( &announce_request, &torrent_stats, &peers, - announce_interval, + tracker.config.announce_interval, tracker.config.min_announce_interval, ) } -/// Handle scrape request -/// /// # Errors /// /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. 
diff --git a/src/http/warp_implementation/mod.rs b/src/http/warp_implementation/mod.rs index 4fbfb48fb..1dec73b29 100644 --- a/src/http/warp_implementation/mod.rs +++ b/src/http/warp_implementation/mod.rs @@ -3,6 +3,7 @@ use warp::Rejection; pub mod error; pub mod filters; pub mod handlers; +pub mod peer_builder; pub mod request; pub mod response; pub mod routes; diff --git a/src/http/warp_implementation/peer_builder.rs b/src/http/warp_implementation/peer_builder.rs new file mode 100644 index 000000000..70cf7b508 --- /dev/null +++ b/src/http/warp_implementation/peer_builder.rs @@ -0,0 +1,32 @@ +use std::net::{IpAddr, SocketAddr}; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + +use super::request::Announce; +use crate::protocol::clock::{Current, Time}; +use crate::tracker::peer::Peer; + +#[must_use] +pub fn from_request(announce_request: &Announce, peer_ip: &IpAddr) -> Peer { + let event: AnnounceEvent = if let Some(event) = &announce_request.event { + match event.as_ref() { + "started" => AnnounceEvent::Started, + "stopped" => AnnounceEvent::Stopped, + "completed" => AnnounceEvent::Completed, + _ => AnnounceEvent::None, + } + } else { + AnnounceEvent::None + }; + + #[allow(clippy::cast_possible_truncation)] + Peer { + peer_id: announce_request.peer_id, + peer_addr: SocketAddr::new(*peer_ip, announce_request.port), + updated: Current::now(), + uploaded: NumberOfBytes(i128::from(announce_request.uploaded) as i64), + downloaded: NumberOfBytes(i128::from(announce_request.downloaded) as i64), + left: NumberOfBytes(i128::from(announce_request.left) as i64), + event, + } +} diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index e824a0cbc..24cc99f9b 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -6,8 +6,7 @@ use serde; use serde::Serialize; use thiserror::Error; -use crate::http::warp_implementation::request::Announce; -use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time}; +use 
crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; use crate::protocol::utils::ser_unix_time_value; @@ -28,44 +27,6 @@ pub struct Peer { } impl Peer { - #[must_use] - pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, peer_addr: &SocketAddr) -> Self { - Peer { - peer_id: Id(announce_request.peer_id.0), - peer_addr: *peer_addr, - updated: Current::now(), - uploaded: announce_request.bytes_uploaded, - downloaded: announce_request.bytes_downloaded, - left: announce_request.bytes_left, - event: announce_request.event, - } - } - - #[must_use] - pub fn from_http_announce_request(announce_request: &Announce, peer_addr: &SocketAddr) -> Self { - let event: AnnounceEvent = if let Some(event) = &announce_request.event { - match event.as_ref() { - "started" => AnnounceEvent::Started, - "stopped" => AnnounceEvent::Stopped, - "completed" => AnnounceEvent::Completed, - _ => AnnounceEvent::None, - } - } else { - AnnounceEvent::None - }; - - #[allow(clippy::cast_possible_truncation)] - Peer { - peer_id: announce_request.peer_id, - peer_addr: *peer_addr, - updated: Current::now(), - uploaded: NumberOfBytes(i128::from(announce_request.uploaded) as i64), - downloaded: NumberOfBytes(i128::from(announce_request.downloaded) as i64), - left: NumberOfBytes(i128::from(announce_request.left) as i64), - event, - } - } - #[must_use] pub fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped @@ -419,144 +380,4 @@ mod test { ); } } - - mod torrent_peer_constructor_from_udp_requests { - - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - - use aquatic_udp_protocol::{ - AnnounceEvent, AnnounceRequest, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, TransactionId, - }; - - use crate::tracker::assign_ip_address_to_peer; - use crate::tracker::peer::Peer; - use crate::udp::connection_cookie::{into_connection_id, make}; - - // todo: duplicate 
functions is PR 82. Remove duplication once both PR are merged. - - fn sample_ipv4_remote_addr() -> SocketAddr { - sample_ipv4_socket_address() - } - - fn sample_ipv4_socket_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) - } - - struct AnnounceRequestBuilder { - request: AnnounceRequest, - } - - impl AnnounceRequestBuilder { - pub fn default() -> AnnounceRequestBuilder { - let client_ip = Ipv4Addr::new(126, 0, 0, 1); - let client_port = 8080; - let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); - - let default_request = AnnounceRequest { - connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), - transaction_id: TransactionId(0i32), - info_hash: info_hash_aquatic, - peer_id: AquaticPeerId(*b"-qB00000000000000000"), - bytes_downloaded: NumberOfBytes(0i64), - bytes_uploaded: NumberOfBytes(0i64), - bytes_left: NumberOfBytes(0i64), - event: AnnounceEvent::Started, - ip_address: Some(client_ip), - key: PeerKey(0u32), - peers_wanted: NumberOfPeers(1i32), - port: Port(client_port), - }; - AnnounceRequestBuilder { - request: default_request, - } - } - - pub fn into(self) -> AnnounceRequest { - self.request - } - } - - #[test] - fn it_should_use_the_udp_source_ip_as_the_peer_ip_address_instead_of_the_ip_in_the_announce_request() { - let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); - let announce_request = AnnounceRequestBuilder::default().into(); - - let peer_ip = assign_ip_address_to_peer(&remote_ip, None); - let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port.0); - - let torrent_peer = Peer::from_udp_announce_request(&announce_request, &peer_socket_address); - - assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); - } - - #[test] - fn it_should_always_use_the_port_in_the_announce_request_for_the_peer_port() { - let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); - let announce_request = AnnounceRequestBuilder::default().into(); - 
- let peer_ip = assign_ip_address_to_peer(&remote_ip, None); - let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port.0); - - let torrent_peer = Peer::from_udp_announce_request(&announce_request, &peer_socket_address); - - assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); - } - } - - mod torrent_peer_constructor_from_for_http_requests { - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - - use crate::http::warp_implementation::request::Announce; - use crate::protocol::info_hash::InfoHash; - use crate::tracker::assign_ip_address_to_peer; - use crate::tracker::peer::{self, Peer}; - - fn sample_http_announce_request(peer_addr: IpAddr, port: u16) -> Announce { - Announce { - info_hash: InfoHash([0u8; 20]), - peer_addr, - downloaded: 0u64, - uploaded: 0u64, - peer_id: peer::Id(*b"-qB00000000000000000"), - port, - left: 0u64, - event: None, - compact: None, - } - } - - #[test] - fn it_should_use_the_source_ip_in_the_udp_header_as_the_peer_ip_address_ignoring_the_peer_ip_in_the_announce_request() { - let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); - - let ip_in_announce_request = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); - let announce_request = sample_http_announce_request(ip_in_announce_request, 8080); - - let peer_ip = assign_ip_address_to_peer(&remote_ip, None); - let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port); - - let torrent_peer = Peer::from_http_announce_request(&announce_request, &peer_socket_address); - - assert_eq!(torrent_peer.peer_addr.ip(), remote_ip); - assert_ne!(torrent_peer.peer_addr.ip(), ip_in_announce_request); - } - - #[test] - fn it_should_always_use_the_port_in_the_announce_request_for_the_peer_port_ignoring_the_port_in_the_udp_header() { - let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); - let remote_port = 8080; - - let port_in_announce_request = 8081; - let announce_request = - sample_http_announce_request(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 
port_in_announce_request); - - let peer_ip = assign_ip_address_to_peer(&remote_ip, None); - let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port); - - let torrent_peer = Peer::from_http_announce_request(&announce_request, &peer_socket_address); - - assert_eq!(torrent_peer.peer_addr.port(), announce_request.port); - assert_ne!(torrent_peer.peer_addr.port(), remote_port); - } - } } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index b6d4bed7b..53efa7ecc 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -11,8 +11,9 @@ use log::debug; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; -use crate::tracker::{self, peer, statistics}; +use crate::tracker::{self, statistics}; use crate::udp::error::Error; +use crate::udp::peer_builder; use crate::udp::request::AnnounceWrapper; pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: Arc) -> Response { @@ -107,10 +108,9 @@ pub async fn handle_announce( source: (Arc::new(e) as Arc).into(), })?; - // build the peer let peer_ip = tracker.assign_ip_address_to_peer(&remote_addr.ip()); - let peer_socket_address = SocketAddr::new(peer_ip, announce_request.port.0); - let peer = peer::Peer::from_udp_announce_request(&wrapped_announce_request.announce_request, &peer_socket_address); + + let peer = peer_builder::from_request(&wrapped_announce_request, &peer_ip); let torrent_stats = tracker .update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer) @@ -164,7 +164,6 @@ pub async fn handle_announce( }) }; - // send stats event match remote_addr { SocketAddr::V4(_) => { tracker.send_stats_event(statistics::Event::Udp4Announce).await; diff --git a/src/udp/mod.rs b/src/udp/mod.rs index 8b8c8c4f8..b6431f752 100644 --- a/src/udp/mod.rs +++ b/src/udp/mod.rs @@ -3,6 +3,7 @@ pub mod error; pub mod handlers; pub mod request; pub mod server; 
+pub mod peer_builder; pub type Bytes = u64; pub type Port = u16; diff --git a/src/udp/peer_builder.rs b/src/udp/peer_builder.rs new file mode 100644 index 000000000..84eae64f9 --- /dev/null +++ b/src/udp/peer_builder.rs @@ -0,0 +1,18 @@ +use std::net::{IpAddr, SocketAddr}; + +use super::request::AnnounceWrapper; +use crate::protocol::clock::{Current, Time}; +use crate::tracker::peer::{Id, Peer}; + +#[must_use] +pub fn from_request(announce_wrapper: &AnnounceWrapper, peer_ip: &IpAddr) -> Peer { + Peer { + peer_id: Id(announce_wrapper.announce_request.peer_id.0), + peer_addr: SocketAddr::new(*peer_ip, announce_wrapper.announce_request.port.0), + updated: Current::now(), + uploaded: announce_wrapper.announce_request.bytes_uploaded, + downloaded: announce_wrapper.announce_request.bytes_downloaded, + left: announce_wrapper.announce_request.bytes_left, + event: announce_wrapper.announce_request.event, + } +} diff --git a/src/udp/request.rs b/src/udp/request.rs index c4326b291..28d75f860 100644 --- a/src/udp/request.rs +++ b/src/udp/request.rs @@ -2,21 +2,6 @@ use aquatic_udp_protocol::AnnounceRequest; use crate::protocol::info_hash::InfoHash; -// struct AnnounceRequest { -// pub connection_id: i64, -// pub transaction_id: i32, -// pub info_hash: InfoHash, -// pub peer_id: PeerId, -// pub bytes_downloaded: Bytes, -// pub bytes_uploaded: Bytes, -// pub bytes_left: Bytes, -// pub event: AnnounceEvent, -// pub ip_address: Option, -// pub key: u32, -// pub peers_wanted: u32, -// pub port: Port -// } - pub struct AnnounceWrapper { pub announce_request: AnnounceRequest, pub info_hash: InfoHash, From 156ac4d0c9bb9a734d586564de4eb24bac60f399 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Feb 2023 17:17:23 +0000 Subject: [PATCH 315/435] refactor: clean announce request handlers There is duplicate code in announce handlers for UDP and HTTP tracker. This change makes them more similar in order to extract the common part later. 
--- src/http/warp_implementation/handlers.rs | 16 ++++---- src/tracker/mod.rs | 2 +- src/udp/handlers.rs | 51 +++++++++++++----------- 3 files changed, 37 insertions(+), 32 deletions(-) diff --git a/src/http/warp_implementation/handlers.rs b/src/http/warp_implementation/handlers.rs index f914e7555..2a0aa005c 100644 --- a/src/http/warp_implementation/handlers.rs +++ b/src/http/warp_implementation/handlers.rs @@ -42,20 +42,20 @@ pub async fn handle_announce( ) -> WebResult { debug!("http announce request: {:#?}", announce_request); - authenticate(&announce_request.info_hash, &auth_key, tracker.clone()).await?; + let info_hash = announce_request.info_hash; + let remote_client_ip = announce_request.peer_addr; - let peer_ip = tracker.assign_ip_address_to_peer(&announce_request.peer_addr); + authenticate(&info_hash, &auth_key, tracker.clone()).await?; + + let peer_ip = tracker.assign_ip_address_to_peer(&remote_client_ip); let peer = peer_builder::from_request(&announce_request, &peer_ip); - let torrent_stats = tracker - .update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer) - .await; + let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; - // get all torrent peers excluding the peer_addr - let peers = tracker.get_torrent_peers(&announce_request.info_hash, &peer.peer_addr).await; + let peers = tracker.get_other_peers(&info_hash, &peer.peer_addr).await; - match announce_request.peer_addr { + match remote_client_ip { IpAddr::V4(_) => { tracker.send_stats_event(statistics::Event::Tcp4Announce).await; } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index f31a71fbb..a6ea6d3b0 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -279,7 +279,7 @@ impl Tracker { } /// Get all torrent peers for a given torrent filtering out the peer with the client address - pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { + pub async fn get_other_peers(&self, info_hash: 
&InfoHash, client_addr: &SocketAddr) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 53efa7ecc..283041333 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -87,6 +87,18 @@ pub async fn handle_connect( Ok(response) } +/// # Errors +/// +/// Will return `Error` if unable to `authenticate_request`. +pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), Error> { + tracker + .authenticate_request(info_hash, &None) + .await + .map_err(|e| Error::TrackerError { + source: (Arc::new(e) as Arc).into(), + }) +} + /// # Errors /// /// If a error happens in the `handle_announce` function, it will just return the `ServerError`. @@ -101,25 +113,27 @@ pub async fn handle_announce( let wrapped_announce_request = AnnounceWrapper::new(announce_request); - tracker - .authenticate_request(&wrapped_announce_request.info_hash, &None) - .await - .map_err(|e| Error::TrackerError { - source: (Arc::new(e) as Arc).into(), - })?; + let info_hash = wrapped_announce_request.info_hash; + let remote_client_ip = remote_addr.ip(); + + authenticate(&info_hash, tracker.clone()).await?; - let peer_ip = tracker.assign_ip_address_to_peer(&remote_addr.ip()); + let peer_ip = tracker.assign_ip_address_to_peer(&remote_client_ip); let peer = peer_builder::from_request(&wrapped_announce_request, &peer_ip); - let torrent_stats = tracker - .update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer) - .await; + let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; - // get all peers excluding the client_addr - let peers = tracker - .get_torrent_peers(&wrapped_announce_request.info_hash, &peer.peer_addr) - .await; + let peers = tracker.get_other_peers(&info_hash, &peer.peer_addr).await; + + match remote_client_ip { + IpAddr::V4(_) => { + tracker.send_stats_event(statistics::Event::Udp4Announce).await; + } + IpAddr::V6(_) 
=> { + tracker.send_stats_event(statistics::Event::Udp6Announce).await; + } + } #[allow(clippy::cast_possible_truncation)] let announce_response = if remote_addr.is_ipv4() { @@ -164,15 +178,6 @@ pub async fn handle_announce( }) }; - match remote_addr { - SocketAddr::V4(_) => { - tracker.send_stats_event(statistics::Event::Udp4Announce).await; - } - SocketAddr::V6(_) => { - tracker.send_stats_event(statistics::Event::Udp6Announce).await; - } - } - Ok(announce_response) } From cecbc17352af2d61ba6c6aa6ebcfbb62283004f4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Feb 2023 18:16:49 +0000 Subject: [PATCH 316/435] refactor: extract duplicate code from announce request handlers --- src/http/warp_implementation/handlers.rs | 12 ++++-------- src/tracker/mod.rs | 20 +++++++++++++++++++- src/tracker/peer.rs | 6 +++++- src/udp/handlers.rs | 22 ++++++++++------------ src/udp/mod.rs | 2 +- 5 files changed, 39 insertions(+), 23 deletions(-) diff --git a/src/http/warp_implementation/handlers.rs b/src/http/warp_implementation/handlers.rs index 2a0aa005c..fd927150f 100644 --- a/src/http/warp_implementation/handlers.rs +++ b/src/http/warp_implementation/handlers.rs @@ -47,13 +47,9 @@ pub async fn handle_announce( authenticate(&info_hash, &auth_key, tracker.clone()).await?; - let peer_ip = tracker.assign_ip_address_to_peer(&remote_client_ip); + let mut peer = peer_builder::from_request(&announce_request, &remote_client_ip); - let peer = peer_builder::from_request(&announce_request, &peer_ip); - - let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; - - let peers = tracker.get_other_peers(&info_hash, &peer.peer_addr).await; + let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip).await; match remote_client_ip { IpAddr::V4(_) => { @@ -66,8 +62,8 @@ pub async fn handle_announce( send_announce_response( &announce_request, - &torrent_stats, - &peers, + &response.swam_stats, + &response.peers, 
tracker.config.announce_interval, tracker.config.min_announce_interval, ) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index a6ea6d3b0..42dbec17c 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -17,6 +17,8 @@ use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; use self::error::Error; +use self::peer::Peer; +use self::torrent::SwamStats; use crate::config::Configuration; use crate::databases::driver::Driver; use crate::databases::{self, Database}; @@ -41,6 +43,11 @@ pub struct TorrentsMetrics { pub torrents: u64, } +pub struct AnnounceResponse { + pub peers: Vec, + pub swam_stats: SwamStats, +} + impl Tracker { /// # Errors /// @@ -76,7 +83,18 @@ impl Tracker { self.mode == mode::Mode::Listed || self.mode == mode::Mode::PrivateListed } - /// It assigns a socket address to the peer + /// It handles an announce request + pub async fn announce(&self, info_hash: &InfoHash, peer: &mut Peer, remote_client_ip: &IpAddr) -> AnnounceResponse { + peer.change_ip(&self.assign_ip_address_to_peer(remote_client_ip)); + + let swam_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + + let peers = self.get_other_peers(info_hash, &peer.peer_addr).await; + + AnnounceResponse { peers, swam_stats } + } + + /// It assigns an IP address to the peer #[must_use] pub fn assign_ip_address_to_peer(&self, remote_client_ip: &IpAddr) -> IpAddr { assign_ip_address_to_peer(remote_client_ip, self.config.get_ext_ip()) diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 24cc99f9b..7559463db 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -1,4 +1,4 @@ -use std::net::SocketAddr; +use std::net::{IpAddr, SocketAddr}; use std::panic::Location; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; @@ -31,6 +31,10 @@ impl Peer { pub fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } + + pub fn change_ip(&mut self, new_ip: &IpAddr) { + self.peer_addr = 
SocketAddr::new(*new_ip, self.peer_addr.port()); + } } #[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 283041333..8978beb70 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -118,13 +118,9 @@ pub async fn handle_announce( authenticate(&info_hash, tracker.clone()).await?; - let peer_ip = tracker.assign_ip_address_to_peer(&remote_client_ip); + let mut peer = peer_builder::from_request(&wrapped_announce_request, &remote_client_ip); - let peer = peer_builder::from_request(&wrapped_announce_request, &peer_ip); - - let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; - - let peers = tracker.get_other_peers(&info_hash, &peer.peer_addr).await; + let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip).await; match remote_client_ip { IpAddr::V4(_) => { @@ -140,9 +136,10 @@ pub async fn handle_announce( Response::from(AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), - leechers: NumberOfPeers(i64::from(torrent_stats.leechers) as i32), - seeders: NumberOfPeers(i64::from(torrent_stats.seeders) as i32), - peers: peers + leechers: NumberOfPeers(i64::from(response.swam_stats.leechers) as i32), + seeders: NumberOfPeers(i64::from(response.swam_stats.seeders) as i32), + peers: response + .peers .iter() .filter_map(|peer| { if let IpAddr::V4(ip) = peer.peer_addr.ip() { @@ -160,9 +157,10 @@ pub async fn handle_announce( Response::from(AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), - leechers: NumberOfPeers(i64::from(torrent_stats.leechers) as i32), - seeders: NumberOfPeers(i64::from(torrent_stats.seeders) as i32), - peers: peers + leechers: 
NumberOfPeers(i64::from(response.swam_stats.leechers) as i32), + seeders: NumberOfPeers(i64::from(response.swam_stats.seeders) as i32), + peers: response + .peers .iter() .filter_map(|peer| { if let IpAddr::V6(ip) = peer.peer_addr.ip() { diff --git a/src/udp/mod.rs b/src/udp/mod.rs index b6431f752..7b755a20b 100644 --- a/src/udp/mod.rs +++ b/src/udp/mod.rs @@ -1,9 +1,9 @@ pub mod connection_cookie; pub mod error; pub mod handlers; +pub mod peer_builder; pub mod request; pub mod server; -pub mod peer_builder; pub type Bytes = u64; pub type Port = u16; From 3b207954ce787a1cec1a0c442146adeea7f0b623 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Feb 2023 19:17:50 +0000 Subject: [PATCH 317/435] refactor: function does not need to be pub anymore --- src/tracker/mod.rs | 12 ++++++++---- src/tracker/torrent.rs | 12 ++++++------ 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 42dbec17c..989980828 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -89,7 +89,8 @@ impl Tracker { let swam_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - let peers = self.get_other_peers(info_hash, &peer.peer_addr).await; + // todo: remove peer by using its `Id` instead of its socket address: `get_peers_excluding_peer(peer_id: peer::Id)` + let peers = self.get_peers_excluding_peers_with_address(info_hash, &peer.peer_addr).await; AnnounceResponse { peers, swam_stats } } @@ -296,13 +297,16 @@ impl Tracker { Ok(()) } - /// Get all torrent peers for a given torrent filtering out the peer with the client address - pub async fn get_other_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { + async fn get_peers_excluding_peers_with_address( + &self, + info_hash: &InfoHash, + excluded_address: &SocketAddr, + ) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { None => vec![], - Some(entry) => 
entry.get_peers(Some(client_addr)).into_iter().copied().collect(), + Some(entry) => entry.get_peers(Some(excluded_address)).into_iter().copied().collect(), } } diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index b5535a932..b7b79f0f5 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -49,22 +49,22 @@ impl Entry { } #[must_use] - pub fn get_peers(&self, client_addr: Option<&SocketAddr>) -> Vec<&peer::Peer> { + pub fn get_peers(&self, optional_excluded_address: Option<&SocketAddr>) -> Vec<&peer::Peer> { self.peers .values() - .filter(|peer| match client_addr { + .filter(|peer| match optional_excluded_address { // Don't filter on ip_version None => true, // Filter out different ip_version from remote_addr - Some(remote_addr) => { + Some(excluded_address) => { // Skip ip address of client - if peer.peer_addr.ip() == remote_addr.ip() { + if peer.peer_addr.ip() == excluded_address.ip() { return false; } match peer.peer_addr.ip() { - IpAddr::V4(_) => remote_addr.is_ipv4(), - IpAddr::V6(_) => remote_addr.is_ipv6(), + IpAddr::V4(_) => excluded_address.is_ipv4(), + IpAddr::V6(_) => excluded_address.is_ipv6(), } } }) From 7fcc19d33a532c6ce8a8f06085a62cdea033b787 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Feb 2023 19:41:46 +0000 Subject: [PATCH 318/435] refactor: remove unneeded method and make another function private. 
--- src/tracker/mod.rs | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 989980828..48bd76128 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -85,7 +85,7 @@ impl Tracker { /// It handles an announce request pub async fn announce(&self, info_hash: &InfoHash, peer: &mut Peer, remote_client_ip: &IpAddr) -> AnnounceResponse { - peer.change_ip(&self.assign_ip_address_to_peer(remote_client_ip)); + peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.get_ext_ip())); let swam_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; @@ -95,12 +95,6 @@ impl Tracker { AnnounceResponse { peers, swam_stats } } - /// It assigns an IP address to the peer - #[must_use] - pub fn assign_ip_address_to_peer(&self, remote_client_ip: &IpAddr) -> IpAddr { - assign_ip_address_to_peer(remote_client_ip, self.config.get_ext_ip()) - } - /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. 
@@ -407,7 +401,7 @@ impl Tracker { } #[must_use] -pub fn assign_ip_address_to_peer(remote_client_ip: &IpAddr, tracker_external_ip: Option) -> IpAddr { +fn assign_ip_address_to_peer(remote_client_ip: &IpAddr, tracker_external_ip: Option) -> IpAddr { if let Some(host_ip) = tracker_external_ip.filter(|_| remote_client_ip.is_loopback()) { host_ip } else { From 03024e27971803ede2e96400ccfd532a80d53256 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 12 Feb 2023 19:24:16 +0000 Subject: [PATCH 319/435] refactor(http): extract function to get client IP on reverse proxy --- src/http/axum_implementation/handlers.rs | 29 ++++-- src/http/axum_implementation/mod.rs | 2 +- .../{extractors.rs => requests/announce.rs} | 28 +++--- src/http/axum_implementation/requests/mod.rs | 1 + src/http/handlers/announce.rs | 1 + src/http/handlers/mod.rs | 88 +++++++++++++++++++ src/http/mod.rs | 1 + src/http/warp_implementation/filters.rs | 50 +++++------ tests/http/asserts.rs | 20 +++++ tests/http_tracker.rs | 84 ++++++++++++++++++ 10 files changed, 255 insertions(+), 49 deletions(-) rename src/http/axum_implementation/{extractors.rs => requests/announce.rs} (86%) create mode 100644 src/http/axum_implementation/requests/mod.rs create mode 100644 src/http/handlers/announce.rs create mode 100644 src/http/handlers/mod.rs diff --git a/src/http/axum_implementation/handlers.rs b/src/http/axum_implementation/handlers.rs index 050fa8e69..f7c6ba8f9 100644 --- a/src/http/axum_implementation/handlers.rs +++ b/src/http/axum_implementation/handlers.rs @@ -2,8 +2,9 @@ use std::sync::Arc; use axum::extract::State; use axum::response::Json; +use log::debug; -use super::extractors::ExtractAnnounceParams; +use super::requests::announce::ExtractAnnounceRequest; use super::resources::ok::Ok; use super::responses::ok_response; use crate::tracker::Tracker; @@ -13,13 +14,29 @@ pub async fn get_status_handler() -> Json { ok_response() } -/// # Panics -/// -/// todo +/// WIP #[allow(clippy::unused_async)] pub 
async fn announce_handler( State(_tracker): State>, - ExtractAnnounceParams(_announce_params): ExtractAnnounceParams, + ExtractAnnounceRequest(announce_request): ExtractAnnounceRequest, ) -> Json { - todo!() + /* todo: + - Extract remote client ip from request + - Build the `Peer` + - Call the `tracker.announce` method + - Send event for stats + - Move response from Warp to shared mod + - Send response + */ + + // Sample announce URL used for debugging: + // http://0.0.0.0:7070/announce?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548 + + debug!("http announce request: {:#?}", announce_request); + + let info_hash = announce_request.info_hash; + + debug!("info_hash: {:#?}", &info_hash); + + ok_response() } diff --git a/src/http/axum_implementation/mod.rs b/src/http/axum_implementation/mod.rs index 9d96362df..9e5e07979 100644 --- a/src/http/axum_implementation/mod.rs +++ b/src/http/axum_implementation/mod.rs @@ -1,6 +1,6 @@ -pub mod extractors; pub mod handlers; pub mod query; +pub mod requests; pub mod resources; pub mod responses; pub mod routes; diff --git a/src/http/axum_implementation/extractors.rs b/src/http/axum_implementation/requests/announce.rs similarity index 86% rename from src/http/axum_implementation/extractors.rs rename to src/http/axum_implementation/requests/announce.rs index a1f3fad1e..004301744 100644 --- a/src/http/axum_implementation/extractors.rs +++ b/src/http/axum_implementation/requests/announce.rs @@ -7,15 +7,15 @@ use axum::http::request::Parts; use axum::http::StatusCode; use thiserror::Error; -use super::query::Query; +use crate::http::axum_implementation::query::Query; use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::protocol::info_hash::{ConversionError, InfoHash}; use crate::tracker::peer::{self, IdConversionError}; -pub struct ExtractAnnounceParams(pub AnnounceParams); +pub struct ExtractAnnounceRequest(pub Announce); 
#[derive(Debug, PartialEq)] -pub struct AnnounceParams { +pub struct Announce { pub info_hash: InfoHash, pub peer_id: peer::Id, pub port: u16, @@ -55,7 +55,7 @@ impl From for ParseAnnounceQueryError { } } -impl TryFrom for AnnounceParams { +impl TryFrom for Announce { type Error = ParseAnnounceQueryError; fn try_from(query: Query) -> Result { @@ -103,13 +103,15 @@ fn extract_port(query: &Query) -> Result { } #[async_trait] -impl FromRequestParts for ExtractAnnounceParams +impl FromRequestParts for ExtractAnnounceRequest where S: Send + Sync, { type Rejection = (StatusCode, &'static str); async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { + // todo: error responses body should be bencoded + let raw_query = parts.uri.query(); if raw_query.is_none() { @@ -122,34 +124,34 @@ where return Err((StatusCode::BAD_REQUEST, "can't parse query params")); } - let announce_params = AnnounceParams::try_from(query.unwrap()); + let announce_request = Announce::try_from(query.unwrap()); - if announce_params.is_err() { + if announce_request.is_err() { return Err((StatusCode::BAD_REQUEST, "can't parse query params for announce request")); } - Ok(ExtractAnnounceParams(announce_params.unwrap())) + Ok(ExtractAnnounceRequest(announce_request.unwrap())) } } #[cfg(test)] mod tests { - use super::AnnounceParams; + use super::Announce; use crate::http::axum_implementation::query::Query; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; #[test] - fn announce_request_params_should_be_extracted_from_url_query_params() { + fn announce_request_should_be_extracted_from_url_query_params() { let raw_query = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548"; let query = raw_query.parse::().unwrap(); - let announce_params = AnnounceParams::try_from(query).unwrap(); + let announce_request = Announce::try_from(query).unwrap(); assert_eq!( - announce_params, - AnnounceParams { + announce_request, + Announce { 
info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), peer_id: "-qB00000000000000001".parse::().unwrap(), port: 17548, diff --git a/src/http/axum_implementation/requests/mod.rs b/src/http/axum_implementation/requests/mod.rs new file mode 100644 index 000000000..74894de33 --- /dev/null +++ b/src/http/axum_implementation/requests/mod.rs @@ -0,0 +1 @@ +pub mod announce; diff --git a/src/http/handlers/announce.rs b/src/http/handlers/announce.rs new file mode 100644 index 000000000..1f77cb921 --- /dev/null +++ b/src/http/handlers/announce.rs @@ -0,0 +1 @@ +pub fn handler() {} diff --git a/src/http/handlers/mod.rs b/src/http/handlers/mod.rs new file mode 100644 index 000000000..4481ddffd --- /dev/null +++ b/src/http/handlers/mod.rs @@ -0,0 +1,88 @@ +use std::net::{AddrParseError, IpAddr}; +use std::panic::Location; +use std::str::FromStr; + +use thiserror::Error; + +use crate::located_error::{Located, LocatedError}; + +pub mod announce; + +#[derive(Error, Debug)] +pub enum XForwardedForParseError { + #[error("Empty X-Forwarded-For header value, {location}")] + EmptyValue { location: &'static Location<'static> }, + + #[error("Invalid IP in X-Forwarded-For header: {source}")] + InvalidIp { source: LocatedError<'static, AddrParseError> }, +} + +impl From for XForwardedForParseError { + #[track_caller] + fn from(err: AddrParseError) -> Self { + Self::InvalidIp { + source: Located(err).into(), + } + } +} + +/// It extracts the last IP address from the `X-Forwarded-For` http header value. 
+/// +/// # Errors +/// +/// Will return and error if the last IP in the `X-Forwarded-For` header is not a valid IP +pub fn maybe_rightmost_forwarded_ip(x_forwarded_for_value: &str) -> Result { + let mut x_forwarded_for_raw = x_forwarded_for_value.to_string(); + + // Remove whitespace chars + x_forwarded_for_raw.retain(|c| !c.is_whitespace()); + + // Get all forwarded IP's in a vec + let x_forwarded_ips: Vec<&str> = x_forwarded_for_raw.split(',').collect(); + + match x_forwarded_ips.last() { + Some(last_ip) => match IpAddr::from_str(last_ip) { + Ok(ip) => Ok(ip), + Err(err) => Err(err.into()), + }, + None => Err(XForwardedForParseError::EmptyValue { + location: Location::caller(), + }), + } +} + +#[cfg(test)] +mod tests { + + use std::net::IpAddr; + use std::str::FromStr; + + use crate::http::handlers::maybe_rightmost_forwarded_ip; + + #[test] + fn the_last_forwarded_ip_can_be_parsed_from_the_the_corresponding_http_header() { + assert!(maybe_rightmost_forwarded_ip("").is_err()); + + assert!(maybe_rightmost_forwarded_ip("INVALID IP").is_err()); + + assert_eq!( + maybe_rightmost_forwarded_ip("2001:db8:85a3:8d3:1319:8a2e:370:7348").unwrap(), + IpAddr::from_str("2001:db8:85a3:8d3:1319:8a2e:370:7348").unwrap() + ); + + assert_eq!( + maybe_rightmost_forwarded_ip("203.0.113.195").unwrap(), + IpAddr::from_str("203.0.113.195").unwrap() + ); + + assert_eq!( + maybe_rightmost_forwarded_ip("203.0.113.195, 2001:db8:85a3:8d3:1319:8a2e:370:7348").unwrap(), + IpAddr::from_str("2001:db8:85a3:8d3:1319:8a2e:370:7348").unwrap() + ); + + assert_eq!( + maybe_rightmost_forwarded_ip("203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178").unwrap(), + IpAddr::from_str("150.172.238.178").unwrap() + ); + } +} diff --git a/src/http/mod.rs b/src/http/mod.rs index 039a2067b..1425afe07 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -13,6 +13,7 @@ use serde::{Deserialize, Serialize}; pub mod axum_implementation; +pub mod handlers; pub mod percent_encoding; pub mod 
warp_implementation; diff --git a/src/http/warp_implementation/filters.rs b/src/http/warp_implementation/filters.rs index 176170330..2a218491b 100644 --- a/src/http/warp_implementation/filters.rs +++ b/src/http/warp_implementation/filters.rs @@ -1,13 +1,13 @@ use std::convert::Infallible; use std::net::{IpAddr, SocketAddr}; use std::panic::Location; -use std::str::FromStr; use std::sync::Arc; use warp::{reject, Filter, Rejection}; use super::error::Error; use super::{request, WebResult}; +use crate::http::handlers::maybe_rightmost_forwarded_ip; use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; @@ -138,41 +138,33 @@ fn peer_id(raw_query: &String) -> WebResult { } } -/// Get `PeerAddress` from `RemoteAddress` or Forwarded -fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option)) -> WebResult { - if !on_reverse_proxy && remote_addr.is_none() { - return Err(reject::custom(Error::AddressNotFound { - location: Location::caller(), - message: "neither on have remote address or on a reverse proxy".to_string(), - })); - } +/// Get peer IP from HTTP client IP or X-Forwarded-For HTTP header +fn peer_addr( + (on_reverse_proxy, remote_client_ip, maybe_x_forwarded_for): (bool, Option, Option), +) -> WebResult { + if on_reverse_proxy { + if maybe_x_forwarded_for.is_none() { + return Err(reject::custom(Error::AddressNotFound { + location: Location::caller(), + message: "must have a x-forwarded-for when using a reverse proxy".to_string(), + })); + } - if on_reverse_proxy && x_forwarded_for.is_none() { - return Err(reject::custom(Error::AddressNotFound { - location: Location::caller(), - message: "must have a x-forwarded-for when using a reverse proxy".to_string(), - })); - } + let x_forwarded_for = maybe_x_forwarded_for.unwrap(); - if on_reverse_proxy { - let mut x_forwarded_for_raw = x_forwarded_for.unwrap(); - // remove 
whitespace chars - x_forwarded_for_raw.retain(|c| !c.is_whitespace()); - // get all forwarded ip's in a vec - let x_forwarded_ips: Vec<&str> = x_forwarded_for_raw.split(',').collect(); - // set client ip to last forwarded ip - let x_forwarded_ip = *x_forwarded_ips.last().unwrap(); - - IpAddr::from_str(x_forwarded_ip).map_err(|e| { + maybe_rightmost_forwarded_ip(&x_forwarded_for).map_err(|e| { reject::custom(Error::AddressNotFound { location: Location::caller(), - message: format!( - "on remote proxy and unable to parse the last x-forwarded-ip: `{e}`, from `{x_forwarded_for_raw}`" - ), + message: format!("on remote proxy and unable to parse the last x-forwarded-ip: `{e}`, from `{x_forwarded_for}`"), }) }) + } else if remote_client_ip.is_none() { + return Err(reject::custom(Error::AddressNotFound { + location: Location::caller(), + message: "neither on have remote address or on a reverse proxy".to_string(), + })); } else { - Ok(remote_addr.unwrap().ip()) + return Ok(remote_client_ip.unwrap().ip()); } } diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index 211a7bb33..8a1e2b554 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -127,3 +127,23 @@ pub async fn assert_invalid_authentication_key_error_response(response: Response assert_error_bencoded(&response.text().await.unwrap(), "is not valid", Location::caller()); } + +pub async fn assert_could_not_find_remote_address_on_xff_header_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_error_bencoded( + &response.text().await.unwrap(), + "could not find remote address: must have a x-forwarded-for when using a reverse proxy", + Location::caller(), + ); +} + +pub async fn assert_invalid_remote_address_on_xff_header_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_error_bencoded( + &response.text().await.unwrap(), + "could not find remote address: on remote proxy and unable to parse the last x-forwarded-ip", + 
Location::caller(), + ); +} diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 60219d9fe..409c5d343 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -16,6 +16,47 @@ mod warp_http_tracker_server { mod for_all_config_modes { + mod running_on_reverse_proxy { + use torrust_tracker::http::Version; + + use crate::http::asserts::{ + assert_could_not_find_remote_address_on_xff_header_error_response, + assert_invalid_remote_address_on_xff_header_error_response, + }; + use crate::http::client::Client; + use crate::http::requests::announce::QueryBuilder; + use crate::http::server::start_http_tracker_on_reverse_proxy; + + #[tokio::test] + async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { + // If the tracker is running behind a reverse proxy, the peer IP is the + // last IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy client. + + let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Warp).await; + + let params = QueryBuilder::default().query().params(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_could_not_find_remote_address_on_xff_header_error_response(response).await; + } + + #[tokio::test] + async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { + let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Warp).await; + + let params = QueryBuilder::default().query().params(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") + .await; + + assert_invalid_remote_address_on_xff_header_error_response(response).await; + } + } + mod receiving_an_announce_request { // Announce request documentation: @@ -1243,6 +1284,49 @@ mod axum_http_tracker_server { mod for_all_config_modes { + mod and_running_on_reverse_proxy { + use 
torrust_tracker::http::Version; + + use crate::http::asserts::{ + assert_could_not_find_remote_address_on_xff_header_error_response, + assert_invalid_remote_address_on_xff_header_error_response, + }; + use crate::http::client::Client; + use crate::http::requests::announce::QueryBuilder; + use crate::http::server::start_http_tracker_on_reverse_proxy; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { + // If the tracker is running behind a reverse proxy, the peer IP is the + // last IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy client. + + let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; + + let params = QueryBuilder::default().query().params(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{params}")) + .await; + + assert_could_not_find_remote_address_on_xff_header_error_response(response).await; + } + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { + let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; + + let params = QueryBuilder::default().query().params(); + + let response = Client::new(http_tracker_server.get_connection_info()) + .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") + .await; + + assert_invalid_remote_address_on_xff_header_error_response(response).await; + } + } + mod receiving_an_announce_request { // Announce request documentation: From d0c8eb07246431850a8c6bcfafb69d3fc3a1b83c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 13 Feb 2023 15:58:08 +0000 Subject: [PATCH 320/435] refactor(http): reorganize mods --- .../{handlers.rs => handlers/announce.rs} | 15 +++++---------- src/http/axum_implementation/handlers/mod.rs | 2 ++ src/http/axum_implementation/handlers/status.rs | 11 +++++++++++ 
src/http/axum_implementation/responses.rs | 10 ---------- src/http/axum_implementation/responses/mod.rs | 1 + src/http/axum_implementation/responses/ok.rs | 8 ++++++++ src/http/axum_implementation/routes.rs | 5 +++-- src/http/handlers/announce.rs | 1 - src/http/mod.rs | 1 - .../filter_helpers.rs} | 4 +--- src/http/warp_implementation/filters.rs | 2 +- src/http/warp_implementation/mod.rs | 5 +++-- 12 files changed, 35 insertions(+), 30 deletions(-) rename src/http/axum_implementation/{handlers.rs => handlers/announce.rs} (76%) create mode 100644 src/http/axum_implementation/handlers/mod.rs create mode 100644 src/http/axum_implementation/handlers/status.rs delete mode 100644 src/http/axum_implementation/responses.rs create mode 100644 src/http/axum_implementation/responses/mod.rs create mode 100644 src/http/axum_implementation/responses/ok.rs delete mode 100644 src/http/handlers/announce.rs rename src/http/{handlers/mod.rs => warp_implementation/filter_helpers.rs} (97%) diff --git a/src/http/axum_implementation/handlers.rs b/src/http/axum_implementation/handlers/announce.rs similarity index 76% rename from src/http/axum_implementation/handlers.rs rename to src/http/axum_implementation/handlers/announce.rs index f7c6ba8f9..71bd0a0e2 100644 --- a/src/http/axum_implementation/handlers.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -4,19 +4,14 @@ use axum::extract::State; use axum::response::Json; use log::debug; -use super::requests::announce::ExtractAnnounceRequest; -use super::resources::ok::Ok; -use super::responses::ok_response; +use crate::http::axum_implementation::requests::announce::ExtractAnnounceRequest; +use crate::http::axum_implementation::resources::ok::Ok; +use crate::http::axum_implementation::responses::ok; use crate::tracker::Tracker; -#[allow(clippy::unused_async)] -pub async fn get_status_handler() -> Json { - ok_response() -} - /// WIP #[allow(clippy::unused_async)] -pub async fn announce_handler( +pub async fn handle( 
State(_tracker): State>, ExtractAnnounceRequest(announce_request): ExtractAnnounceRequest, ) -> Json { @@ -38,5 +33,5 @@ pub async fn announce_handler( debug!("info_hash: {:#?}", &info_hash); - ok_response() + ok::response() } diff --git a/src/http/axum_implementation/handlers/mod.rs b/src/http/axum_implementation/handlers/mod.rs new file mode 100644 index 000000000..bff05984c --- /dev/null +++ b/src/http/axum_implementation/handlers/mod.rs @@ -0,0 +1,2 @@ +pub mod announce; +pub mod status; diff --git a/src/http/axum_implementation/handlers/status.rs b/src/http/axum_implementation/handlers/status.rs new file mode 100644 index 000000000..3e9c98466 --- /dev/null +++ b/src/http/axum_implementation/handlers/status.rs @@ -0,0 +1,11 @@ +/// Temporary handler for testing and debugging the new Axum implementation +/// It should be removed once the migration to Axum is finished. +use axum::response::Json; + +use crate::http::axum_implementation::resources::ok::Ok; +use crate::http::axum_implementation::responses::ok; + +#[allow(clippy::unused_async)] +pub async fn get_status_handler() -> Json { + ok::response() +} diff --git a/src/http/axum_implementation/responses.rs b/src/http/axum_implementation/responses.rs deleted file mode 100644 index 9c5896b35..000000000 --- a/src/http/axum_implementation/responses.rs +++ /dev/null @@ -1,10 +0,0 @@ -// Resource responses - -use axum::Json; - -use super::resources::ok::Ok; - -#[must_use] -pub fn ok_response() -> Json { - Json(Ok {}) -} diff --git a/src/http/axum_implementation/responses/mod.rs b/src/http/axum_implementation/responses/mod.rs new file mode 100644 index 000000000..a493c2ac2 --- /dev/null +++ b/src/http/axum_implementation/responses/mod.rs @@ -0,0 +1 @@ +pub mod ok; diff --git a/src/http/axum_implementation/responses/ok.rs b/src/http/axum_implementation/responses/ok.rs new file mode 100644 index 000000000..b08ea032f --- /dev/null +++ b/src/http/axum_implementation/responses/ok.rs @@ -0,0 +1,8 @@ +use axum::Json; + +use 
crate::http::axum_implementation::resources::ok::Ok; + +#[must_use] +pub fn response() -> Json { + Json(Ok {}) +} diff --git a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs index 8e4980682..625d4656f 100644 --- a/src/http/axum_implementation/routes.rs +++ b/src/http/axum_implementation/routes.rs @@ -3,7 +3,8 @@ use std::sync::Arc; use axum::routing::get; use axum::Router; -use super::handlers::{announce_handler, get_status_handler}; +use super::handlers::announce::handle; +use super::handlers::status::get_status_handler; use crate::tracker::Tracker; pub fn router(tracker: &Arc) -> Router { @@ -11,5 +12,5 @@ pub fn router(tracker: &Arc) -> Router { // Status .route("/status", get(get_status_handler)) // Announce request - .route("/announce", get(announce_handler).with_state(tracker.clone())) + .route("/announce", get(handle).with_state(tracker.clone())) } diff --git a/src/http/handlers/announce.rs b/src/http/handlers/announce.rs deleted file mode 100644 index 1f77cb921..000000000 --- a/src/http/handlers/announce.rs +++ /dev/null @@ -1 +0,0 @@ -pub fn handler() {} diff --git a/src/http/mod.rs b/src/http/mod.rs index 1425afe07..039a2067b 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -13,7 +13,6 @@ use serde::{Deserialize, Serialize}; pub mod axum_implementation; -pub mod handlers; pub mod percent_encoding; pub mod warp_implementation; diff --git a/src/http/handlers/mod.rs b/src/http/warp_implementation/filter_helpers.rs similarity index 97% rename from src/http/handlers/mod.rs rename to src/http/warp_implementation/filter_helpers.rs index 4481ddffd..89188d868 100644 --- a/src/http/handlers/mod.rs +++ b/src/http/warp_implementation/filter_helpers.rs @@ -6,8 +6,6 @@ use thiserror::Error; use crate::located_error::{Located, LocatedError}; -pub mod announce; - #[derive(Error, Debug)] pub enum XForwardedForParseError { #[error("Empty X-Forwarded-For header value, {location}")] @@ -57,7 +55,7 @@ mod tests { use std::net::IpAddr; use 
std::str::FromStr; - use crate::http::handlers::maybe_rightmost_forwarded_ip; + use super::maybe_rightmost_forwarded_ip; #[test] fn the_last_forwarded_ip_can_be_parsed_from_the_the_corresponding_http_header() { diff --git a/src/http/warp_implementation/filters.rs b/src/http/warp_implementation/filters.rs index 2a218491b..fc8ef20bc 100644 --- a/src/http/warp_implementation/filters.rs +++ b/src/http/warp_implementation/filters.rs @@ -6,8 +6,8 @@ use std::sync::Arc; use warp::{reject, Filter, Rejection}; use super::error::Error; +use super::filter_helpers::maybe_rightmost_forwarded_ip; use super::{request, WebResult}; -use crate::http::handlers::maybe_rightmost_forwarded_ip; use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; diff --git a/src/http/warp_implementation/mod.rs b/src/http/warp_implementation/mod.rs index 1dec73b29..2ceda2e68 100644 --- a/src/http/warp_implementation/mod.rs +++ b/src/http/warp_implementation/mod.rs @@ -1,6 +1,5 @@ -use warp::Rejection; - pub mod error; +pub mod filter_helpers; pub mod filters; pub mod handlers; pub mod peer_builder; @@ -9,5 +8,7 @@ pub mod response; pub mod routes; pub mod server; +use warp::Rejection; + pub type Bytes = u64; pub type WebResult = std::result::Result; From f327dcfa9139ff88b8582b79f0e07489187a7349 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 13 Feb 2023 19:45:17 +0000 Subject: [PATCH 321/435] fix(http): [#184] bencoded error responses for announce request HTTP tracker error responses must be bencoded. Fixed in the new Axum implementation. 
--- src/http/axum_implementation/query.rs | 2 +- .../axum_implementation/requests/announce.rs | 55 +++++++++++++------ .../axum_implementation/responses/error.rs | 40 ++++++++++++++ src/http/axum_implementation/responses/mod.rs | 1 + tests/http/asserts.rs | 50 +++++++++++++---- tests/http_tracker.rs | 53 ++++++++++-------- 6 files changed, 151 insertions(+), 50 deletions(-) create mode 100644 src/http/axum_implementation/responses/error.rs diff --git a/src/http/axum_implementation/query.rs b/src/http/axum_implementation/query.rs index c7c20b22d..3c9c676f1 100644 --- a/src/http/axum_implementation/query.rs +++ b/src/http/axum_implementation/query.rs @@ -45,7 +45,7 @@ impl FromStr for Param { fn from_str(raw_param: &str) -> Result { let pair = raw_param.split('=').collect::>(); - if pair.len() > 2 { + if pair.len() != 2 { return Err(ParseQueryError::InvalidParam { location: Location::caller(), raw_param: raw_param.to_owned(), diff --git a/src/http/axum_implementation/requests/announce.rs b/src/http/axum_implementation/requests/announce.rs index 004301744..34a9ad98a 100644 --- a/src/http/axum_implementation/requests/announce.rs +++ b/src/http/axum_implementation/requests/announce.rs @@ -4,10 +4,11 @@ use std::str::FromStr; use axum::async_trait; use axum::extract::FromRequestParts; use axum::http::request::Parts; -use axum::http::StatusCode; +use axum::response::{IntoResponse, Response}; use thiserror::Error; -use crate::http::axum_implementation::query::Query; +use crate::http::axum_implementation::query::{ParseQueryError, Query}; +use crate::http::axum_implementation::responses; use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::protocol::info_hash::{ConversionError, InfoHash}; use crate::tracker::peer::{self, IdConversionError}; @@ -23,17 +24,17 @@ pub struct Announce { #[derive(Error, Debug)] pub enum ParseAnnounceQueryError { - #[error("missing infohash {location}")] + #[error("missing info_hash param: {location}")] 
MissingInfoHash { location: &'static Location<'static> }, - #[error("invalid infohash {location}")] + #[error("invalid info_hash param: {location}")] InvalidInfoHash { location: &'static Location<'static> }, - #[error("missing peer id {location}")] + #[error("missing peer_id param: {location}")] MissingPeerId { location: &'static Location<'static> }, - #[error("invalid peer id {location}")] + #[error("invalid peer_id param: {location}")] InvalidPeerId { location: &'static Location<'static> }, - #[error("missing port {location}")] + #[error("missing port param: {location}")] MissingPort { location: &'static Location<'static> }, - #[error("invalid port {location}")] + #[error("invalid port param: {location}")] InvalidPort { location: &'static Location<'static> }, } @@ -49,12 +50,31 @@ impl From for ParseAnnounceQueryError { impl From for ParseAnnounceQueryError { #[track_caller] fn from(_err: ConversionError) -> Self { - Self::InvalidPeerId { + Self::InvalidInfoHash { location: Location::caller(), } } } +impl From for responses::error::Error { + fn from(err: ParseQueryError) -> Self { + responses::error::Error { + // code-review: should we expose error location in public HTTP tracker API? + // Error message example: "Cannot parse query params: invalid param a=b=c in src/http/axum_implementation/query.rs:50:27" + failure_reason: format!("Cannot parse query params: {err}"), + } + } +} + +impl From for responses::error::Error { + fn from(err: ParseAnnounceQueryError) -> Self { + responses::error::Error { + // code-review: should we expose error location in public HTTP tracker API? 
+ failure_reason: format!("Cannot parse query params for announce request: {err}"), + } + } +} + impl TryFrom for Announce { type Error = ParseAnnounceQueryError; @@ -107,27 +127,28 @@ impl FromRequestParts for ExtractAnnounceRequest where S: Send + Sync, { - type Rejection = (StatusCode, &'static str); + type Rejection = Response; async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { - // todo: error responses body should be bencoded - let raw_query = parts.uri.query(); if raw_query.is_none() { - return Err((StatusCode::BAD_REQUEST, "missing query params")); + return Err(responses::error::Error { + failure_reason: "missing query params for announce request".to_string(), + } + .into_response()); } let query = raw_query.unwrap().parse::(); - if query.is_err() { - return Err((StatusCode::BAD_REQUEST, "can't parse query params")); + if let Err(error) = query { + return Err(responses::error::Error::from(error).into_response()); } let announce_request = Announce::try_from(query.unwrap()); - if announce_request.is_err() { - return Err((StatusCode::BAD_REQUEST, "can't parse query params for announce request")); + if let Err(error) = announce_request { + return Err(responses::error::Error::from(error).into_response()); } Ok(ExtractAnnounceRequest(announce_request.unwrap())) diff --git a/src/http/axum_implementation/responses/error.rs b/src/http/axum_implementation/responses/error.rs new file mode 100644 index 000000000..bcf2aaa57 --- /dev/null +++ b/src/http/axum_implementation/responses/error.rs @@ -0,0 +1,40 @@ +use axum::http::StatusCode; +use axum::response::{IntoResponse, Response}; +use serde::{self, Serialize}; + +#[derive(Serialize)] +pub struct Error { + #[serde(rename = "failure reason")] + pub failure_reason: String, +} + +impl Error { + /// # Panics + /// + /// It would panic if the `Error` struct contained an inappropriate type. 
+ #[must_use] + pub fn write(&self) -> String { + serde_bencode::to_string(&self).unwrap() + } +} + +impl IntoResponse for Error { + fn into_response(self) -> Response { + (StatusCode::OK, self.write()).into_response() + } +} + +#[cfg(test)] +mod tests { + + use super::Error; + + #[test] + fn http_tracker_errors_can_be_bencoded() { + let err = Error { + failure_reason: "error message".to_owned(), + }; + + assert_eq!(err.write(), "d14:failure reason13:error messagee"); // cspell:disable-line + } +} diff --git a/src/http/axum_implementation/responses/mod.rs b/src/http/axum_implementation/responses/mod.rs index a493c2ac2..d55a66679 100644 --- a/src/http/axum_implementation/responses/mod.rs +++ b/src/http/axum_implementation/responses/mod.rs @@ -1 +1,2 @@ +pub mod error; pub mod ok; diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index 8a1e2b554..e146f252d 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -6,7 +6,7 @@ use super::responses::announce::{Announce, Compact, DeserializedCompact}; use super::responses::scrape; use crate::http::responses::error::Error; -pub fn assert_error_bencoded(response_text: &String, expected_failure_reason: &str, location: &'static Location<'static>) { +pub fn assert_bencoded_error(response_text: &String, expected_failure_reason: &str, location: &'static Location<'static>) { let error_failure_reason = serde_bencode::from_str::(response_text) .unwrap_or_else(|_| panic!( "response body should be a valid bencoded string for the '{expected_failure_reason}' error, got \"{response_text}\"" @@ -18,7 +18,7 @@ pub fn assert_error_bencoded(response_text: &String, expected_failure_reason: &s error_failure_reason.contains(expected_failure_reason), r#": response: `"{error_failure_reason}"` - dose not contain: `"{expected_failure_reason}"`, {location}"# + does not contain: `"{expected_failure_reason}"`, {location}"# ); } @@ -83,13 +83,13 @@ pub async fn assert_is_announce_response(response: Response) { pub async fn 
assert_internal_server_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded(&response.text().await.unwrap(), "internal server", Location::caller()); + assert_bencoded_error(&response.text().await.unwrap(), "internal server", Location::caller()); } pub async fn assert_invalid_info_hash_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded( + assert_bencoded_error( &response.text().await.unwrap(), "no valid infohashes found", Location::caller(), @@ -99,7 +99,7 @@ pub async fn assert_invalid_info_hash_error_response(response: Response) { pub async fn assert_invalid_peer_id_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded( + assert_bencoded_error( &response.text().await.unwrap(), "peer_id is either missing or invalid", Location::caller(), @@ -109,13 +109,13 @@ pub async fn assert_invalid_peer_id_error_response(response: Response) { pub async fn assert_torrent_not_in_whitelist_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded(&response.text().await.unwrap(), "is not whitelisted", Location::caller()); + assert_bencoded_error(&response.text().await.unwrap(), "is not whitelisted", Location::caller()); } pub async fn assert_peer_not_authenticated_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded( + assert_bencoded_error( &response.text().await.unwrap(), "The peer is not authenticated", Location::caller(), @@ -125,13 +125,13 @@ pub async fn assert_peer_not_authenticated_error_response(response: Response) { pub async fn assert_invalid_authentication_key_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded(&response.text().await.unwrap(), "is not valid", Location::caller()); + assert_bencoded_error(&response.text().await.unwrap(), "is not valid", Location::caller()); } pub async fn 
assert_could_not_find_remote_address_on_xff_header_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded( + assert_bencoded_error( &response.text().await.unwrap(), "could not find remote address: must have a x-forwarded-for when using a reverse proxy", Location::caller(), @@ -141,9 +141,39 @@ pub async fn assert_could_not_find_remote_address_on_xff_header_error_response(r pub async fn assert_invalid_remote_address_on_xff_header_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_error_bencoded( + assert_bencoded_error( &response.text().await.unwrap(), "could not find remote address: on remote proxy and unable to parse the last x-forwarded-ip", Location::caller(), ); } + +// Specific errors for announce request + +pub async fn assert_missing_query_params_for_announce_request_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + "missing query params for announce request", + Location::caller(), + ); +} + +pub async fn assert_bad_announce_request_error_response(response: Response, failure: &str) { + assert_cannot_parse_query_params_error_response(response, &format!(" for announce request: {failure}")).await; +} + +pub async fn assert_cannot_parse_query_param_error_response(response: Response, failure: &str) { + assert_cannot_parse_query_params_error_response(response, &format!(": {failure}")).await; +} + +pub async fn assert_cannot_parse_query_params_error_response(response: Response, failure: &str) { + assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + &format!("Cannot parse query params{failure}"), + Location::caller(), + ); +} diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 409c5d343..85494c301 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -110,7 +110,7 @@ mod warp_http_tracker_server { } #[tokio::test] - async fn 
should_fail_when_the_request_is_empty() { + async fn should_fail_when_the_url_query_component_is_empty() { let http_tracker_server = start_default_http_tracker(Version::Warp).await; let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; @@ -1351,9 +1351,10 @@ mod axum_http_tracker_server { use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; use crate::http::asserts::{ - assert_announce_response, assert_compact_announce_response, assert_empty_announce_response, - assert_internal_server_error_response, assert_invalid_info_hash_error_response, - assert_invalid_peer_id_error_response, assert_is_announce_response, + assert_announce_response, assert_bad_announce_request_error_response, + assert_cannot_parse_query_param_error_response, assert_cannot_parse_query_params_error_response, + assert_compact_announce_response, assert_empty_announce_response, assert_internal_server_error_response, + assert_is_announce_response, assert_missing_query_params_for_announce_request_error_response, }; use crate::http::client::Client; use crate::http::requests::announce::{Compact, QueryBuilder}; @@ -1380,18 +1381,29 @@ mod axum_http_tracker_server { assert_is_announce_response(response).await; } - //#[tokio::test] - #[allow(dead_code)] - async fn should_fail_when_the_request_is_empty() { + #[tokio::test] + async fn should_fail_when_the_url_query_component_is_empty() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; - assert_internal_server_error_response(response).await; + assert_missing_query_params_for_announce_request_error_response(response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] + async fn should_fail_when_url_query_parameters_are_invalid() { + let http_tracker_server = start_default_http_tracker(Version::Axum).await; + + let invalid_query_param = "a=b=c"; + + let response = 
Client::new(http_tracker_server.get_connection_info()) + .get(&format!("announce?{invalid_query_param}")) + .await; + + assert_cannot_parse_query_param_error_response(response, "invalid param a=b=c").await; + } + + #[tokio::test] async fn should_fail_when_a_mandatory_field_is_missing() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; @@ -1405,7 +1417,7 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_invalid_info_hash_error_response(response).await; + assert_bad_announce_request_error_response(response, "missing info_hash param").await; // Without `peer_id` param @@ -1417,7 +1429,7 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_invalid_peer_id_error_response(response).await; + assert_bad_announce_request_error_response(response, "missing peer_id param").await; // Without `port` param @@ -1429,11 +1441,10 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_internal_server_error_response(response).await; + assert_bad_announce_request_error_response(response, "missing port param").await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; @@ -1446,7 +1457,7 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_invalid_info_hash_error_response(response).await; + assert_cannot_parse_query_params_error_response(response, "").await; } } @@ -1511,8 +1522,7 @@ mod axum_http_tracker_server { } } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_when_the_peer_id_param_is_invalid() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; @@ -1534,12 +1544,11 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_invalid_peer_id_error_response(response).await; + 
assert_cannot_parse_query_params_error_response(response, "").await; } } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_when_the_port_param_is_invalid() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; @@ -1554,7 +1563,7 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_internal_server_error_response(response).await; + assert_cannot_parse_query_params_error_response(response, "").await; } } From 831805743a4841903a65c55072e157d0cee78a39 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 14 Feb 2023 14:07:02 +0000 Subject: [PATCH 322/435] feat: [#184] add dependency: axum-client-ip It will be used to extract the rightmost IP in the X-Forwarded-For header when the tracker is running on reverse proxy. --- Cargo.lock | 28 ++++++++++++++++++++++++++++ Cargo.toml | 1 + 2 files changed, 29 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 8347362ab..6f9d9231b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -135,6 +135,17 @@ dependencies = [ "tower-service", ] +[[package]] +name = "axum-client-ip" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d719fabd6813392bbc10e1fe67f2977fad52791a836e51236f7e02f2482e017" +dependencies = [ + "axum", + "forwarded-header-value", + "serde", +] + [[package]] name = "axum-core" version = "0.3.0" @@ -706,6 +717,16 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "forwarded-header-value" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9" +dependencies = [ + "nonempty", + "thiserror", +] + [[package]] name = "fragile" version = "2.0.0" @@ -1550,6 +1571,12 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "nonempty" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" + [[package]] name = "normalize-line-endings" version = "0.3.0" @@ -2832,6 +2859,7 @@ dependencies = [ "aquatic_udp_protocol", "async-trait", "axum", + "axum-client-ip", "axum-server", "binascii", "chrono", diff --git a/Cargo.toml b/Cargo.toml index cf90da8f1..75ffa7935 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -60,6 +60,7 @@ aquatic_udp_protocol = "0.2" uuid = { version = "1", features = ["v4"] } axum = "0.6.1" axum-server = { version = "0.4.4", features = ["tls-rustls"] } +axum-client-ip = "0.4.0" [dev-dependencies] From 42bd313c3ff7e1bf9351cb89aa7c9e4e70dcb170 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 14 Feb 2023 14:12:23 +0000 Subject: [PATCH 323/435] feat: [#184] calculate remote client ip depending on whether the tracker is running on reverse proxy or not Obtaining the remote peer client IP could be a complex task. See: https://adam-p.ca/blog/2022/03/x-forwarded-for/#multiple-headers We were using a custom function to extract the rightmost IP in the X-Forwarded-For HTTP header. This commit starts using an external crate for that. 
--- .../axum_implementation/handlers/announce.rs | 10 +- .../axum_implementation/handlers/status.rs | 5 +- src/http/axum_implementation/resources/ok.rs | 7 +- src/http/axum_implementation/responses/ok.rs | 9 +- src/http/axum_implementation/routes.rs | 8 ++ src/http/axum_implementation/server.rs | 4 +- tests/http_tracker.rs | 120 ++++++++++++++++-- 7 files changed, 144 insertions(+), 19 deletions(-) diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 71bd0a0e2..9b373495d 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -2,6 +2,7 @@ use std::sync::Arc; use axum::extract::State; use axum::response::Json; +use axum_client_ip::{InsecureClientIp, SecureClientIp}; use log::debug; use crate::http::axum_implementation::requests::announce::ExtractAnnounceRequest; @@ -14,6 +15,8 @@ use crate::tracker::Tracker; pub async fn handle( State(_tracker): State>, ExtractAnnounceRequest(announce_request): ExtractAnnounceRequest, + insecure_ip: InsecureClientIp, + secure_ip: SecureClientIp, ) -> Json { /* todo: - Extract remote client ip from request @@ -27,11 +30,12 @@ pub async fn handle( // Sample announce URL used for debugging: // http://0.0.0.0:7070/announce?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548 - debug!("http announce request: {:#?}", announce_request); - let info_hash = announce_request.info_hash; + debug!("http announce request: {:#?}", announce_request); debug!("info_hash: {:#?}", &info_hash); + debug!("remote client ip, insecure_ip: {:#?}", &insecure_ip); + debug!("remote client ip, secure_ip: {:#?}", &secure_ip); - ok::response() + ok::response(&insecure_ip.0, &secure_ip.0) } diff --git a/src/http/axum_implementation/handlers/status.rs b/src/http/axum_implementation/handlers/status.rs index 3e9c98466..37d88321c 100644 --- a/src/http/axum_implementation/handlers/status.rs 
+++ b/src/http/axum_implementation/handlers/status.rs @@ -1,11 +1,12 @@ /// Temporary handler for testing and debugging the new Axum implementation /// It should be removed once the migration to Axum is finished. use axum::response::Json; +use axum_client_ip::{InsecureClientIp, SecureClientIp}; use crate::http::axum_implementation::resources::ok::Ok; use crate::http::axum_implementation::responses::ok; #[allow(clippy::unused_async)] -pub async fn get_status_handler() -> Json { - ok::response() +pub async fn get_status_handler(insecure_ip: InsecureClientIp, secure_ip: SecureClientIp) -> Json { + ok::response(&insecure_ip.0, &secure_ip.0) } diff --git a/src/http/axum_implementation/resources/ok.rs b/src/http/axum_implementation/resources/ok.rs index adc56e6ea..4a3495d0f 100644 --- a/src/http/axum_implementation/resources/ok.rs +++ b/src/http/axum_implementation/resources/ok.rs @@ -1,4 +1,9 @@ +use std::net::IpAddr; + use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] -pub struct Ok {} +pub struct Ok { + pub remote_client_insecure_ip: IpAddr, + pub remote_client_secure_ip: IpAddr, +} diff --git a/src/http/axum_implementation/responses/ok.rs b/src/http/axum_implementation/responses/ok.rs index b08ea032f..a2d61749d 100644 --- a/src/http/axum_implementation/responses/ok.rs +++ b/src/http/axum_implementation/responses/ok.rs @@ -1,8 +1,13 @@ +use std::net::IpAddr; + use axum::Json; use crate::http::axum_implementation::resources::ok::Ok; #[must_use] -pub fn response() -> Json { - Json(Ok {}) +pub fn response(remote_client_insecure_ip: &IpAddr, remote_client_secure_ip: &IpAddr) -> Json { + Json(Ok { + remote_client_insecure_ip: *remote_client_insecure_ip, + remote_client_secure_ip: *remote_client_secure_ip, + }) } diff --git a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs index 625d4656f..a32a60ec0 100644 --- a/src/http/axum_implementation/routes.rs +++ b/src/http/axum_implementation/routes.rs @@ 
-2,15 +2,23 @@ use std::sync::Arc; use axum::routing::get; use axum::Router; +use axum_client_ip::SecureClientIpSource; use super::handlers::announce::handle; use super::handlers::status::get_status_handler; use crate::tracker::Tracker; pub fn router(tracker: &Arc) -> Router { + let secure_client_ip_source = if tracker.config.on_reverse_proxy { + SecureClientIpSource::RightmostXForwardedFor + } else { + SecureClientIpSource::ConnectInfo + }; + Router::new() // Status .route("/status", get(get_status_handler)) // Announce request .route("/announce", get(handle).with_state(tracker.clone())) + .layer(secure_client_ip_source.into_extension()) } diff --git a/src/http/axum_implementation/server.rs b/src/http/axum_implementation/server.rs index 541dda33e..30c580af6 100644 --- a/src/http/axum_implementation/server.rs +++ b/src/http/axum_implementation/server.rs @@ -13,7 +13,7 @@ use crate::tracker::Tracker; pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { let app = router(tracker); - let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); + let server = axum::Server::bind(&socket_addr).serve(app.into_make_service_with_connect_info::()); server.with_graceful_shutdown(async move { tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); @@ -39,5 +39,5 @@ pub fn start_tls( axum_server::bind_rustls(socket_addr, ssl_config) .handle(handle) - .serve(app.into_make_service()) + .serve(app.into_make_service_with_connect_info::()) } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 85494c301..b1b7735dc 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -708,9 +708,6 @@ mod warp_http_tracker_server { let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); - // todo: shouldn't be the the leftmost IP address? - // THe application is taken the the rightmost IP address. 
See function http::filters::peer_addr - // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For client .announce_with_header( &announce_query, @@ -1266,6 +1263,8 @@ mod axum_http_tracker_server { // WIP: migration HTTP from Warp to Axum + use local_ip_address::local_ip; + use torrust_tracker::http::axum_implementation::resources::ok::Ok; use torrust_tracker::http::Version; use crate::http::client::Client; @@ -1274,12 +1273,118 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_return_the_status() { // This is a temporary test to test the new Axum HTTP tracker server scaffolding + let http_tracker_server = start_default_http_tracker(Version::Axum).await; - let response = Client::new(http_tracker_server.get_connection_info()).get("status").await; + let client_ip = local_ip().unwrap(); + + let response = Client::bind(http_tracker_server.get_connection_info(), client_ip) + .get("status") + .await; + + let ok: Ok = serde_json::from_str(&response.text().await.unwrap()).unwrap(); + + assert_eq!( + ok, + Ok { + remote_client_insecure_ip: client_ip, + remote_client_secure_ip: client_ip + } + ); + } + + mod should_get_the_remote_client_ip_from_the_http_request { + + // Temporary tests to test that the new Axum HTTP tracker gets the right remote client IP. + // Once the implementation is finished, test for announce request will cover these cases. 
+ + use std::net::IpAddr; + use std::str::FromStr; + + use local_ip_address::local_ip; + use torrust_tracker::http::axum_implementation::resources::ok::Ok; + use torrust_tracker::http::Version; + + use crate::http::client::Client; + use crate::http::server::{start_http_tracker_on_reverse_proxy, start_public_http_tracker}; + + #[tokio::test] + async fn when_the_client_ip_is_a_local_ip_it_should_assign_that_ip() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let client_ip = local_ip().unwrap(); + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + + let response = client.get("status").await; + + let ok: Ok = serde_json::from_str(&response.text().await.unwrap()).unwrap(); + + assert_eq!( + ok, + Ok { + remote_client_insecure_ip: client_ip, + remote_client_secure_ip: client_ip + } + ); + } - assert_eq!(response.status(), 200); - assert_eq!(response.text().await.unwrap(), "{}"); + #[tokio::test] + async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_that_ip() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + + let response = client.get("status").await; + + let ok: Ok = serde_json::from_str(&response.text().await.unwrap()).unwrap(); + + assert_eq!( + ok, + Ok { + remote_client_insecure_ip: client_ip, + remote_client_secure_ip: client_ip + } + ); + } + + #[tokio::test] + async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_as_secure_ip_the_right_most_ip_in_the_x_forwarded_for_http_header( + ) { + /* + client <-> http proxy <-> tracker <-> Internet + ip: header: config: remote client ip: + 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 + */ + + let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; + + 
let client = Client::new(http_tracker_server.get_connection_info()); + + let left_most_ip = IpAddr::from_str("203.0.113.195").unwrap(); + let right_most_ip = IpAddr::from_str("150.172.238.178").unwrap(); + + let response = client + .get_with_header( + "status", + "X-Forwarded-For", + &format!("{left_most_ip},2001:db8:85a3:8d3:1319:8a2e:370:7348,{right_most_ip}"), + ) + .await; + + let ok: Ok = serde_json::from_str(&response.text().await.unwrap()).unwrap(); + + assert_eq!( + ok, + Ok { + remote_client_insecure_ip: left_most_ip, + remote_client_secure_ip: right_most_ip + } + ); + } } mod for_all_config_modes { @@ -2014,9 +2119,6 @@ mod axum_http_tracker_server { let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); - // todo: shouldn't be the the leftmost IP address? - // THe application is taken the the rightmost IP address. See function http::filters::peer_addr - // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For client .announce_with_header( &announce_query, From 3eb7475100b2dc4bc99a8badb8ac225617e6cdd0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 14 Feb 2023 18:50:10 +0000 Subject: [PATCH 324/435] feat(http): [#184] normal (non-compact) announce response in axum tracker Implemeneted the normal (non-compact) announce response in the new Axum implementation for the HTTP tracker. Only for the tracker public mode and with only the mandatory announce request params. 
--- .../axum_implementation/handlers/announce.rs | 67 ++++++++------ .../axum_implementation/responses/announce.rs | 91 +++++++++++++++++++ src/http/axum_implementation/responses/mod.rs | 1 + src/tracker/mod.rs | 9 +- tests/http_tracker.rs | 38 +++----- 5 files changed, 154 insertions(+), 52 deletions(-) create mode 100644 src/http/axum_implementation/responses/announce.rs diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 9b373495d..3ae0b7334 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -1,41 +1,56 @@ +use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use axum::extract::State; -use axum::response::Json; -use axum_client_ip::{InsecureClientIp, SecureClientIp}; -use log::debug; +use axum::response::{IntoResponse, Response}; +use axum_client_ip::SecureClientIp; -use crate::http::axum_implementation::requests::announce::ExtractAnnounceRequest; -use crate::http::axum_implementation::resources::ok::Ok; -use crate::http::axum_implementation::responses::ok; -use crate::tracker::Tracker; +use crate::http::axum_implementation::requests::announce::{Announce, ExtractAnnounceRequest}; +use crate::http::axum_implementation::responses; +use crate::protocol::clock::{Current, Time}; +use crate::tracker::peer::Peer; +use crate::tracker::{statistics, Tracker}; /// WIP #[allow(clippy::unused_async)] pub async fn handle( - State(_tracker): State>, + State(tracker): State>, ExtractAnnounceRequest(announce_request): ExtractAnnounceRequest, - insecure_ip: InsecureClientIp, secure_ip: SecureClientIp, -) -> Json { - /* todo: - - Extract remote client ip from request - - Build the `Peer` - - Call the `tracker.announce` method - - Send event for stats - - Move response from Warp to shared mod - - Send response - */ - - // Sample announce URL used for debugging: - // 
http://0.0.0.0:7070/announce?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548 +) -> Response { + // todo: compact response and optional params let info_hash = announce_request.info_hash; + let remote_client_ip = secure_ip.0; - debug!("http announce request: {:#?}", announce_request); - debug!("info_hash: {:#?}", &info_hash); - debug!("remote client ip, insecure_ip: {:#?}", &insecure_ip); - debug!("remote client ip, secure_ip: {:#?}", &secure_ip); + let mut peer = peer_from_request(&announce_request, &remote_client_ip); - ok::response(&insecure_ip.0, &secure_ip.0) + let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip).await; + + match remote_client_ip { + IpAddr::V4(_) => { + tracker.send_stats_event(statistics::Event::Tcp4Announce).await; + } + IpAddr::V6(_) => { + tracker.send_stats_event(statistics::Event::Tcp6Announce).await; + } + } + + responses::announce::Announce::from(response).into_response() +} + +#[must_use] +fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> Peer { + #[allow(clippy::cast_possible_truncation)] + Peer { + peer_id: announce_request.peer_id, + peer_addr: SocketAddr::new(*peer_ip, announce_request.port), + updated: Current::now(), + // todo: optional parameters not included in the announce request yet + uploaded: NumberOfBytes(i128::from(0) as i64), + downloaded: NumberOfBytes(i128::from(0) as i64), + left: NumberOfBytes(i128::from(0) as i64), + event: AnnounceEvent::None, + } } diff --git a/src/http/axum_implementation/responses/announce.rs b/src/http/axum_implementation/responses/announce.rs new file mode 100644 index 000000000..63ec74ac2 --- /dev/null +++ b/src/http/axum_implementation/responses/announce.rs @@ -0,0 +1,91 @@ +use std::net::IpAddr; + +use axum::http::StatusCode; +use axum::response::{IntoResponse, Response}; +use serde::{self, Deserialize, Serialize}; + +use crate::tracker::{self, AnnounceResponse}; + +#[derive(Serialize, 
Deserialize, Debug, PartialEq)] +pub struct Announce { + pub interval: u32, + #[serde(rename = "min interval")] + pub interval_min: u32, + pub complete: u32, + pub incomplete: u32, + pub peers: Vec, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Peer { + pub peer_id: String, + pub ip: IpAddr, + pub port: u16, +} + +impl From for Peer { + fn from(peer: tracker::peer::Peer) -> Self { + Peer { + peer_id: peer.peer_id.to_string(), + ip: peer.peer_addr.ip(), + port: peer.peer_addr.port(), + } + } +} + +impl Announce { + /// # Panics + /// + /// It would panic if the `Announce` struct contained an inappropriate type. + #[must_use] + pub fn write(&self) -> String { + serde_bencode::to_string(&self).unwrap() + } +} + +impl IntoResponse for Announce { + fn into_response(self) -> Response { + (StatusCode::OK, self.write()).into_response() + } +} + +impl From for Announce { + fn from(domain_announce_response: AnnounceResponse) -> Self { + let peers: Vec = domain_announce_response.peers.iter().map(|peer| Peer::from(*peer)).collect(); + + Self { + interval: domain_announce_response.interval, + interval_min: domain_announce_response.interval_min, + complete: domain_announce_response.swam_stats.seeders, + incomplete: domain_announce_response.swam_stats.leechers, + peers, + } + } +} + +#[cfg(test)] +mod tests { + + use std::net::IpAddr; + use std::str::FromStr; + + use super::{Announce, Peer}; + + #[test] + fn announce_response_can_be_bencoded() { + let response = Announce { + interval: 1, + interval_min: 2, + complete: 3, + incomplete: 4, + peers: vec![Peer { + peer_id: "-qB00000000000000001".to_string(), + ip: IpAddr::from_str("127.0.0.1").unwrap(), + port: 8080, + }], + }; + + // cspell:disable-next-line + assert_eq!(response.write(), "d8:completei3e10:incompletei4e8:intervali1e12:min intervali2e5:peersld2:ip9:127.0.0.17:peer_id20:-qB000000000000000014:porti8080eeee"); + } +} diff --git a/src/http/axum_implementation/responses/mod.rs 
b/src/http/axum_implementation/responses/mod.rs index d55a66679..ad7d0a78c 100644 --- a/src/http/axum_implementation/responses/mod.rs +++ b/src/http/axum_implementation/responses/mod.rs @@ -1,2 +1,3 @@ +pub mod announce; pub mod error; pub mod ok; diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 48bd76128..cb3bd0e96 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -46,6 +46,8 @@ pub struct TorrentsMetrics { pub struct AnnounceResponse { pub peers: Vec, pub swam_stats: SwamStats, + pub interval: u32, + pub interval_min: u32, } impl Tracker { @@ -92,7 +94,12 @@ impl Tracker { // todo: remove peer by using its `Id` instead of its socket address: `get_peers_excluding_peer(peer_id: peer::Id)` let peers = self.get_peers_excluding_peers_with_address(info_hash, &peer.peer_addr).await; - AnnounceResponse { peers, swam_stats } + AnnounceResponse { + peers, + swam_stats, + interval: self.config.announce_interval, + interval_min: self.config.min_announce_interval, + } } /// # Errors diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index b1b7735dc..c01e0c4ee 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1470,8 +1470,7 @@ mod axum_http_tracker_server { start_ipv6_http_tracker, start_public_http_tracker, }; - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_respond_if_only_the_mandatory_fields_are_provided() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; @@ -1742,8 +1741,7 @@ mod axum_http_tracker_server { } } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { let http_tracker_server = start_public_http_tracker(Version::Axum).await; @@ -1768,8 +1766,7 @@ mod axum_http_tracker_server { .await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_return_the_list_of_previously_announced_peers() { let http_tracker_server = start_public_http_tracker(Version::Axum).await; @@ 
-1793,7 +1790,7 @@ mod axum_http_tracker_server { ) .await; - // It should only contain teh previously announced peer + // It should only contain the previously announced peer assert_announce_response( response, &Announce { @@ -1807,8 +1804,7 @@ mod axum_http_tracker_server { .await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { let http_tracker_server = start_public_http_tracker(Version::Axum).await; @@ -1872,8 +1868,7 @@ mod axum_http_tracker_server { assert_compact_announce_response(response, &expected_response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_not_return_the_compact_response_by_default() { // code-review: the HTTP tracker does not return the compact response by default if the "compact" // param is not provided in the announce URL. The BEP 23 suggest to do so. @@ -1912,8 +1907,7 @@ mod axum_http_tracker_server { compact_announce.is_ok() } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { let http_tracker_server = start_public_http_tracker(Version::Axum).await; @@ -1926,8 +1920,7 @@ mod axum_http_tracker_server { assert_eq!(stats.tcp4_connections_handled, 1); } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { let http_tracker_server = start_ipv6_http_tracker(Version::Axum).await; @@ -1960,8 +1953,7 @@ mod axum_http_tracker_server { assert_eq!(stats.tcp6_connections_handled, 0); } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { let http_tracker_server = start_public_http_tracker(Version::Axum).await; @@ -1974,8 +1966,7 @@ mod axum_http_tracker_server { assert_eq!(stats.tcp4_announces_handled, 1); } - 
//#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { let http_tracker_server = start_ipv6_http_tracker(Version::Axum).await; @@ -2032,8 +2023,7 @@ mod axum_http_tracker_server { assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( ) { /* We assume that both the client and tracker share the same public IP. @@ -2065,8 +2055,7 @@ mod axum_http_tracker_server { assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( ) { /* We assume that both the client and tracker share the same public IP. @@ -2101,8 +2090,7 @@ mod axum_http_tracker_server { assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( ) { /* From b1612f6acef4ee717e77e19a6f72f9f539af620a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 15 Feb 2023 14:12:03 +0000 Subject: [PATCH 325/435] test(http): improve tests --- src/http/axum_implementation/query.rs | 184 +++++++++++++----- .../axum_implementation/requests/announce.rs | 62 ++++-- 2 files changed, 178 insertions(+), 68 deletions(-) diff --git a/src/http/axum_implementation/query.rs b/src/http/axum_implementation/query.rs index 3c9c676f1..cad58c17b 100644 --- a/src/http/axum_implementation/query.rs +++ b/src/http/axum_implementation/query.rs @@ -3,7 +3,16 @@ use std::panic::Location; use std::str::FromStr; use thiserror::Error; + +/// Represent a URL query component with some 
restrictions. +/// It does not allow duplicate param names like this: `param1=value1¶m1=value2` +/// It would take the second value for `param1`. pub struct Query { + /* code-review: + - Consider using `HashMap`, because it does not allow you to add a second value for the same param name. + - Consider using a third-party crate. + - Conversion from/to string is not deterministic. Params can be in a different order in the query string. + */ params: HashMap, } @@ -33,6 +42,38 @@ impl FromStr for Query { } } +impl From> for Query { + fn from(raw_params: Vec<(&str, &str)>) -> Self { + let mut params: HashMap = HashMap::new(); + + for raw_param in raw_params { + params.insert(raw_param.0.to_owned(), raw_param.1.to_owned()); + } + + Self { params } + } +} + +impl std::fmt::Display for Query { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let query = self + .params + .iter() + .map(|param| format!("{}", Param::new(param.0, param.1))) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +impl Query { + #[must_use] + pub fn get_param(&self, name: &str) -> Option { + self.params.get(name).map(std::string::ToString::to_string) + } +} + #[derive(Debug, PartialEq)] struct Param { name: String, @@ -59,80 +100,121 @@ impl FromStr for Param { } } -impl Query { - #[must_use] - pub fn get_param(&self, name: &str) -> Option { - self.params.get(name).map(std::string::ToString::to_string) +impl std::fmt::Display for Param { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}={}", self.name, self.value) + } +} + +impl Param { + pub fn new(name: &str, value: &str) -> Self { + Self { + name: name.to_owned(), + value: value.to_owned(), + } } } #[cfg(test)] mod tests { - use super::Query; - use crate::http::axum_implementation::query::Param; - #[test] - fn it_should_parse_the_query_params_from_an_url_query_string() { - let raw_query = 
"info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548"; + mod url_query { + use crate::http::axum_implementation::query::Query; - let query = raw_query.parse::().unwrap(); + #[test] + fn should_parse_the_query_params_from_an_url_query_string() { + let raw_query = + "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548"; - assert_eq!( - query.get_param("info_hash").unwrap(), - "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" - ); - assert_eq!(query.get_param("peer_id").unwrap(), "-qB00000000000000001"); - assert_eq!(query.get_param("port").unwrap(), "17548"); - } + let query = raw_query.parse::().unwrap(); - #[test] - fn it_should_fail_parsing_an_invalid_query_string() { - let invalid_raw_query = "name=value=value"; + assert_eq!( + query.get_param("info_hash").unwrap(), + "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" + ); + assert_eq!(query.get_param("peer_id").unwrap(), "-qB00000000000000001"); + assert_eq!(query.get_param("port").unwrap(), "17548"); + } - let query = invalid_raw_query.parse::(); + #[test] + fn should_fail_parsing_an_invalid_query_string() { + let invalid_raw_query = "name=value=value"; - assert!(query.is_err()); - } + let query = invalid_raw_query.parse::(); - #[test] - fn it_should_ignore_the_preceding_question_mark_if_it_exists() { - let raw_query = "?name=value"; + assert!(query.is_err()); + } - let query = raw_query.parse::().unwrap(); + #[test] + fn should_ignore_the_preceding_question_mark_if_it_exists() { + let raw_query = "?name=value"; - assert_eq!(query.get_param("name").unwrap(), "value"); - } + let query = raw_query.parse::().unwrap(); - #[test] - fn it_should_trim_whitespaces() { - let raw_query = " name=value "; + assert_eq!(query.get_param("name").unwrap(), "value"); + } - let query = raw_query.parse::().unwrap(); + #[test] + fn should_trim_whitespaces() { + let raw_query = " name=value "; - 
assert_eq!(query.get_param("name").unwrap(), "value"); - } + let query = raw_query.parse::().unwrap(); + + assert_eq!(query.get_param("name").unwrap(), "value"); + } + + #[test] + fn should_be_instantiated_from_a_string_pair_vector() { + let query = Query::from(vec![("param1", "value1"), ("param2", "value2")]).to_string(); - #[test] - fn it_should_parse_a_single_query_param() { - let raw_param = "name=value"; + assert!(query == "param1=value1¶m2=value2" || query == "param2=value2¶m1=value1"); + } - let param = raw_param.parse::().unwrap(); + #[test] + fn should_not_allow_more_than_one_value_for_the_same_param() { + let query = Query::from(vec![("param1", "value1"), ("param1", "value2"), ("param1", "value3")]).to_string(); - assert_eq!( - param, - Param { - name: "name".to_string(), - value: "value".to_string(), - } - ); + assert_eq!(query, "param1=value3"); + } + + #[test] + fn should_be_displayed() { + let query = "param1=value1¶m2=value2".parse::().unwrap().to_string(); + + assert!(query == "param1=value1¶m2=value2" || query == "param2=value2¶m1=value1"); + } } - #[test] - fn it_should_fail_parsing_an_invalid_query_param() { - let invalid_raw_param = "name=value=value"; + mod url_query_param { + use crate::http::axum_implementation::query::Param; + + #[test] + fn should_parse_a_single_query_param() { + let raw_param = "name=value"; + + let param = raw_param.parse::().unwrap(); - let query = invalid_raw_param.parse::(); + assert_eq!( + param, + Param { + name: "name".to_string(), + value: "value".to_string(), + } + ); + } + + #[test] + fn should_fail_parsing_an_invalid_query_param() { + let invalid_raw_param = "name=value=value"; + + let query = invalid_raw_param.parse::(); - assert!(query.is_err()); + assert!(query.is_err()); + } + + #[test] + fn should_be_displayed() { + assert_eq!("name=value".parse::().unwrap().to_string(), "name=value"); + } } } diff --git a/src/http/axum_implementation/requests/announce.rs b/src/http/axum_implementation/requests/announce.rs 
index 34a9ad98a..b91945d0a 100644 --- a/src/http/axum_implementation/requests/announce.rs +++ b/src/http/axum_implementation/requests/announce.rs @@ -13,10 +13,13 @@ use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_pee use crate::protocol::info_hash::{ConversionError, InfoHash}; use crate::tracker::peer::{self, IdConversionError}; +pub type Bytes = u64; + pub struct ExtractAnnounceRequest(pub Announce); #[derive(Debug, PartialEq)] pub struct Announce { + // Mandatory params pub info_hash: InfoHash, pub peer_id: peer::Id, pub port: u16, @@ -157,26 +160,51 @@ where #[cfg(test)] mod tests { - use super::Announce; - use crate::http::axum_implementation::query::Query; - use crate::protocol::info_hash::InfoHash; - use crate::tracker::peer; - #[test] - fn announce_request_should_be_extracted_from_url_query_params() { - let raw_query = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548"; + mod announce_request { + + use crate::http::axum_implementation::query::Query; + use crate::http::axum_implementation::requests::announce::Announce; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::peer; + + #[test] + fn should_be_instantiated_from_url_query_params() { + let raw_query = Query::from(vec![ + ("info_hash", "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + ("peer_id", "-qB00000000000000001"), + ("port", "17548"), + ]) + .to_string(); + + let query = raw_query.parse::().unwrap(); + + let announce_request = Announce::try_from(query).unwrap(); + + assert_eq!( + announce_request, + Announce { + info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + peer_id: "-qB00000000000000001".parse::().unwrap(), + port: 17548, + } + ); + } + + #[test] + fn should_fail_instantiating_from_url_query_params_if_the_query_does_not_include_all_the_mandatory_params() { + let raw_query_without_info_hash = "peer_id=-qB00000000000000001&port=17548"; - let query = 
raw_query.parse::().unwrap(); + assert!(Announce::try_from(raw_query_without_info_hash.parse::().unwrap()).is_err()); - let announce_request = Announce::try_from(query).unwrap(); + let raw_query_without_peer_id = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&port=17548"; - assert_eq!( - announce_request, - Announce { - info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), - peer_id: "-qB00000000000000001".parse::().unwrap(), - port: 17548, - } - ); + assert!(Announce::try_from(raw_query_without_peer_id.parse::().unwrap()).is_err()); + + let raw_query_without_port = + "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001"; + + assert!(Announce::try_from(raw_query_without_port.parse::().unwrap()).is_err()); + } } } From 74ed59221ac22c3cd8fb0accf2a48375201d75d1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 15 Feb 2023 19:31:21 +0000 Subject: [PATCH 326/435] feat(http): [#184] added optional params to announce req in Axum implementation --- .../axum_implementation/handlers/announce.rs | 26 +- .../axum_implementation/requests/announce.rs | 407 +++++++++++++++--- tests/http_tracker.rs | 67 ++- 3 files changed, 401 insertions(+), 99 deletions(-) diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 3ae0b7334..92bce5a4f 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -5,8 +5,9 @@ use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use axum::extract::State; use axum::response::{IntoResponse, Response}; use axum_client_ip::SecureClientIp; +use log::debug; -use crate::http::axum_implementation::requests::announce::{Announce, ExtractAnnounceRequest}; +use crate::http::axum_implementation::requests::announce::{Announce, Event, ExtractAnnounceRequest}; use crate::http::axum_implementation::responses; use crate::protocol::clock::{Current, Time}; use 
crate::tracker::peer::Peer; @@ -19,7 +20,7 @@ pub async fn handle( ExtractAnnounceRequest(announce_request): ExtractAnnounceRequest, secure_ip: SecureClientIp, ) -> Response { - // todo: compact response and optional params + debug!("http announce request: {:#?}", announce_request); let info_hash = announce_request.info_hash; let remote_client_ip = secure_ip.0; @@ -42,15 +43,24 @@ pub async fn handle( #[must_use] fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> Peer { - #[allow(clippy::cast_possible_truncation)] Peer { peer_id: announce_request.peer_id, peer_addr: SocketAddr::new(*peer_ip, announce_request.port), updated: Current::now(), - // todo: optional parameters not included in the announce request yet - uploaded: NumberOfBytes(i128::from(0) as i64), - downloaded: NumberOfBytes(i128::from(0) as i64), - left: NumberOfBytes(i128::from(0) as i64), - event: AnnounceEvent::None, + uploaded: NumberOfBytes(announce_request.uploaded.unwrap_or(0)), + downloaded: NumberOfBytes(announce_request.downloaded.unwrap_or(0)), + left: NumberOfBytes(announce_request.left.unwrap_or(0)), + event: map_to_aquatic_event(&announce_request.event), + } +} + +fn map_to_aquatic_event(event: &Option) -> AnnounceEvent { + match event { + Some(event) => match &event { + Event::Started => aquatic_udp_protocol::AnnounceEvent::Started, + Event::Stopped => aquatic_udp_protocol::AnnounceEvent::Stopped, + Event::Completed => aquatic_udp_protocol::AnnounceEvent::Completed, + }, + None => aquatic_udp_protocol::AnnounceEvent::None, } } diff --git a/src/http/axum_implementation/requests/announce.rs b/src/http/axum_implementation/requests/announce.rs index b91945d0a..36e94a3fd 100644 --- a/src/http/axum_implementation/requests/announce.rs +++ b/src/http/axum_implementation/requests/announce.rs @@ -1,3 +1,4 @@ +use std::fmt; use std::panic::Location; use std::str::FromStr; @@ -10,60 +11,139 @@ use thiserror::Error; use crate::http::axum_implementation::query::{ParseQueryError, 
Query}; use crate::http::axum_implementation::responses; use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; +use crate::located_error::{Located, LocatedError}; use crate::protocol::info_hash::{ConversionError, InfoHash}; use crate::tracker::peer::{self, IdConversionError}; -pub type Bytes = u64; +pub type NumberOfBytes = i64; pub struct ExtractAnnounceRequest(pub Announce); +// Param names in the URL query +const INFO_HASH: &str = "info_hash"; +const PEER_ID: &str = "peer_id"; +const PORT: &str = "port"; +const DOWNLOADED: &str = "downloaded"; +const UPLOADED: &str = "uploaded"; +const LEFT: &str = "left"; +const EVENT: &str = "event"; +const COMPACT: &str = "compact"; + #[derive(Debug, PartialEq)] pub struct Announce { // Mandatory params pub info_hash: InfoHash, pub peer_id: peer::Id, pub port: u16, + // Optional params + pub downloaded: Option, + pub uploaded: Option, + pub left: Option, + pub event: Option, + pub compact: Option, } -#[derive(Error, Debug)] -pub enum ParseAnnounceQueryError { - #[error("missing info_hash param: {location}")] - MissingInfoHash { location: &'static Location<'static> }, - #[error("invalid info_hash param: {location}")] - InvalidInfoHash { location: &'static Location<'static> }, - #[error("missing peer_id param: {location}")] - MissingPeerId { location: &'static Location<'static> }, - #[error("invalid peer_id param: {location}")] - InvalidPeerId { location: &'static Location<'static> }, - #[error("missing port param: {location}")] - MissingPort { location: &'static Location<'static> }, - #[error("invalid port param: {location}")] - InvalidPort { location: &'static Location<'static> }, -} - -impl From for ParseAnnounceQueryError { - #[track_caller] - fn from(_err: IdConversionError) -> Self { - Self::InvalidPeerId { - location: Location::caller(), +#[derive(PartialEq, Debug)] +pub enum Event { + Started, + Stopped, + Completed, +} + +impl FromStr for Event { + type Err = ParseAnnounceQueryError; + 
+ fn from_str(raw_param: &str) -> Result { + match raw_param { + "started" => Ok(Self::Started), + "stopped" => Ok(Self::Stopped), + "completed" => Ok(Self::Completed), + _ => Err(ParseAnnounceQueryError::InvalidParam { + param_name: EVENT.to_owned(), + param_value: raw_param.to_owned(), + location: Location::caller(), + }), } } } -impl From for ParseAnnounceQueryError { - #[track_caller] - fn from(_err: ConversionError) -> Self { - Self::InvalidInfoHash { - location: Location::caller(), +impl fmt::Display for Event { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Event::Started => write!(f, "started"), + Event::Stopped => write!(f, "stopped"), + Event::Completed => write!(f, "completed"), + } + } +} + +#[derive(PartialEq, Debug)] +pub enum Compact { + Accepted = 1, + NotAccepted = 0, +} + +impl fmt::Display for Compact { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Compact::Accepted => write!(f, "1"), + Compact::NotAccepted => write!(f, "0"), } } } +impl FromStr for Compact { + type Err = ParseAnnounceQueryError; + + fn from_str(raw_param: &str) -> Result { + match raw_param { + "1" => Ok(Self::Accepted), + "0" => Ok(Self::NotAccepted), + _ => Err(ParseAnnounceQueryError::InvalidParam { + param_name: COMPACT.to_owned(), + param_value: raw_param.to_owned(), + location: Location::caller(), + }), + } + } +} + +#[derive(Error, Debug)] +pub enum ParseAnnounceQueryError { + #[error("missing param {param_name} in {location}")] + MissingParam { + location: &'static Location<'static>, + param_name: String, + }, + #[error("invalid param value {param_value} for {param_name} in {location}")] + InvalidParam { + param_name: String, + param_value: String, + location: &'static Location<'static>, + }, + #[error("param value overflow {param_value} for {param_name} in {location}")] + NumberOfBytesOverflow { + param_name: String, + param_value: String, + location: &'static Location<'static>, + }, + #[error("invalid param value 
{param_value} for {param_name} in {source}")] + InvalidInfoHashParam { + param_name: String, + param_value: String, + source: LocatedError<'static, ConversionError>, + }, + #[error("invalid param value {param_value} for {param_name} in {source}")] + InvalidPeerIdParam { + param_name: String, + param_value: String, + source: LocatedError<'static, IdConversionError>, + }, +} + impl From for responses::error::Error { fn from(err: ParseQueryError) -> Self { responses::error::Error { - // code-review: should we expose error location in public HTTP tracker API? - // Error message example: "Cannot parse query params: invalid param a=b=c in src/http/axum_implementation/query.rs:50:27" failure_reason: format!("Cannot parse query params: {err}"), } } @@ -72,7 +152,6 @@ impl From for responses::error::Error { impl From for responses::error::Error { fn from(err: ParseAnnounceQueryError) -> Self { responses::error::Error { - // code-review: should we expose error location in public HTTP tracker API? 
failure_reason: format!("Cannot parse query params for announce request: {err}"), } } @@ -86,45 +165,120 @@ impl TryFrom for Announce { info_hash: extract_info_hash(&query)?, peer_id: extract_peer_id(&query)?, port: extract_port(&query)?, + downloaded: extract_downloaded(&query)?, + uploaded: extract_uploaded(&query)?, + left: extract_left(&query)?, + event: extract_event(&query)?, + compact: extract_compact(&query)?, }) } } +// Mandatory params + fn extract_info_hash(query: &Query) -> Result { - match query.get_param("info_hash") { - Some(raw_info_hash) => Ok(percent_decode_info_hash(&raw_info_hash)?), + match query.get_param(INFO_HASH) { + Some(raw_param) => { + Ok( + percent_decode_info_hash(&raw_param).map_err(|err| ParseAnnounceQueryError::InvalidInfoHashParam { + param_name: INFO_HASH.to_owned(), + param_value: raw_param.clone(), + source: Located(err).into(), + })?, + ) + } None => { - return Err(ParseAnnounceQueryError::MissingInfoHash { + return Err(ParseAnnounceQueryError::MissingParam { location: Location::caller(), + param_name: INFO_HASH.to_owned(), }) } } } fn extract_peer_id(query: &Query) -> Result { - match query.get_param("peer_id") { - Some(raw_peer_id) => Ok(percent_decode_peer_id(&raw_peer_id)?), + match query.get_param(PEER_ID) { + Some(raw_param) => Ok( + percent_decode_peer_id(&raw_param).map_err(|err| ParseAnnounceQueryError::InvalidPeerIdParam { + param_name: PEER_ID.to_owned(), + param_value: raw_param.clone(), + source: Located(err).into(), + })?, + ), None => { - return Err(ParseAnnounceQueryError::MissingPeerId { + return Err(ParseAnnounceQueryError::MissingParam { location: Location::caller(), + param_name: PEER_ID.to_owned(), }) } } } fn extract_port(query: &Query) -> Result { - match query.get_param("port") { - Some(raw_port) => Ok(u16::from_str(&raw_port).map_err(|_e| ParseAnnounceQueryError::InvalidPort { + match query.get_param(PORT) { + Some(raw_param) => Ok(u16::from_str(&raw_param).map_err(|_e| 
ParseAnnounceQueryError::InvalidParam { + param_name: PORT.to_owned(), + param_value: raw_param.clone(), location: Location::caller(), })?), None => { - return Err(ParseAnnounceQueryError::MissingPort { + return Err(ParseAnnounceQueryError::MissingParam { location: Location::caller(), + param_name: PORT.to_owned(), }) } } } +// Optional params + +fn extract_downloaded(query: &Query) -> Result, ParseAnnounceQueryError> { + extract_number_of_bytes_from_param(DOWNLOADED, query) +} + +fn extract_uploaded(query: &Query) -> Result, ParseAnnounceQueryError> { + extract_number_of_bytes_from_param(UPLOADED, query) +} + +fn extract_left(query: &Query) -> Result, ParseAnnounceQueryError> { + extract_number_of_bytes_from_param(LEFT, query) +} + +fn extract_number_of_bytes_from_param(param_name: &str, query: &Query) -> Result, ParseAnnounceQueryError> { + match query.get_param(param_name) { + Some(raw_param) => { + let number_of_bytes = u64::from_str(&raw_param).map_err(|_e| ParseAnnounceQueryError::InvalidParam { + param_name: param_name.to_owned(), + param_value: raw_param.clone(), + location: Location::caller(), + })?; + + Ok(Some(i64::try_from(number_of_bytes).map_err(|_e| { + ParseAnnounceQueryError::NumberOfBytesOverflow { + param_name: param_name.to_owned(), + param_value: raw_param.clone(), + location: Location::caller(), + } + })?)) + } + None => Ok(None), + } +} + +fn extract_event(query: &Query) -> Result, ParseAnnounceQueryError> { + match query.get_param(EVENT) { + Some(raw_param) => Ok(Some(Event::from_str(&raw_param)?)), + None => Ok(None), + } +} + +fn extract_compact(query: &Query) -> Result, ParseAnnounceQueryError> { + match query.get_param(COMPACT) { + Some(raw_param) => Ok(Some(Compact::from_str(&raw_param)?)), + None => Ok(None), + } +} + #[async_trait] impl FromRequestParts for ExtractAnnounceRequest where @@ -164,16 +318,18 @@ mod tests { mod announce_request { use crate::http::axum_implementation::query::Query; - use 
crate::http::axum_implementation::requests::announce::Announce; + use crate::http::axum_implementation::requests::announce::{ + Announce, Compact, Event, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, + }; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; #[test] - fn should_be_instantiated_from_url_query_params() { + fn should_be_instantiated_from_the_url_query_with_only_the_mandatory_params() { let raw_query = Query::from(vec![ - ("info_hash", "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), - ("peer_id", "-qB00000000000000001"), - ("port", "17548"), + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), ]) .to_string(); @@ -187,24 +343,171 @@ mod tests { info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), peer_id: "-qB00000000000000001".parse::().unwrap(), port: 17548, + downloaded: None, + uploaded: None, + left: None, + event: None, + compact: None, } ); } #[test] - fn should_fail_instantiating_from_url_query_params_if_the_query_does_not_include_all_the_mandatory_params() { - let raw_query_without_info_hash = "peer_id=-qB00000000000000001&port=17548"; + fn should_be_instantiated_from_the_url_query_params() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (DOWNLOADED, "1"), + (UPLOADED, "2"), + (LEFT, "3"), + (EVENT, "started"), + (COMPACT, "0"), + ]) + .to_string(); + + let query = raw_query.parse::().unwrap(); + + let announce_request = Announce::try_from(query).unwrap(); + + assert_eq!( + announce_request, + Announce { + info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + peer_id: "-qB00000000000000001".parse::().unwrap(), + port: 17548, + downloaded: Some(1), + uploaded: Some(2), + left: Some(3), + event: Some(Event::Started), + compact: Some(Compact::NotAccepted), + } + ); + } 
+ + mod when_it_is_instantiated_from_the_url_query_params { + + use crate::http::axum_implementation::query::Query; + use crate::http::axum_implementation::requests::announce::{ + Announce, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, + }; - assert!(Announce::try_from(raw_query_without_info_hash.parse::().unwrap()).is_err()); + #[test] + fn it_should_fail_if_the_query_does_not_include_all_the_mandatory_params() { + let raw_query_without_info_hash = "peer_id=-qB00000000000000001&port=17548"; - let raw_query_without_peer_id = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&port=17548"; + assert!(Announce::try_from(raw_query_without_info_hash.parse::().unwrap()).is_err()); - assert!(Announce::try_from(raw_query_without_peer_id.parse::().unwrap()).is_err()); + let raw_query_without_peer_id = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&port=17548"; - let raw_query_without_port = - "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001"; + assert!(Announce::try_from(raw_query_without_peer_id.parse::().unwrap()).is_err()); - assert!(Announce::try_from(raw_query_without_port.parse::().unwrap()).is_err()); + let raw_query_without_port = + "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001"; + + assert!(Announce::try_from(raw_query_without_port.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_info_hash_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "INVALID_INFO_HASH_VALUE"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_peer_id_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "INVALID_PEER_ID_VALUE"), + (PORT, "17548"), + ]) + .to_string(); + + 
assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_port_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "INVALID_PORT_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_downloaded_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (DOWNLOADED, "INVALID_DOWNLOADED_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_uploaded_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (UPLOADED, "INVALID_UPLOADED_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_left_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (LEFT, "INVALID_LEFT_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_event_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (EVENT, "INVALID_EVENT_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_compact_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, 
"%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (COMPACT, "INVALID_COMPACT_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } } } } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index c01e0c4ee..650bc447e 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1458,8 +1458,8 @@ mod axum_http_tracker_server { use crate::http::asserts::{ assert_announce_response, assert_bad_announce_request_error_response, assert_cannot_parse_query_param_error_response, assert_cannot_parse_query_params_error_response, - assert_compact_announce_response, assert_empty_announce_response, assert_internal_server_error_response, - assert_is_announce_response, assert_missing_query_params_for_announce_request_error_response, + assert_compact_announce_response, assert_empty_announce_response, assert_is_announce_response, + assert_missing_query_params_for_announce_request_error_response, }; use crate::http::client::Client; use crate::http::requests::announce::{Compact, QueryBuilder}; @@ -1521,7 +1521,7 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_bad_announce_request_error_response(response, "missing info_hash param").await; + assert_bad_announce_request_error_response(response, "missing param info_hash").await; // Without `peer_id` param @@ -1533,7 +1533,7 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_bad_announce_request_error_response(response, "missing peer_id param").await; + assert_bad_announce_request_error_response(response, "missing param peer_id").await; // Without `port` param @@ -1545,7 +1545,7 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_bad_announce_request_error_response(response, "missing port param").await; + assert_bad_announce_request_error_response(response, "missing param port").await; } #[tokio::test] @@ 
-1565,13 +1565,12 @@ mod axum_http_tracker_server { } } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_not_fail_when_the_peer_address_param_is_invalid() { // AnnounceQuery does not even contain the `peer_addr` // The peer IP is obtained in two ways: - // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP if there. - // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request header is tracker is running `on_reverse_proxy`. + // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP. + // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request HTTP header. let http_tracker_server = start_default_http_tracker(Version::Axum).await; @@ -1586,8 +1585,7 @@ mod axum_http_tracker_server { assert_is_announce_response(response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_when_the_downloaded_param_is_invalid() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; @@ -1602,12 +1600,11 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_internal_server_error_response(response).await; + assert_bad_announce_request_error_response(response, "invalid param value").await; } } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_when_the_uploaded_param_is_invalid() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; @@ -1622,7 +1619,7 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_internal_server_error_response(response).await; + assert_bad_announce_request_error_response(response, "invalid param value").await; } } @@ -1648,7 +1645,7 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_cannot_parse_query_params_error_response(response, "").await; + assert_bad_announce_request_error_response(response, "invalid param value").await; } } @@ -1667,12 +1664,11 @@ 
mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_cannot_parse_query_params_error_response(response, "").await; + assert_bad_announce_request_error_response(response, "invalid param value").await; } } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_when_the_left_param_is_invalid() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; @@ -1687,15 +1683,12 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_internal_server_error_response(response).await; + assert_bad_announce_request_error_response(response, "invalid param value").await; } } - //#[tokio::test] - #[allow(dead_code)] - async fn should_not_fail_when_the_event_param_is_invalid() { - // All invalid values are ignored as if the `event` param were empty - + #[tokio::test] + async fn should_fail_when_the_event_param_is_invalid() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; let mut params = QueryBuilder::default().query().params(); @@ -1705,9 +1698,9 @@ mod axum_http_tracker_server { "-1", "1.1", "a", - "Started", // It should be lowercase - "Stopped", // It should be lowercase - "Completed", // It should be lowercase + "Started", // It should be lowercase to be valid: `started` + "Stopped", // It should be lowercase to be valid: `stopped` + "Completed", // It should be lowercase to be valid: `completed` ]; for invalid_value in invalid_values { @@ -1717,13 +1710,12 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_is_announce_response(response).await; + assert_bad_announce_request_error_response(response, "invalid param value").await; } } - //#[tokio::test] - #[allow(dead_code)] - async fn should_not_fail_when_the_compact_param_is_invalid() { + #[tokio::test] + async fn should_fail_when_the_compact_param_is_invalid() { let http_tracker_server = start_default_http_tracker(Version::Axum).await; let mut params = 
QueryBuilder::default().query().params(); @@ -1737,7 +1729,7 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_internal_server_error_response(response).await; + assert_bad_announce_request_error_response(response, "invalid param value").await; } } @@ -1933,8 +1925,7 @@ mod axum_http_tracker_server { assert_eq!(stats.tcp6_connections_handled, 1); } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. @@ -1979,8 +1970,7 @@ mod axum_http_tracker_server { assert_eq!(stats.tcp6_announces_handled, 1); } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. @@ -1999,8 +1989,7 @@ mod axum_http_tracker_server { assert_eq!(stats.tcp6_announces_handled, 0); } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { let http_tracker_server = start_public_http_tracker(Version::Axum).await; From 02e25168c77edf1e6ad4bfa17803ee18883c2c0d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 16 Feb 2023 20:54:40 +0000 Subject: [PATCH 327/435] feat(http): [#184] Axum extractor for peer IP It uses a wrapper for another extractor becuase that extractor cannot be optional. We need to get the rigth most IP in the X-Forwarded-For header only when the tracker is runnin gon reverse proxy. 
More info: https://github.com/imbolc/axum-client-ip/issues/9#issuecomment-1433039362 --- .../axum_implementation/extractors/mod.rs | 2 + .../axum_implementation/extractors/peer_ip.rs | 52 +++++++++++++++++++ .../extractors/remote_client_ip.rs | 51 ++++++++++++++++++ .../axum_implementation/handlers/announce.rs | 20 ++++--- .../axum_implementation/handlers/status.rs | 6 +-- src/http/axum_implementation/mod.rs | 1 + .../axum_implementation/requests/announce.rs | 8 +-- src/http/axum_implementation/resources/ok.rs | 7 ++- src/http/axum_implementation/responses/ok.rs | 8 ++- src/http/axum_implementation/routes.rs | 8 +-- tests/http_tracker.rs | 31 +++++++---- 11 files changed, 156 insertions(+), 38 deletions(-) create mode 100644 src/http/axum_implementation/extractors/mod.rs create mode 100644 src/http/axum_implementation/extractors/peer_ip.rs create mode 100644 src/http/axum_implementation/extractors/remote_client_ip.rs diff --git a/src/http/axum_implementation/extractors/mod.rs b/src/http/axum_implementation/extractors/mod.rs new file mode 100644 index 000000000..71ceea999 --- /dev/null +++ b/src/http/axum_implementation/extractors/mod.rs @@ -0,0 +1,2 @@ +pub mod peer_ip; +pub mod remote_client_ip; diff --git a/src/http/axum_implementation/extractors/peer_ip.rs b/src/http/axum_implementation/extractors/peer_ip.rs new file mode 100644 index 000000000..3f76dc67c --- /dev/null +++ b/src/http/axum_implementation/extractors/peer_ip.rs @@ -0,0 +1,52 @@ +use std::net::IpAddr; +use std::panic::Location; + +use axum::response::{IntoResponse, Response}; +use thiserror::Error; + +use super::remote_client_ip::RemoteClientIp; +use crate::http::axum_implementation::responses; + +#[derive(Error, Debug)] +pub enum ResolutionError { + #[error("missing the right most X-Forwarded-For IP (mandatory on reverse proxy tracker configuration) in {location}")] + MissingRightMostXForwardedForIp { location: &'static Location<'static> }, + #[error("cannot get the client IP from the connection 
info in {location}")] + MissingClientIp { location: &'static Location<'static> }, +} + +impl From for responses::error::Error { + fn from(err: ResolutionError) -> Self { + responses::error::Error { + failure_reason: format!("{err}"), + } + } +} + +/// It resolves the peer IP. +/// +/// # Errors +/// +/// Will return an error if the peer IP cannot be obtained according to the configuration. +/// For example, if the IP is extracted from an HTTP header which is missing in the request. +pub fn peer_ip(on_reverse_proxy: bool, remote_client_ip: &RemoteClientIp) -> Result { + if on_reverse_proxy { + if let Some(ip) = remote_client_ip.right_most_x_forwarded_for { + Ok(ip) + } else { + Err( + responses::error::Error::from(ResolutionError::MissingRightMostXForwardedForIp { + location: Location::caller(), + }) + .into_response(), + ) + } + } else if let Some(ip) = remote_client_ip.connection_info_ip { + Ok(ip) + } else { + Err(responses::error::Error::from(ResolutionError::MissingClientIp { + location: Location::caller(), + }) + .into_response()) + } +} diff --git a/src/http/axum_implementation/extractors/remote_client_ip.rs b/src/http/axum_implementation/extractors/remote_client_ip.rs new file mode 100644 index 000000000..7b6f3fed2 --- /dev/null +++ b/src/http/axum_implementation/extractors/remote_client_ip.rs @@ -0,0 +1,51 @@ +use std::net::{IpAddr, SocketAddr}; + +use axum::async_trait; +use axum::extract::{ConnectInfo, FromRequestParts}; +use axum::http::request::Parts; +use axum::response::Response; +use axum_client_ip::RightmostXForwardedFor; +use serde::{Deserialize, Serialize}; + +/// Given this request chain: +/// +/// client <-> http proxy 1 <-> http proxy 2 <-> server +/// ip: 126.0.0.1 ip: 126.0.0.2 ip: 126.0.0.3 ip: 126.0.0.4 +/// X-Forwarded-For: 126.0.0.1 X-Forwarded-For: 126.0.0.1,126.0.0.2 +/// +/// This extractor extracts these values from the HTTP headers and connection info. 
+/// +/// `right_most_x_forwarded_for` = 126.0.0.2 +/// `connection_info_ip` = 126.0.0.1 +/// +/// More info about inner extractors : +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] +pub struct RemoteClientIp { + pub right_most_x_forwarded_for: Option, + pub connection_info_ip: Option, +} + +#[async_trait] +impl FromRequestParts for RemoteClientIp +where + S: Send + Sync, +{ + type Rejection = Response; + + async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + let right_most_x_forwarded_for = match RightmostXForwardedFor::from_request_parts(parts, state).await { + Ok(right_most_x_forwarded_for) => Some(right_most_x_forwarded_for.0), + Err(_) => None, + }; + + let connection_info_ip = match ConnectInfo::::from_request_parts(parts, state).await { + Ok(connection_info_socket_addr) => Some(connection_info_socket_addr.0.ip()), + Err(_) => None, + }; + + Ok(RemoteClientIp { + right_most_x_forwarded_for, + connection_info_ip, + }) + } +} diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 92bce5a4f..af70b87e7 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -4,32 +4,38 @@ use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use axum::extract::State; use axum::response::{IntoResponse, Response}; -use axum_client_ip::SecureClientIp; use log::debug; +use crate::http::axum_implementation::extractors::peer_ip::peer_ip; +use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::requests::announce::{Announce, Event, ExtractAnnounceRequest}; use crate::http::axum_implementation::responses; use crate::protocol::clock::{Current, Time}; use crate::tracker::peer::Peer; use crate::tracker::{statistics, Tracker}; -/// WIP #[allow(clippy::unused_async)] pub async fn handle( State(tracker): State>, 
ExtractAnnounceRequest(announce_request): ExtractAnnounceRequest, - secure_ip: SecureClientIp, + remote_client_ip: RemoteClientIp, ) -> Response { debug!("http announce request: {:#?}", announce_request); let info_hash = announce_request.info_hash; - let remote_client_ip = secure_ip.0; - let mut peer = peer_from_request(&announce_request, &remote_client_ip); + let peer_ip = peer_ip(tracker.config.on_reverse_proxy, &remote_client_ip); - let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip).await; + let peer_ip = match peer_ip { + Ok(peer_ip) => peer_ip, + Err(err) => return err, + }; - match remote_client_ip { + let mut peer = peer_from_request(&announce_request, &peer_ip); + + let response = tracker.announce(&info_hash, &mut peer, &peer_ip).await; + + match peer_ip { IpAddr::V4(_) => { tracker.send_stats_event(statistics::Event::Tcp4Announce).await; } diff --git a/src/http/axum_implementation/handlers/status.rs b/src/http/axum_implementation/handlers/status.rs index 37d88321c..d4031aef5 100644 --- a/src/http/axum_implementation/handlers/status.rs +++ b/src/http/axum_implementation/handlers/status.rs @@ -1,12 +1,12 @@ /// Temporary handler for testing and debugging the new Axum implementation /// It should be removed once the migration to Axum is finished. 
use axum::response::Json; -use axum_client_ip::{InsecureClientIp, SecureClientIp}; +use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::resources::ok::Ok; use crate::http::axum_implementation::responses::ok; #[allow(clippy::unused_async)] -pub async fn get_status_handler(insecure_ip: InsecureClientIp, secure_ip: SecureClientIp) -> Json { - ok::response(&insecure_ip.0, &secure_ip.0) +pub async fn get_status_handler(remote_client_ip: RemoteClientIp) -> Json { + ok::response(&remote_client_ip) } diff --git a/src/http/axum_implementation/mod.rs b/src/http/axum_implementation/mod.rs index 9e5e07979..4b7d90e60 100644 --- a/src/http/axum_implementation/mod.rs +++ b/src/http/axum_implementation/mod.rs @@ -1,3 +1,4 @@ +pub mod extractors; pub mod handlers; pub mod query; pub mod requests; diff --git a/src/http/axum_implementation/requests/announce.rs b/src/http/axum_implementation/requests/announce.rs index 36e94a3fd..463df4fbe 100644 --- a/src/http/axum_implementation/requests/announce.rs +++ b/src/http/axum_implementation/requests/announce.rs @@ -110,6 +110,8 @@ impl FromStr for Compact { #[derive(Error, Debug)] pub enum ParseAnnounceQueryError { + #[error("missing query params for announce request in {location}")] + MissingParams { location: &'static Location<'static> }, #[error("missing param {param_name} in {location}")] MissingParam { location: &'static Location<'static>, @@ -290,9 +292,9 @@ where let raw_query = parts.uri.query(); if raw_query.is_none() { - return Err(responses::error::Error { - failure_reason: "missing query params for announce request".to_string(), - } + return Err(responses::error::Error::from(ParseAnnounceQueryError::MissingParams { + location: Location::caller(), + }) .into_response()); } diff --git a/src/http/axum_implementation/resources/ok.rs b/src/http/axum_implementation/resources/ok.rs index 4a3495d0f..f941b9fb3 100644 --- a/src/http/axum_implementation/resources/ok.rs 
+++ b/src/http/axum_implementation/resources/ok.rs @@ -1,9 +1,8 @@ -use std::net::IpAddr; - use serde::{Deserialize, Serialize}; +use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; + #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct Ok { - pub remote_client_insecure_ip: IpAddr, - pub remote_client_secure_ip: IpAddr, + pub remote_client_ip: RemoteClientIp, } diff --git a/src/http/axum_implementation/responses/ok.rs b/src/http/axum_implementation/responses/ok.rs index a2d61749d..dfd062b51 100644 --- a/src/http/axum_implementation/responses/ok.rs +++ b/src/http/axum_implementation/responses/ok.rs @@ -1,13 +1,11 @@ -use std::net::IpAddr; - use axum::Json; +use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::resources::ok::Ok; #[must_use] -pub fn response(remote_client_insecure_ip: &IpAddr, remote_client_secure_ip: &IpAddr) -> Json { +pub fn response(remote_client_ip: &RemoteClientIp) -> Json { Json(Ok { - remote_client_insecure_ip: *remote_client_insecure_ip, - remote_client_secure_ip: *remote_client_secure_ip, + remote_client_ip: remote_client_ip.clone(), }) } diff --git a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs index a32a60ec0..6138f5acf 100644 --- a/src/http/axum_implementation/routes.rs +++ b/src/http/axum_implementation/routes.rs @@ -9,16 +9,10 @@ use super::handlers::status::get_status_handler; use crate::tracker::Tracker; pub fn router(tracker: &Arc) -> Router { - let secure_client_ip_source = if tracker.config.on_reverse_proxy { - SecureClientIpSource::RightmostXForwardedFor - } else { - SecureClientIpSource::ConnectInfo - }; - Router::new() // Status .route("/status", get(get_status_handler)) // Announce request .route("/announce", get(handle).with_state(tracker.clone())) - .layer(secure_client_ip_source.into_extension()) + .layer(SecureClientIpSource::ConnectInfo.into_extension()) } diff --git 
a/tests/http_tracker.rs b/tests/http_tracker.rs index 650bc447e..ded30a0b4 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1264,6 +1264,7 @@ mod axum_http_tracker_server { // WIP: migration HTTP from Warp to Axum use local_ip_address::local_ip; + use torrust_tracker::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use torrust_tracker::http::axum_implementation::resources::ok::Ok; use torrust_tracker::http::Version; @@ -1287,8 +1288,10 @@ mod axum_http_tracker_server { assert_eq!( ok, Ok { - remote_client_insecure_ip: client_ip, - remote_client_secure_ip: client_ip + remote_client_ip: RemoteClientIp { + right_most_x_forwarded_for: None, + connection_info_ip: Some(client_ip) + } } ); } @@ -1302,6 +1305,7 @@ mod axum_http_tracker_server { use std::str::FromStr; use local_ip_address::local_ip; + use torrust_tracker::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use torrust_tracker::http::axum_implementation::resources::ok::Ok; use torrust_tracker::http::Version; @@ -1323,8 +1327,10 @@ mod axum_http_tracker_server { assert_eq!( ok, Ok { - remote_client_insecure_ip: client_ip, - remote_client_secure_ip: client_ip + remote_client_ip: RemoteClientIp { + right_most_x_forwarded_for: None, + connection_info_ip: Some(client_ip) + } } ); } @@ -1345,8 +1351,10 @@ mod axum_http_tracker_server { assert_eq!( ok, Ok { - remote_client_insecure_ip: client_ip, - remote_client_secure_ip: client_ip + remote_client_ip: RemoteClientIp { + right_most_x_forwarded_for: None, + connection_info_ip: Some(client_ip) + } } ); } @@ -1362,7 +1370,10 @@ mod axum_http_tracker_server { let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; - let client = Client::new(http_tracker_server.get_connection_info()); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); let left_most_ip = 
IpAddr::from_str("203.0.113.195").unwrap(); let right_most_ip = IpAddr::from_str("150.172.238.178").unwrap(); @@ -1380,8 +1391,10 @@ mod axum_http_tracker_server { assert_eq!( ok, Ok { - remote_client_insecure_ip: left_most_ip, - remote_client_secure_ip: right_most_ip + remote_client_ip: RemoteClientIp { + right_most_x_forwarded_for: Some(right_most_ip), + connection_info_ip: Some(client_ip) + } } ); } From 99dbbe41f6576e9f075ba1bfa82c4361da5c0c38 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 16 Feb 2023 21:43:33 +0000 Subject: [PATCH 328/435] refactor(http): [#184] extract announce service in Axum tracker --- .../axum_implementation/extractors/peer_ip.rs | 2 +- .../axum_implementation/handlers/announce.rs | 24 +++++-------------- src/http/axum_implementation/mod.rs | 1 + .../axum_implementation/services/announce.rs | 24 +++++++++++++++++++ src/http/axum_implementation/services/mod.rs | 1 + src/http/warp_implementation/handlers.rs | 3 +++ 6 files changed, 36 insertions(+), 19 deletions(-) create mode 100644 src/http/axum_implementation/services/announce.rs create mode 100644 src/http/axum_implementation/services/mod.rs diff --git a/src/http/axum_implementation/extractors/peer_ip.rs b/src/http/axum_implementation/extractors/peer_ip.rs index 3f76dc67c..7d615d0dc 100644 --- a/src/http/axum_implementation/extractors/peer_ip.rs +++ b/src/http/axum_implementation/extractors/peer_ip.rs @@ -29,7 +29,7 @@ impl From for responses::error::Error { /// /// Will return an error if the peer IP cannot be obtained according to the configuration. /// For example, if the IP is extracted from an HTTP header which is missing in the request. 
-pub fn peer_ip(on_reverse_proxy: bool, remote_client_ip: &RemoteClientIp) -> Result { +pub fn assign_ip_address_to_peer(on_reverse_proxy: bool, remote_client_ip: &RemoteClientIp) -> Result { if on_reverse_proxy { if let Some(ip) = remote_client_ip.right_most_x_forwarded_for { Ok(ip) diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index af70b87e7..1fb111b8b 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -6,13 +6,13 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; -use crate::http::axum_implementation::extractors::peer_ip::peer_ip; +use crate::http::axum_implementation::extractors::peer_ip::assign_ip_address_to_peer; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::requests::announce::{Announce, Event, ExtractAnnounceRequest}; -use crate::http::axum_implementation::responses; +use crate::http::axum_implementation::{responses, services}; use crate::protocol::clock::{Current, Time}; use crate::tracker::peer::Peer; -use crate::tracker::{statistics, Tracker}; +use crate::tracker::Tracker; #[allow(clippy::unused_async)] pub async fn handle( @@ -22,31 +22,19 @@ pub async fn handle( ) -> Response { debug!("http announce request: {:#?}", announce_request); - let info_hash = announce_request.info_hash; - - let peer_ip = peer_ip(tracker.config.on_reverse_proxy, &remote_client_ip); - - let peer_ip = match peer_ip { + let peer_ip = match assign_ip_address_to_peer(tracker.config.on_reverse_proxy, &remote_client_ip) { Ok(peer_ip) => peer_ip, Err(err) => return err, }; let mut peer = peer_from_request(&announce_request, &peer_ip); - let response = tracker.announce(&info_hash, &mut peer, &peer_ip).await; - - match peer_ip { - IpAddr::V4(_) => { - tracker.send_stats_event(statistics::Event::Tcp4Announce).await; - } - IpAddr::V6(_) 
=> { - tracker.send_stats_event(statistics::Event::Tcp6Announce).await; - } - } + let response = services::announce::invoke(tracker.clone(), announce_request.info_hash, &mut peer).await; responses::announce::Announce::from(response).into_response() } +/// It ignores the peer address in the announce request params. #[must_use] fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> Peer { Peer { diff --git a/src/http/axum_implementation/mod.rs b/src/http/axum_implementation/mod.rs index 4b7d90e60..d8431457a 100644 --- a/src/http/axum_implementation/mod.rs +++ b/src/http/axum_implementation/mod.rs @@ -6,3 +6,4 @@ pub mod resources; pub mod responses; pub mod routes; pub mod server; +pub mod services; diff --git a/src/http/axum_implementation/services/announce.rs b/src/http/axum_implementation/services/announce.rs new file mode 100644 index 000000000..9481354ba --- /dev/null +++ b/src/http/axum_implementation/services/announce.rs @@ -0,0 +1,24 @@ +use std::net::IpAddr; +use std::sync::Arc; + +use crate::protocol::info_hash::InfoHash; +use crate::tracker::peer::Peer; +use crate::tracker::{statistics, AnnounceResponse, Tracker}; + +pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) -> AnnounceResponse { + let original_peer_ip = peer.peer_addr.ip(); + + // The tracker could change the original peer ip + let response = tracker.announce(&info_hash, peer, &original_peer_ip).await; + + match original_peer_ip { + IpAddr::V4(_) => { + tracker.send_stats_event(statistics::Event::Tcp4Announce).await; + } + IpAddr::V6(_) => { + tracker.send_stats_event(statistics::Event::Tcp6Announce).await; + } + } + + response +} diff --git a/src/http/axum_implementation/services/mod.rs b/src/http/axum_implementation/services/mod.rs new file mode 100644 index 000000000..74894de33 --- /dev/null +++ b/src/http/axum_implementation/services/mod.rs @@ -0,0 +1 @@ +pub mod announce; diff --git a/src/http/warp_implementation/handlers.rs 
b/src/http/warp_implementation/handlers.rs index fd927150f..400cc5762 100644 --- a/src/http/warp_implementation/handlers.rs +++ b/src/http/warp_implementation/handlers.rs @@ -49,6 +49,9 @@ pub async fn handle_announce( let mut peer = peer_builder::from_request(&announce_request, &remote_client_ip); + // todo: we should use the http::axum_implementation::services::announce::announce service, + // but this Warp implementation is going to be removed. + let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip).await; match remote_client_ip { From 30918daf1aa147ceaa48ee95ef87427de754ff4f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 16 Feb 2023 22:41:41 +0000 Subject: [PATCH 329/435] refactor(http): [#184] move extractor to extractor mod --- .../extractors/announce_request.rs | 45 +++++++ .../axum_implementation/extractors/mod.rs | 1 + .../axum_implementation/handlers/announce.rs | 5 +- .../axum_implementation/requests/announce.rs | 111 ++++++------------ src/tracker/peer.rs | 2 + 5 files changed, 87 insertions(+), 77 deletions(-) create mode 100644 src/http/axum_implementation/extractors/announce_request.rs diff --git a/src/http/axum_implementation/extractors/announce_request.rs b/src/http/axum_implementation/extractors/announce_request.rs new file mode 100644 index 000000000..0371be9a4 --- /dev/null +++ b/src/http/axum_implementation/extractors/announce_request.rs @@ -0,0 +1,45 @@ +use std::panic::Location; + +use axum::async_trait; +use axum::extract::FromRequestParts; +use axum::http::request::Parts; +use axum::response::{IntoResponse, Response}; + +use crate::http::axum_implementation::query::Query; +use crate::http::axum_implementation::requests::announce::{Announce, ParseAnnounceQueryError}; +use crate::http::axum_implementation::responses; + +pub struct ExtractRequest(pub Announce); + +#[async_trait] +impl FromRequestParts for ExtractRequest +where + S: Send + Sync, +{ + type Rejection = Response; + + async fn from_request_parts(parts: 
&mut Parts, _state: &S) -> Result { + let raw_query = parts.uri.query(); + + if raw_query.is_none() { + return Err(responses::error::Error::from(ParseAnnounceQueryError::MissingParams { + location: Location::caller(), + }) + .into_response()); + } + + let query = raw_query.unwrap().parse::(); + + if let Err(error) = query { + return Err(responses::error::Error::from(error).into_response()); + } + + let announce_request = Announce::try_from(query.unwrap()); + + if let Err(error) = announce_request { + return Err(responses::error::Error::from(error).into_response()); + } + + Ok(ExtractRequest(announce_request.unwrap())) + } +} diff --git a/src/http/axum_implementation/extractors/mod.rs b/src/http/axum_implementation/extractors/mod.rs index 71ceea999..65b2775a9 100644 --- a/src/http/axum_implementation/extractors/mod.rs +++ b/src/http/axum_implementation/extractors/mod.rs @@ -1,2 +1,3 @@ +pub mod announce_request; pub mod peer_ip; pub mod remote_client_ip; diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 1fb111b8b..0960510ba 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -6,9 +6,10 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; +use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; use crate::http::axum_implementation::extractors::peer_ip::assign_ip_address_to_peer; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; -use crate::http::axum_implementation::requests::announce::{Announce, Event, ExtractAnnounceRequest}; +use crate::http::axum_implementation::requests::announce::{Announce, Event}; use crate::http::axum_implementation::{responses, services}; use crate::protocol::clock::{Current, Time}; use crate::tracker::peer::Peer; @@ -17,7 +18,7 @@ use crate::tracker::Tracker; #[allow(clippy::unused_async)] pub async fn 
handle( State(tracker): State>, - ExtractAnnounceRequest(announce_request): ExtractAnnounceRequest, + ExtractRequest(announce_request): ExtractRequest, remote_client_ip: RemoteClientIp, ) -> Response { debug!("http announce request: {:#?}", announce_request); diff --git a/src/http/axum_implementation/requests/announce.rs b/src/http/axum_implementation/requests/announce.rs index 463df4fbe..0f9a6fbfe 100644 --- a/src/http/axum_implementation/requests/announce.rs +++ b/src/http/axum_implementation/requests/announce.rs @@ -2,10 +2,6 @@ use std::fmt; use std::panic::Location; use std::str::FromStr; -use axum::async_trait; -use axum::extract::FromRequestParts; -use axum::http::request::Parts; -use axum::response::{IntoResponse, Response}; use thiserror::Error; use crate::http::axum_implementation::query::{ParseQueryError, Query}; @@ -17,9 +13,7 @@ use crate::tracker::peer::{self, IdConversionError}; pub type NumberOfBytes = i64; -pub struct ExtractAnnounceRequest(pub Announce); - -// Param names in the URL query +// Query param names const INFO_HASH: &str = "info_hash"; const PEER_ID: &str = "peer_id"; const PORT: &str = "port"; @@ -43,6 +37,41 @@ pub struct Announce { pub compact: Option, } +#[derive(Error, Debug)] +pub enum ParseAnnounceQueryError { + #[error("missing query params for announce request in {location}")] + MissingParams { location: &'static Location<'static> }, + #[error("missing param {param_name} in {location}")] + MissingParam { + location: &'static Location<'static>, + param_name: String, + }, + #[error("invalid param value {param_value} for {param_name} in {location}")] + InvalidParam { + param_name: String, + param_value: String, + location: &'static Location<'static>, + }, + #[error("param value overflow {param_value} for {param_name} in {location}")] + NumberOfBytesOverflow { + param_name: String, + param_value: String, + location: &'static Location<'static>, + }, + #[error("invalid param value {param_value} for {param_name} in {source}")] + 
InvalidInfoHashParam { + param_name: String, + param_value: String, + source: LocatedError<'static, ConversionError>, + }, + #[error("invalid param value {param_value} for {param_name} in {source}")] + InvalidPeerIdParam { + param_name: String, + param_value: String, + source: LocatedError<'static, IdConversionError>, + }, +} + #[derive(PartialEq, Debug)] pub enum Event { Started, @@ -108,41 +137,6 @@ impl FromStr for Compact { } } -#[derive(Error, Debug)] -pub enum ParseAnnounceQueryError { - #[error("missing query params for announce request in {location}")] - MissingParams { location: &'static Location<'static> }, - #[error("missing param {param_name} in {location}")] - MissingParam { - location: &'static Location<'static>, - param_name: String, - }, - #[error("invalid param value {param_value} for {param_name} in {location}")] - InvalidParam { - param_name: String, - param_value: String, - location: &'static Location<'static>, - }, - #[error("param value overflow {param_value} for {param_name} in {location}")] - NumberOfBytesOverflow { - param_name: String, - param_value: String, - location: &'static Location<'static>, - }, - #[error("invalid param value {param_value} for {param_name} in {source}")] - InvalidInfoHashParam { - param_name: String, - param_value: String, - source: LocatedError<'static, ConversionError>, - }, - #[error("invalid param value {param_value} for {param_name} in {source}")] - InvalidPeerIdParam { - param_name: String, - param_value: String, - source: LocatedError<'static, IdConversionError>, - }, -} - impl From for responses::error::Error { fn from(err: ParseQueryError) -> Self { responses::error::Error { @@ -281,39 +275,6 @@ fn extract_compact(query: &Query) -> Result, ParseAnnounceQueryE } } -#[async_trait] -impl FromRequestParts for ExtractAnnounceRequest -where - S: Send + Sync, -{ - type Rejection = Response; - - async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { - let raw_query = parts.uri.query(); - - if 
 raw_query.is_none() { - return Err(responses::error::Error::from(ParseAnnounceQueryError::MissingParams { - location: Location::caller(), - }) - .into_response()); - } - - let query = raw_query.unwrap().parse::(); - - if let Err(error) = query { - return Err(responses::error::Error::from(error).into_response()); - } - - let announce_request = Announce::try_from(query.unwrap()); - - if let Err(error) = announce_request { - return Err(responses::error::Error::from(error).into_response()); - } - - Ok(ExtractAnnounceRequest(announce_request.unwrap())) - } -} - #[cfg(test)] mod tests { diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 7559463db..735754529 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -22,6 +22,8 @@ pub struct Peer { pub downloaded: NumberOfBytes, #[serde(with = "NumberOfBytesDef")] pub left: NumberOfBytes, // The number of bytes this peer still has to download + // code-review: aquatic_udp_protocol::request::AnnounceEvent is used also for the HTTP tracker. + // Maybe we should use our own enum and use this one only for the UDP tracker. #[serde(with = "AnnounceEventDef")] pub event: AnnounceEvent, } From da638d603ac8d43365361cfd8a101ba581d67325 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 16 Feb 2023 22:47:39 +0000 Subject: [PATCH 330/435] docs(http): fix extractor docs --- src/http/axum_implementation/extractors/remote_client_ip.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/http/axum_implementation/extractors/remote_client_ip.rs b/src/http/axum_implementation/extractors/remote_client_ip.rs index 7b6f3fed2..e852a1b6f 100644 --- a/src/http/axum_implementation/extractors/remote_client_ip.rs +++ b/src/http/axum_implementation/extractors/remote_client_ip.rs @@ -16,7 +16,7 @@ use serde::{Deserialize, Serialize}; /// This extractor extracts these values from the HTTP headers and connection info. 
/// /// `right_most_x_forwarded_for` = 126.0.0.2 -/// `connection_info_ip` = 126.0.0.1 +/// `connection_info_ip` = 126.0.0.3 /// /// More info about inner extractors : #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] From db21e6d08f1943b7ec614a93f42a116c747fc2ac Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Feb 2023 19:14:39 +0000 Subject: [PATCH 331/435] feat(http): [#187] compact announce response in public mode in Axum --- .../axum_implementation/handlers/announce.rs | 16 +- .../axum_implementation/responses/announce.rs | 293 ++++++++++++++++-- .../axum_implementation/services/announce.rs | 4 +- src/tracker/mod.rs | 6 +- tests/http_tracker.rs | 3 +- 5 files changed, 290 insertions(+), 32 deletions(-) diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 0960510ba..81f57e810 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -9,8 +9,9 @@ use log::debug; use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; use crate::http::axum_implementation::extractors::peer_ip::assign_ip_address_to_peer; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; -use crate::http::axum_implementation::requests::announce::{Announce, Event}; -use crate::http::axum_implementation::{responses, services}; +use crate::http::axum_implementation::requests::announce::{Announce, Compact, Event}; +use crate::http::axum_implementation::responses::announce; +use crate::http::axum_implementation::services; use crate::protocol::clock::{Current, Time}; use crate::tracker::peer::Peer; use crate::tracker::Tracker; @@ -30,9 +31,16 @@ pub async fn handle( let mut peer = peer_from_request(&announce_request, &peer_ip); - let response = services::announce::invoke(tracker.clone(), announce_request.info_hash, &mut peer).await; + let announce_data = 
services::announce::invoke(tracker.clone(), announce_request.info_hash, &mut peer).await; - responses::announce::Announce::from(response).into_response() + match announce_request.compact { + Some(compact) => match compact { + Compact::Accepted => announce::Compact::from(announce_data).into_response(), + Compact::NotAccepted => announce::NonCompact::from(announce_data).into_response(), + }, + // Default response format non compact + None => announce::NonCompact::from(announce_data).into_response(), + } } /// It ignores the peer address in the announce request params. diff --git a/src/http/axum_implementation/responses/announce.rs b/src/http/axum_implementation/responses/announce.rs index 63ec74ac2..303adcad9 100644 --- a/src/http/axum_implementation/responses/announce.rs +++ b/src/http/axum_implementation/responses/announce.rs @@ -1,13 +1,22 @@ +use std::io::Write; use std::net::IpAddr; +use std::panic::Location; use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; use serde::{self, Deserialize, Serialize}; +use thiserror::Error; -use crate::tracker::{self, AnnounceResponse}; +use crate::http::axum_implementation::responses; +use crate::tracker::{self, AnnounceData}; +/// Normal (non compact) "announce" response +/// +/// BEP 03: The ``BitTorrent`` Protocol Specification +/// +/// #[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct Announce { +pub struct NonCompact { pub interval: u32, #[serde(rename = "min interval")] pub interval_min: u32, @@ -33,7 +42,7 @@ impl From for Peer { } } -impl Announce { +impl NonCompact { /// # Panics /// /// It would panic if the `Announce` struct contained an inappropriate type. 
@@ -43,14 +52,14 @@ impl Announce { } } -impl IntoResponse for Announce { +impl IntoResponse for NonCompact { fn into_response(self) -> Response { (StatusCode::OK, self.write()).into_response() } } -impl From for Announce { - fn from(domain_announce_response: AnnounceResponse) -> Self { +impl From for NonCompact { + fn from(domain_announce_response: AnnounceData) -> Self { let peers: Vec = domain_announce_response.peers.iter().map(|peer| Peer::from(*peer)).collect(); Self { @@ -63,29 +72,271 @@ impl From for Announce { } } +/// Compact "announce" response +/// +/// BEP 23: Tracker Returns Compact Peer Lists +/// +/// +/// BEP 07: IPv6 Tracker Extension +/// +/// +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Compact { + pub interval: u32, + #[serde(rename = "min interval")] + pub interval_min: u32, + pub complete: u32, + pub incomplete: u32, + pub peers: Vec, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct CompactPeer { + pub ip: IpAddr, + pub port: u16, +} + +impl CompactPeer { + /// # Errors + /// + /// Will return `Err` if internally interrupted. + pub fn write(&self) -> Result, Box> { + let mut bytes: Vec = Vec::new(); + match self.ip { + IpAddr::V4(ip) => { + bytes.write_all(&u32::from(ip).to_be_bytes())?; + } + IpAddr::V6(ip) => { + bytes.write_all(&u128::from(ip).to_be_bytes())?; + } + } + bytes.write_all(&self.port.to_be_bytes())?; + Ok(bytes) + } +} + +impl From for CompactPeer { + fn from(peer: tracker::peer::Peer) -> Self { + CompactPeer { + ip: peer.peer_addr.ip(), + port: peer.peer_addr.port(), + } + } +} + +impl Compact { + /// # Errors + /// + /// Will return `Err` if internally interrupted. 
+ pub fn write(&self) -> Result, Box> { + let mut bytes: Vec = Vec::new(); + + // Begin dictionary + bytes.write_all(b"d")?; + + // Write `interval` + // Dictionary key + bytes.write_all(b"8:interval")?; + // Dictionary key value + bytes.write_all(b"i")?; // Begin integer + bytes.write_all(self.interval.to_string().as_bytes())?; + bytes.write_all(b"e")?; // End integer + + // Write `interval_min` + // Dictionary key + bytes.write_all(b"12:min interval")?; + // Dictionary key value + bytes.write_all(b"i")?; // Begin integer + bytes.write_all(self.interval_min.to_string().as_bytes())?; + bytes.write_all(b"e")?; // End integer + + // Write `complete` + // Dictionary key + bytes.write_all(b"8:complete")?; + // Dictionary key value + bytes.write_all(b"i")?; // Begin integer + bytes.write_all(self.complete.to_string().as_bytes())?; + bytes.write_all(b"e")?; // End integer + + // Write `incomplete` + // Dictionary key + bytes.write_all(b"10:incomplete")?; + // Dictionary key value + bytes.write_all(b"i")?; // Begin integer + bytes.write_all(self.incomplete.to_string().as_bytes())?; + bytes.write_all(b"e")?; // End integer + + // Write peers with IPV4 IPs (BEP 23) + + // Dictionary key + bytes.write_all(b"5:peers")?; + // Dictionary key value + let mut peers_v4: Vec = Vec::new(); + for compact_peer in &self.peers { + match compact_peer.ip { + IpAddr::V4(_ip) => { + let peer_bytes = compact_peer.write()?; + peers_v4.write_all(&peer_bytes)?; + } + IpAddr::V6(_) => {} + } + } + bytes.write_all(peers_v4.len().to_string().as_bytes())?; // Begin byte string + bytes.write_all(b":")?; + bytes.write_all(peers_v4.as_slice())?; + + // todo: why is this `e` here? 
+ bytes.write_all(b"e")?; + + // Write peers with IPV6 IPs (BEP 07) + + // Dictionary key + bytes.write_all(b"6:peers6")?; + // Dictionary key value + let mut peers_v6: Vec = Vec::new(); + for compact_peer in &self.peers { + match compact_peer.ip { + IpAddr::V6(_ip) => { + let peer_bytes = compact_peer.write()?; + peers_v6.write_all(&peer_bytes)?; + } + IpAddr::V4(_) => {} + } + } + bytes.write_all(peers_v6.len().to_string().as_bytes())?; // Begin byte string + bytes.write_all(b":")?; + bytes.write_all(peers_v6.as_slice())?; // End byte string + + // End dictionary + bytes.write_all(b"e")?; + + Ok(bytes) + } +} + +#[derive(Error, Debug)] +pub enum CompactSerializationError { + #[error("cannot write bytes: {inner_error} in {location}")] + CannotWriteBytes { + location: &'static Location<'static>, + inner_error: String, + }, +} + +impl From for responses::error::Error { + fn from(err: CompactSerializationError) -> Self { + responses::error::Error { + failure_reason: format!("{err}"), + } + } +} + +impl IntoResponse for Compact { + fn into_response(self) -> Response { + match self.write() { + Ok(bytes) => (StatusCode::OK, bytes).into_response(), + Err(err) => responses::error::Error::from(CompactSerializationError::CannotWriteBytes { + location: Location::caller(), + inner_error: format!("{err}"), + }) + .into_response(), + } + } +} + +impl From for Compact { + fn from(domain_announce_response: AnnounceData) -> Self { + let peers: Vec = domain_announce_response + .peers + .iter() + .map(|peer| CompactPeer::from(*peer)) + .collect(); + + Self { + interval: domain_announce_response.interval, + interval_min: domain_announce_response.interval_min, + complete: domain_announce_response.swam_stats.seeders, + incomplete: domain_announce_response.swam_stats.leechers, + peers, + } + } +} + #[cfg(test)] mod tests { - use std::net::IpAddr; - use std::str::FromStr; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + + use super::{NonCompact, Peer}; + use 
crate::http::axum_implementation::responses::announce::{Compact, CompactPeer}; - use super::{Announce, Peer}; + // Some ascii values used in tests: + // + // +-----------------+ + // | Dec | Hex | Chr | + // +-----------------+ + // | 105 | 69 | i | + // | 112 | 70 | p | + // +-----------------+ + // + // IP addresses and port numbers used in tests are chosen so that their bencoded representation + // is also a valid string which makes asserts more readable. #[test] - fn announce_response_can_be_bencoded() { - let response = Announce { - interval: 1, - interval_min: 2, - complete: 3, - incomplete: 4, - peers: vec![Peer { - peer_id: "-qB00000000000000001".to_string(), - ip: IpAddr::from_str("127.0.0.1").unwrap(), - port: 8080, - }], + fn non_compact_announce_response_can_be_bencoded() { + let response = NonCompact { + interval: 111, + interval_min: 222, + complete: 333, + incomplete: 444, + peers: vec![ + // IPV4 + Peer { + peer_id: "-qB00000000000000001".to_string(), + ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 + port: 0x7070, // 28784 + }, + // IPV6 + Peer { + peer_id: "-qB00000000000000002".to_string(), + ip: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + port: 0x7070, // 28784 + }, + ], }; // cspell:disable-next-line - assert_eq!(response.write(), "d8:completei3e10:incompletei4e8:intervali1e12:min intervali2e5:peersld2:ip9:127.0.0.17:peer_id20:-qB000000000000000014:porti8080eeee"); + assert_eq!(response.write(), "d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peersld2:ip15:105.105.105.1057:peer_id20:-qB000000000000000014:porti28784eed2:ip39:6969:6969:6969:6969:6969:6969:6969:69697:peer_id20:-qB000000000000000024:porti28784eeee"); + } + + #[test] + fn compact_announce_response_can_be_bencoded() { + let response = Compact { + interval: 111, + interval_min: 222, + complete: 333, + incomplete: 444, + peers: vec![ + // IPV4 + CompactPeer { + ip: 
IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 + port: 0x7070, // 28784 + }, + // IPV6 + CompactPeer { + ip: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + port: 0x7070, // 28784 + }, + ], + }; + + let bytes = response.write().unwrap(); + + // cspell:disable-next-line + assert_eq!( + bytes, + // cspell:disable-next-line + b"d8:intervali111e12:min intervali222e8:completei333e10:incompletei444e5:peers6:iiiippe6:peers618:iiiiiiiiiiiiiiiippe" + ); } } diff --git a/src/http/axum_implementation/services/announce.rs b/src/http/axum_implementation/services/announce.rs index 9481354ba..6378c3008 100644 --- a/src/http/axum_implementation/services/announce.rs +++ b/src/http/axum_implementation/services/announce.rs @@ -3,9 +3,9 @@ use std::sync::Arc; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer::Peer; -use crate::tracker::{statistics, AnnounceResponse, Tracker}; +use crate::tracker::{statistics, AnnounceData, Tracker}; -pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) -> AnnounceResponse { +pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) -> AnnounceData { let original_peer_ip = peer.peer_addr.ip(); // The tracker could change the original peer ip diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index cb3bd0e96..d406446ec 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -43,7 +43,7 @@ pub struct TorrentsMetrics { pub torrents: u64, } -pub struct AnnounceResponse { +pub struct AnnounceData { pub peers: Vec, pub swam_stats: SwamStats, pub interval: u32, @@ -86,7 +86,7 @@ impl Tracker { } /// It handles an announce request - pub async fn announce(&self, info_hash: &InfoHash, peer: &mut Peer, remote_client_ip: &IpAddr) -> AnnounceResponse { + pub async fn announce(&self, info_hash: &InfoHash, peer: &mut Peer, remote_client_ip: &IpAddr) -> AnnounceData { peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, 
self.config.get_ext_ip())); let swam_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; @@ -94,7 +94,7 @@ impl Tracker { // todo: remove peer by using its `Id` instead of its socket address: `get_peers_excluding_peer(peer_id: peer::Id)` let peers = self.get_peers_excluding_peers_with_address(info_hash, &peer.peer_addr).await; - AnnounceResponse { + AnnounceData { peers, swam_stats, interval: self.config.announce_interval, diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index ded30a0b4..3f0f4fbe3 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1833,8 +1833,7 @@ mod axum_http_tracker_server { assert_empty_announce_response(response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_return_the_compact_response() { // Tracker Returns Compact Peer Lists // https://www.bittorrent.org/beps/bep_0023.html From ce95d7ae9ae3c09d833fa81f9a8d0e69a37842ab Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 20 Feb 2023 13:15:56 +0000 Subject: [PATCH 332/435] feat: [#187] add cargo dependency: bip_bencode It will be used to build the bencoded compact announce response in the HTTP tracker. We are currently writing bytes directly into a byte buffer, but Bencode specification imposes some restrictions like: The keys in a dictionary must be alphabetically ordered. We are not doing that in the current implementation. --- Cargo.lock | 75 +++++++++++++++++++++++++++++++++++++++++++++++++++++- Cargo.toml | 1 + 2 files changed, 75 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 6f9d9231b..05b439353 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,15 @@ # It is not intended for manual editing. 
version = 3 +[[package]] +name = "addr2line" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" +dependencies = [ + "gimli", +] + [[package]] name = "adler" version = "1.0.2" @@ -183,6 +192,21 @@ dependencies = [ "tower-service", ] +[[package]] +name = "backtrace" +version = "0.3.67" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide 0.6.2", + "object", + "rustc-demangle", +] + [[package]] name = "base-x" version = "0.2.11" @@ -236,6 +260,15 @@ dependencies = [ "which", ] +[[package]] +name = "bip_bencode" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6048cc5d9680544a5098a290d2845df7dae292c97687b9896b70365bad0ea416" +dependencies = [ + "error-chain", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -637,6 +670,15 @@ dependencies = [ "termcolor", ] +[[package]] +name = "error-chain" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff511d5dc435d703f4971bc399647c9bc38e20cb41452e3b9feb4765419ed3f3" +dependencies = [ + "backtrace", +] + [[package]] name = "fallible-iterator" version = "0.2.0" @@ -675,7 +717,7 @@ checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" dependencies = [ "crc32fast", "libz-sys", - "miniz_oxide", + "miniz_oxide 0.5.4", ] [[package]] @@ -913,6 +955,12 @@ dependencies = [ "wasi 0.11.0+wasi-snapshot-preview1", ] +[[package]] +name = "gimli" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" + [[package]] name = "glob" version = "0.3.0" @@ -1381,6 +1429,15 @@ dependencies = [ "adler", ] +[[package]] +name = "miniz_oxide" 
+version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" +dependencies = [ + "adler", +] + [[package]] name = "mio" version = "0.8.4" @@ -1643,6 +1700,15 @@ dependencies = [ "libc", ] +[[package]] +name = "object" +version = "0.30.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" +dependencies = [ + "memchr", +] + [[package]] name = "once_cell" version = "1.15.0" @@ -2120,6 +2186,12 @@ dependencies = [ "serde", ] +[[package]] +name = "rustc-demangle" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" + [[package]] name = "rustc-hash" version = "1.1.0" @@ -2862,6 +2934,7 @@ dependencies = [ "axum-client-ip", "axum-server", "binascii", + "bip_bencode", "chrono", "config", "derive_more", diff --git a/Cargo.toml b/Cargo.toml index 75ffa7935..917bc9e31 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,6 +61,7 @@ uuid = { version = "1", features = ["v4"] } axum = "0.6.1" axum-server = { version = "0.4.4", features = ["tls-rustls"] } axum-client-ip = "0.4.0" +bip_bencode = "0.4.4" [dev-dependencies] From ad81009778ccfe0680ce9f91a22d03d97d686c5e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 20 Feb 2023 13:18:35 +0000 Subject: [PATCH 333/435] fix(http): [#187] compact announce response - Remove extra byte "e" between "peer" and "peer6" dictionary keys. - Alphabetically order dictionary keys. 
--- .../axum_implementation/responses/announce.rs | 70 +++---------------- 1 file changed, 11 insertions(+), 59 deletions(-) diff --git a/src/http/axum_implementation/responses/announce.rs b/src/http/axum_implementation/responses/announce.rs index 303adcad9..16bb51e4c 100644 --- a/src/http/axum_implementation/responses/announce.rs +++ b/src/http/axum_implementation/responses/announce.rs @@ -4,6 +4,7 @@ use std::panic::Location; use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; +use bip_bencode::{ben_bytes, ben_int, ben_map}; use serde::{self, Deserialize, Serialize}; use thiserror::Error; @@ -129,48 +130,6 @@ impl Compact { /// /// Will return `Err` if internally interrupted. pub fn write(&self) -> Result, Box> { - let mut bytes: Vec = Vec::new(); - - // Begin dictionary - bytes.write_all(b"d")?; - - // Write `interval` - // Dictionary key - bytes.write_all(b"8:interval")?; - // Dictionary key value - bytes.write_all(b"i")?; // Begin integer - bytes.write_all(self.interval.to_string().as_bytes())?; - bytes.write_all(b"e")?; // End integer - - // Write `interval_min` - // Dictionary key - bytes.write_all(b"12:min interval")?; - // Dictionary key value - bytes.write_all(b"i")?; // Begin integer - bytes.write_all(self.interval_min.to_string().as_bytes())?; - bytes.write_all(b"e")?; // End integer - - // Write `complete` - // Dictionary key - bytes.write_all(b"8:complete")?; - // Dictionary key value - bytes.write_all(b"i")?; // Begin integer - bytes.write_all(self.complete.to_string().as_bytes())?; - bytes.write_all(b"e")?; // End integer - - // Write `incomplete` - // Dictionary key - bytes.write_all(b"10:incomplete")?; - // Dictionary key value - bytes.write_all(b"i")?; // Begin integer - bytes.write_all(self.incomplete.to_string().as_bytes())?; - bytes.write_all(b"e")?; // End integer - - // Write peers with IPV4 IPs (BEP 23) - - // Dictionary key - bytes.write_all(b"5:peers")?; - // Dictionary key value let mut peers_v4: Vec = Vec::new(); 
for compact_peer in &self.peers { match compact_peer.ip { @@ -181,18 +140,7 @@ impl Compact { IpAddr::V6(_) => {} } } - bytes.write_all(peers_v4.len().to_string().as_bytes())?; // Begin byte string - bytes.write_all(b":")?; - bytes.write_all(peers_v4.as_slice())?; - - // todo: why is this `e` here? - bytes.write_all(b"e")?; - - // Write peers with IPV6 IPs (BEP 07) - // Dictionary key - bytes.write_all(b"6:peers6")?; - // Dictionary key value let mut peers_v6: Vec = Vec::new(); for compact_peer in &self.peers { match compact_peer.ip { @@ -203,12 +151,16 @@ impl Compact { IpAddr::V4(_) => {} } } - bytes.write_all(peers_v6.len().to_string().as_bytes())?; // Begin byte string - bytes.write_all(b":")?; - bytes.write_all(peers_v6.as_slice())?; // End byte string - // End dictionary - bytes.write_all(b"e")?; + let bytes = (ben_map! { + "complete" => ben_int!(i64::from(self.complete)), + "incomplete" => ben_int!(i64::from(self.incomplete)), + "interval" => ben_int!(i64::from(self.interval)), + "min interval" => ben_int!(i64::from(self.interval_min)), + "peers" => ben_bytes!(peers_v4), + "peers6" => ben_bytes!(peers_v6) + }) + .encode(); Ok(bytes) } @@ -336,7 +288,7 @@ mod tests { assert_eq!( bytes, // cspell:disable-next-line - b"d8:intervali111e12:min intervali222e8:completei333e10:incompletei444e5:peers6:iiiippe6:peers618:iiiiiiiiiiiiiiiippe" + b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peers6:iiiipp6:peers618:iiiiiiiiiiiiiiiippe" ); } } From b1fa1e5669da3986c56367a6171d5faba7783d61 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 20 Feb 2023 13:49:49 +0000 Subject: [PATCH 334/435] test(http): enable axum http tracker tests Those test should have been enabled when the implementation was done. 
--- .../axum_implementation/extractors/peer_ip.rs | 4 +++- tests/http/asserts.rs | 10 ++++++++++ tests/http_tracker.rs | 17 ++++++----------- 3 files changed, 19 insertions(+), 12 deletions(-) diff --git a/src/http/axum_implementation/extractors/peer_ip.rs b/src/http/axum_implementation/extractors/peer_ip.rs index 7d615d0dc..9f7e92a9b 100644 --- a/src/http/axum_implementation/extractors/peer_ip.rs +++ b/src/http/axum_implementation/extractors/peer_ip.rs @@ -9,7 +9,9 @@ use crate::http::axum_implementation::responses; #[derive(Error, Debug)] pub enum ResolutionError { - #[error("missing the right most X-Forwarded-For IP (mandatory on reverse proxy tracker configuration) in {location}")] + #[error( + "missing or invalid the right most X-Forwarded-For IP (mandatory on reverse proxy tracker configuration) in {location}" + )] MissingRightMostXForwardedForIp { location: &'static Location<'static> }, #[error("cannot get the client IP from the connection info in {location}")] MissingClientIp { location: &'static Location<'static> }, diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index e146f252d..ffb857951 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -138,6 +138,16 @@ pub async fn assert_could_not_find_remote_address_on_xff_header_error_response(r ); } +pub async fn assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + "missing or invalid the right most X-Forwarded-For IP (mandatory on reverse proxy tracker configuration)", + Location::caller(), + ); +} + pub async fn assert_invalid_remote_address_on_xff_header_error_response(response: Response) { assert_eq!(response.status(), 200); diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 3f0f4fbe3..413d28bcf 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1405,19 +1405,15 @@ mod axum_http_tracker_server { mod 
and_running_on_reverse_proxy { use torrust_tracker::http::Version; - use crate::http::asserts::{ - assert_could_not_find_remote_address_on_xff_header_error_response, - assert_invalid_remote_address_on_xff_header_error_response, - }; + use crate::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; use crate::http::client::Client; use crate::http::requests::announce::QueryBuilder; use crate::http::server::start_http_tracker_on_reverse_proxy; - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { // If the tracker is running behind a reverse proxy, the peer IP is the - // last IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy client. + // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; @@ -1427,11 +1423,10 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - assert_could_not_find_remote_address_on_xff_header_error_response(response).await; + assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; @@ -1441,7 +1436,7 @@ mod axum_http_tracker_server { .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") .await; - assert_invalid_remote_address_on_xff_header_error_response(response).await; + assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; } } From 502072250b74d369036636144ee95e733b6107d4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 20 Feb 2023 17:47:42 +0000 Subject: [PATCH 335/435] feat(http): consider peers to be different 
if they have the same IP but a different port Other refactores were made to improve funtions names. BREAKING CHANGE: before this a peer with the same IP as the client that is making the announce request was removed from the announce response regardless whether they have the same IP or not. --- src/tracker/mod.rs | 18 +- src/tracker/peer.rs | 15 + src/tracker/services/torrent.rs | 2 +- src/tracker/torrent.rs | 470 ++++++++++++++++++-------------- 4 files changed, 284 insertions(+), 221 deletions(-) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index d406446ec..e01fe6a19 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -8,7 +8,7 @@ pub mod torrent; use std::collections::btree_map::Entry; use std::collections::BTreeMap; -use std::net::{IpAddr, SocketAddr}; +use std::net::IpAddr; use std::panic::Location; use std::sync::Arc; use std::time::Duration; @@ -91,8 +91,7 @@ impl Tracker { let swam_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - // todo: remove peer by using its `Id` instead of its socket address: `get_peers_excluding_peer(peer_id: peer::Id)` - let peers = self.get_peers_excluding_peers_with_address(info_hash, &peer.peer_addr).await; + let peers = self.get_peers_for_peer(info_hash, peer).await; AnnounceData { peers, @@ -298,16 +297,12 @@ impl Tracker { Ok(()) } - async fn get_peers_excluding_peers_with_address( - &self, - info_hash: &InfoHash, - excluded_address: &SocketAddr, - ) -> Vec { + async fn get_peers_for_peer(&self, info_hash: &InfoHash, peer: &Peer) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { None => vec![], - Some(entry) => entry.get_peers(Some(excluded_address)).into_iter().copied().collect(), + Some(entry) => entry.get_peers_for_peer(peer).into_iter().copied().collect(), } } @@ -317,11 +312,14 @@ impl Tracker { match read_lock.get(info_hash) { None => vec![], - Some(entry) => entry.get_peers(None).into_iter().copied().collect(), + Some(entry) => 
entry.get_all_peers().into_iter().copied().collect(), } } pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::SwamStats { + // code-review: consider splitting the function in two (command and query segregation). + // `update_torrent_with_peer` and `get_stats` + let mut torrents = self.torrents.write().await; let torrent_entry = match torrents.entry(*info_hash) { diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 735754529..18ce1b75f 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -10,6 +10,12 @@ use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; use crate::protocol::utils::ser_unix_time_value; +#[derive(PartialEq, Eq, Debug)] +pub enum IPVersion { + IPv4, + IPv6, +} + #[derive(PartialEq, Eq, Debug, Clone, Serialize, Copy)] pub struct Peer { pub peer_id: Id, @@ -37,6 +43,15 @@ impl Peer { pub fn change_ip(&mut self, new_ip: &IpAddr) { self.peer_addr = SocketAddr::new(*new_ip, self.peer_addr.port()); } + + /// The IP version used by the peer: IPV4 or IPV6 + #[must_use] + pub fn ip_version(&self) -> IPVersion { + if self.peer_addr.is_ipv4() { + return IPVersion::IPv4; + } + IPVersion::IPv6 + } } #[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index ba66d15f4..e2353876e 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -80,7 +80,7 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op let (seeders, completed, leechers) = torrent_entry.get_stats(); - let peers = torrent_entry.get_peers(None); + let peers = torrent_entry.get_all_peers(); let peers = Some(peers.iter().map(|peer| (**peer)).collect()); diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index b7b79f0f5..c2db6b027 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -1,10 +1,9 @@ 
-use std::net::{IpAddr, SocketAddr}; use std::time::Duration; use aquatic_udp_protocol::AnnounceEvent; use serde::{Deserialize, Serialize}; -use super::peer; +use super::peer::{self, Peer}; use crate::protocol::clock::{Current, TimeNow}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; @@ -48,26 +47,24 @@ impl Entry { did_torrent_stats_change } + /// Get all peers, limiting the result to the maximum number of scrape torrents. #[must_use] - pub fn get_peers(&self, optional_excluded_address: Option<&SocketAddr>) -> Vec<&peer::Peer> { + pub fn get_all_peers(&self) -> Vec<&peer::Peer> { + self.peers.values().take(MAX_SCRAPE_TORRENTS as usize).collect() + } + + /// Returns the list of peers for a given client. The list filters out: + /// - The client peer that is making the request to the tracker + /// - Other peers that are not using the same IP version as the client peer. + #[must_use] + pub fn get_peers_for_peer(&self, client: &Peer) -> Vec<&peer::Peer> { self.peers .values() - .filter(|peer| match optional_excluded_address { - // Don't filter on ip_version - None => true, - // Filter out different ip_version from remote_addr - Some(excluded_address) => { - // Skip ip address of client - if peer.peer_addr.ip() == excluded_address.ip() { - return false; - } - - match peer.peer_addr.ip() { - IpAddr::V4(_) => excluded_address.is_ipv4(), - IpAddr::V6(_) => excluded_address.is_ipv6(), - } - } - }) + // Take peers which are not the client peer + .filter(|peer| peer.peer_addr != client.peer_addr) + // Take only peers with the same IP version as the client peer + .filter(|peer| peer.ip_version() == client.ip_version()) + // Limit the number of peers on the result .take(MAX_SCRAPE_TORRENTS as usize) .collect() } @@ -101,264 +98,317 @@ pub struct SwamStats { #[cfg(test)] mod tests { - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::ops::Sub; - use std::time::Duration; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + mod torrent_entry { - use 
crate::protocol::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; - use crate::tracker::peer; - use crate::tracker::torrent::Entry; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::ops::Sub; + use std::time::Duration; - struct TorrentPeerBuilder { - peer: peer::Peer, - } + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - impl TorrentPeerBuilder { - pub fn default() -> TorrentPeerBuilder { - let default_peer = peer::Peer { - peer_id: peer::Id([0u8; 20]), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), - updated: Current::now(), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }; - TorrentPeerBuilder { peer: default_peer } - } + use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; + use crate::tracker::peer; + use crate::tracker::torrent::Entry; - pub fn with_event_completed(mut self) -> Self { - self.peer.event = AnnounceEvent::Completed; - self + struct TorrentPeerBuilder { + peer: peer::Peer, } - pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; - self - } + impl TorrentPeerBuilder { + pub fn default() -> TorrentPeerBuilder { + let default_peer = peer::Peer { + peer_id: peer::Id([0u8; 20]), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + updated: Current::now(), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + TorrentPeerBuilder { peer: default_peer } + } + + pub fn with_event_completed(mut self) -> Self { + self.peer.event = AnnounceEvent::Completed; + self + } + + pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; + self + } + + pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { + self.peer.peer_id = peer_id; + self + } + + pub fn 
with_number_of_bytes_left(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes(left); + self + } - pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { - self.peer.peer_id = peer_id; - self + pub fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { + self.peer.updated = updated; + self + } + + pub fn into(self) -> peer::Peer { + self.peer + } } - pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes(left); - self + /// A torrent seeder is a peer with 0 bytes left to download which + /// has not announced it has stopped + fn a_torrent_seeder() -> peer::Peer { + TorrentPeerBuilder::default() + .with_number_of_bytes_left(0) + .with_event_completed() + .into() } - pub fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { - self.peer.updated = updated; - self + /// A torrent leecher is a peer that is not a seeder. + /// Leecher: left > 0 OR event = Stopped + fn a_torrent_leecher() -> peer::Peer { + TorrentPeerBuilder::default() + .with_number_of_bytes_left(1) + .with_event_completed() + .into() } - pub fn into(self) -> peer::Peer { - self.peer + #[test] + fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { + let torrent_entry = Entry::new(); + + assert_eq!(torrent_entry.get_all_peers().len(), 0); } - } - /// A torrent seeder is a peer with 0 bytes left to download which - /// has not announced it has stopped - fn a_torrent_seeder() -> peer::Peer { - TorrentPeerBuilder::default() - .with_number_of_bytes_left(0) - .with_event_completed() - .into() - } + #[test] + fn a_new_peer_can_be_added_to_a_torrent_entry() { + let mut torrent_entry = Entry::new(); + let torrent_peer = TorrentPeerBuilder::default().into(); - /// A torrent leecher is a peer that is not a seeder. 
- /// Leecher: left > 0 OR event = Stopped - fn a_torrent_leecher() -> peer::Peer { - TorrentPeerBuilder::default() - .with_number_of_bytes_left(1) - .with_event_completed() - .into() - } + torrent_entry.update_peer(&torrent_peer); // Add the peer - #[test] - fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { - let torrent_entry = Entry::new(); + assert_eq!(*torrent_entry.get_all_peers()[0], torrent_peer); + assert_eq!(torrent_entry.get_all_peers().len(), 1); + } - assert_eq!(torrent_entry.get_peers(None).len(), 0); - } + #[test] + fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { + let mut torrent_entry = Entry::new(); + let torrent_peer = TorrentPeerBuilder::default().into(); - #[test] - fn a_new_peer_can_be_added_to_a_torrent_entry() { - let mut torrent_entry = Entry::new(); - let torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.update_peer(&torrent_peer); // Add the peer - torrent_entry.update_peer(&torrent_peer); // Add the peer + assert_eq!(torrent_entry.get_all_peers(), vec![&torrent_peer]); + } - assert_eq!(*torrent_entry.get_peers(None)[0], torrent_peer); - assert_eq!(torrent_entry.get_peers(None).len(), 1); - } + #[test] + fn a_peer_can_be_updated_in_a_torrent_entry() { + let mut torrent_entry = Entry::new(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.update_peer(&torrent_peer); // Add the peer - #[test] - fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { - let mut torrent_entry = Entry::new(); - let torrent_peer = TorrentPeerBuilder::default().into(); + torrent_peer.event = AnnounceEvent::Completed; // Update the peer + torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry - torrent_entry.update_peer(&torrent_peer); // Add the peer + assert_eq!(torrent_entry.get_all_peers()[0].event, AnnounceEvent::Completed); + } - assert_eq!(torrent_entry.get_peers(None), vec![&torrent_peer]); 
- } + #[test] + fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { + let mut torrent_entry = Entry::new(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.update_peer(&torrent_peer); // Add the peer - #[test] - fn a_peer_can_be_updated_in_a_torrent_entry() { - let mut torrent_entry = Entry::new(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.update_peer(&torrent_peer); // Add the peer + torrent_peer.event = AnnounceEvent::Stopped; // Update the peer + torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry - torrent_peer.event = AnnounceEvent::Completed; // Update the peer - torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry + assert_eq!(torrent_entry.get_all_peers().len(), 0); + } - assert_eq!(torrent_entry.get_peers(None)[0].event, AnnounceEvent::Completed); - } + #[test] + fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { + let mut torrent_entry = Entry::new(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); - #[test] - fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { - let mut torrent_entry = Entry::new(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.update_peer(&torrent_peer); // Add the peer + torrent_entry.update_peer(&torrent_peer); // Add the peer - torrent_peer.event = AnnounceEvent::Stopped; // Update the peer - torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry + torrent_peer.event = AnnounceEvent::Completed; // Update the peer + let stats_have_changed = torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry - assert_eq!(torrent_entry.get_peers(None).len(), 0); - } + assert!(stats_have_changed); + } - #[test] - fn 
torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { - let mut torrent_entry = Entry::new(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); + #[test] + fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( + ) { + let mut torrent_entry = Entry::new(); + let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); - torrent_entry.update_peer(&torrent_peer); // Add the peer + // Add a peer that did not exist before in the entry + let torrent_stats_have_not_changed = !torrent_entry.update_peer(&torrent_peer_announcing_complete_event); - torrent_peer.event = AnnounceEvent::Completed; // Update the peer - let stats_have_changed = torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry + assert!(torrent_stats_have_not_changed); + } - assert!(stats_have_changed); - } + #[test] + fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_the_client_that_is_making_the_request() + { + let mut torrent_entry = Entry::new(); + let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); + torrent_entry.update_peer(&torrent_peer); // Add peer - #[test] - fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( - ) { - let mut torrent_entry = Entry::new(); - let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); + // Get peers excluding the one we have just added + let peers = torrent_entry.get_peers_for_peer(&torrent_peer); - // Add a peer that did not exist before in the entry - let torrent_stats_have_not_changed = !torrent_entry.update_peer(&torrent_peer_announcing_complete_event); + assert_eq!(peers.len(), 0); 
+ } - assert!(torrent_stats_have_not_changed); - } + #[test] + fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_peers_that_do_not_use_the_same_ip_version( + ) { + let mut torrent_entry = Entry::new(); - #[test] - fn a_torrent_entry_could_filter_out_peers_with_a_given_socket_address() { - let mut torrent_entry = Entry::new(); - let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); - let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); - torrent_entry.update_peer(&torrent_peer); // Add peer + // Add peer 1 using IPV4 + let peer1_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + let torrent_peer_1 = TorrentPeerBuilder::default().with_peer_address(peer1_socket_address).into(); + torrent_entry.update_peer(&torrent_peer_1); - // Get peers excluding the one we have just added - let peers = torrent_entry.get_peers(Some(&peer_socket_address)); + // Add peer 2 using IPV6 + let peer2_socket_address = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff)), 8080); + let torrent_peer_2 = TorrentPeerBuilder::default().with_peer_address(peer2_socket_address).into(); + torrent_entry.update_peer(&torrent_peer_2); - assert_eq!(peers.len(), 0); - } + // Get peers for peer 1 + let peers = torrent_entry.get_peers_for_peer(&torrent_peer_1); - fn peer_id_from_i32(number: i32) -> peer::Id { - let peer_id = number.to_le_bytes(); - peer::Id([ - 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], peer_id[2], - peer_id[3], - ]) - } + // Peer using IPV6 should not be included + assert_eq!(peers.len(), 0); + } + + #[test] + fn two_peers_with_the_same_ip_but_different_port_should_be_considered_different_peers() { + let mut torrent_entry = Entry::new(); - #[test] - fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { - let mut torrent_entry = 
Entry::new(); + let peer_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + + // Add peer 1 + let torrent_peer_1 = TorrentPeerBuilder::default() + .with_peer_address(SocketAddr::new(peer_ip, 8080)) + .into(); + torrent_entry.update_peer(&torrent_peer_1); - // We add one more peer than the scrape limit - for peer_number in 1..=74 + 1 { - let torrent_peer = TorrentPeerBuilder::default() - .with_peer_id(peer_id_from_i32(peer_number)) + // Add peer 2 + let torrent_peer_2 = TorrentPeerBuilder::default() + .with_peer_address(SocketAddr::new(peer_ip, 8081)) .into(); - torrent_entry.update_peer(&torrent_peer); + torrent_entry.update_peer(&torrent_peer_2); + + // Get peers for peer 1 + let peers = torrent_entry.get_peers_for_peer(&torrent_peer_1); + + // The peer 2 using the same IP but different port should be included + assert_eq!(peers[0].peer_addr.ip(), Ipv4Addr::new(127, 0, 0, 1)); + assert_eq!(peers[0].peer_addr.port(), 8081); } - let peers = torrent_entry.get_peers(None); + fn peer_id_from_i32(number: i32) -> peer::Id { + let peer_id = number.to_le_bytes(); + peer::Id([ + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], + peer_id[2], peer_id[3], + ]) + } - assert_eq!(peers.len(), 74); - } + #[test] + fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { + let mut torrent_entry = Entry::new(); - #[test] - fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { - let mut torrent_entry = Entry::new(); - let torrent_seeder = a_torrent_seeder(); + // We add one more peer than the scrape limit + for peer_number in 1..=74 + 1 { + let torrent_peer = TorrentPeerBuilder::default() + .with_peer_id(peer_id_from_i32(peer_number)) + .into(); + torrent_entry.update_peer(&torrent_peer); + } - torrent_entry.update_peer(&torrent_seeder); // Add seeder + let peers = torrent_entry.get_all_peers(); - assert_eq!(torrent_entry.get_stats().0, 1); - } + assert_eq!(peers.len(), 74); + } - #[test] - 
fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { - let mut torrent_entry = Entry::new(); - let torrent_leecher = a_torrent_leecher(); + #[test] + fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { + let mut torrent_entry = Entry::new(); + let torrent_seeder = a_torrent_seeder(); - torrent_entry.update_peer(&torrent_leecher); // Add leecher + torrent_entry.update_peer(&torrent_seeder); // Add seeder - assert_eq!(torrent_entry.get_stats().2, 1); - } + assert_eq!(torrent_entry.get_stats().0, 1); + } - #[test] - fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( - ) { - let mut torrent_entry = Entry::new(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.update_peer(&torrent_peer); // Add the peer + #[test] + fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { + let mut torrent_entry = Entry::new(); + let torrent_leecher = a_torrent_leecher(); - // Announce "Completed" torrent download event. 
- torrent_peer.event = AnnounceEvent::Completed; - torrent_entry.update_peer(&torrent_peer); // Update the peer + torrent_entry.update_peer(&torrent_leecher); // Add leecher - let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().1; + assert_eq!(torrent_entry.get_stats().2, 1); + } - assert_eq!(number_of_previously_known_peers_with_completed_torrent, 1); - } + #[test] + fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( + ) { + let mut torrent_entry = Entry::new(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.update_peer(&torrent_peer); // Add the peer - #[test] - fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { - let mut torrent_entry = Entry::new(); - let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); + // Announce "Completed" torrent download event. + torrent_peer.event = AnnounceEvent::Completed; + torrent_entry.update_peer(&torrent_peer); // Update the peer - // Announce "Completed" torrent download event. - // It's the first event announced from this peer. - torrent_entry.update_peer(&torrent_peer_announcing_complete_event); // Add the peer + let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().1; - let number_of_peers_with_completed_torrent = torrent_entry.get_stats().1; + assert_eq!(number_of_previously_known_peers_with_completed_torrent, 1); + } - assert_eq!(number_of_peers_with_completed_torrent, 0); - } + #[test] + fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { + let mut torrent_entry = Entry::new(); + let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); + + // Announce "Completed" torrent download event. 
+ // It's the first event announced from this peer. + torrent_entry.update_peer(&torrent_peer_announcing_complete_event); // Add the peer - #[test] - fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { - let mut torrent_entry = Entry::new(); + let number_of_peers_with_completed_torrent = torrent_entry.get_stats().1; - let timeout = 120u32; + assert_eq!(number_of_peers_with_completed_torrent, 0); + } - let now = Working::now(); - Stopped::local_set(&now); + #[test] + fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { + let mut torrent_entry = Entry::new(); - let timeout_seconds_before_now = now.sub(Duration::from_secs(u64::from(timeout))); - let inactive_peer = TorrentPeerBuilder::default() - .updated_at(timeout_seconds_before_now.sub(Duration::from_secs(1))) - .into(); - torrent_entry.update_peer(&inactive_peer); // Add the peer + let timeout = 120u32; - torrent_entry.remove_inactive_peers(timeout); + let now = Working::now(); + Stopped::local_set(&now); - assert_eq!(torrent_entry.peers.len(), 0); + let timeout_seconds_before_now = now.sub(Duration::from_secs(u64::from(timeout))); + let inactive_peer = TorrentPeerBuilder::default() + .updated_at(timeout_seconds_before_now.sub(Duration::from_secs(1))) + .into(); + torrent_entry.update_peer(&inactive_peer); // Add the peer + + torrent_entry.remove_inactive_peers(timeout); + + assert_eq!(torrent_entry.peers.len(), 0); + } } } From 7b9131ec9255299e6685565488c5b6b386725414 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Feb 2023 18:36:09 +0000 Subject: [PATCH 336/435] fix(http): [#187] fix non-compact announce response Fixed some errors in the normal (non-compact) announce response in the HTTP tracker. Only fo rthe new Aux, implementation: - The peer id should be a 20-byte string not the hex value representation as an string. - Response body (bencode) should be binary (bytes). 
- The "peer id" key in the peer dictionary should have a white space "peer id" intead of "peer_id" (with underscore). --- .../axum_implementation/responses/announce.rs | 109 ++++++++++++------ src/tracker/peer.rs | 10 ++ tests/http/asserts.rs | 14 +-- tests/http/asserts_warp.rs | 15 +++ tests/http/mod.rs | 1 + tests/http/responses/announce.rs | 8 +- tests/http/responses/announce_warp.rs | 30 +++++ tests/http/responses/mod.rs | 1 + tests/http_tracker.rs | 12 +- 9 files changed, 149 insertions(+), 51 deletions(-) create mode 100644 tests/http/asserts_warp.rs create mode 100644 tests/http/responses/announce_warp.rs diff --git a/src/http/axum_implementation/responses/announce.rs b/src/http/axum_implementation/responses/announce.rs index 16bb51e4c..a91266490 100644 --- a/src/http/axum_implementation/responses/announce.rs +++ b/src/http/axum_implementation/responses/announce.rs @@ -4,7 +4,7 @@ use std::panic::Location; use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; -use bip_bencode::{ben_bytes, ben_int, ben_map}; +use bip_bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut}; use serde::{self, Deserialize, Serialize}; use thiserror::Error; @@ -28,15 +28,26 @@ pub struct NonCompact { #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Peer { - pub peer_id: String, + pub peer_id: [u8; 20], pub ip: IpAddr, pub port: u16, } +impl Peer { + #[must_use] + pub fn ben_map(&self) -> BencodeMut { + ben_map! { + "peer id" => ben_bytes!(self.peer_id.clone().to_vec()), + "ip" => ben_bytes!(self.ip.to_string()), + "port" => ben_int!(i64::from(self.port)) + } + } +} + impl From for Peer { fn from(peer: tracker::peer::Peer) -> Self { Peer { - peer_id: peer.peer_id.to_string(), + peer_id: peer.peer_id.to_bytes(), ip: peer.peer_addr.ip(), port: peer.peer_addr.port(), } @@ -46,16 +57,29 @@ impl From for Peer { impl NonCompact { /// # Panics /// - /// It would panic if the `Announce` struct contained an inappropriate type. 
+ /// Will return an error if it can't access the bencode as a mutable `BListAccess`. #[must_use] - pub fn write(&self) -> String { - serde_bencode::to_string(&self).unwrap() + pub fn body(&self) -> Vec { + let mut peers_list = ben_list!(); + let peers_list_mut = peers_list.list_mut().unwrap(); + for peer in &self.peers { + peers_list_mut.push(peer.ben_map()); + } + + (ben_map! { + "complete" => ben_int!(i64::from(self.complete)), + "incomplete" => ben_int!(i64::from(self.incomplete)), + "interval" => ben_int!(i64::from(self.interval)), + "min interval" => ben_int!(i64::from(self.interval_min)), + "peers" => peers_list.clone() + }) + .encode() } } impl IntoResponse for NonCompact { fn into_response(self) -> Response { - (StatusCode::OK, self.write()).into_response() + (StatusCode::OK, self.body()).into_response() } } @@ -101,7 +125,7 @@ impl CompactPeer { /// # Errors /// /// Will return `Err` if internally interrupted. - pub fn write(&self) -> Result, Box> { + pub fn bytes(&self) -> Result, Box> { let mut bytes: Vec = Vec::new(); match self.ip { IpAddr::V4(ip) => { @@ -129,39 +153,45 @@ impl Compact { /// # Errors /// /// Will return `Err` if internally interrupted. - pub fn write(&self) -> Result, Box> { - let mut peers_v4: Vec = Vec::new(); + pub fn body(&self) -> Result, Box> { + let bytes = (ben_map! { + "complete" => ben_int!(i64::from(self.complete)), + "incomplete" => ben_int!(i64::from(self.incomplete)), + "interval" => ben_int!(i64::from(self.interval)), + "min interval" => ben_int!(i64::from(self.interval_min)), + "peers" => ben_bytes!(self.peers_v4_bytes()?), + "peers6" => ben_bytes!(self.peers_v6_bytes()?) 
+ }) + .encode(); + + Ok(bytes) + } + + fn peers_v4_bytes(&self) -> Result, Box> { + let mut bytes: Vec = Vec::new(); for compact_peer in &self.peers { match compact_peer.ip { IpAddr::V4(_ip) => { - let peer_bytes = compact_peer.write()?; - peers_v4.write_all(&peer_bytes)?; + let peer_bytes = compact_peer.bytes()?; + bytes.write_all(&peer_bytes)?; } IpAddr::V6(_) => {} } } + Ok(bytes) + } - let mut peers_v6: Vec = Vec::new(); + fn peers_v6_bytes(&self) -> Result, Box> { + let mut bytes: Vec = Vec::new(); for compact_peer in &self.peers { match compact_peer.ip { IpAddr::V6(_ip) => { - let peer_bytes = compact_peer.write()?; - peers_v6.write_all(&peer_bytes)?; + let peer_bytes = compact_peer.bytes()?; + bytes.write_all(&peer_bytes)?; } IpAddr::V4(_) => {} } } - - let bytes = (ben_map! { - "complete" => ben_int!(i64::from(self.complete)), - "incomplete" => ben_int!(i64::from(self.incomplete)), - "interval" => ben_int!(i64::from(self.interval)), - "min interval" => ben_int!(i64::from(self.interval_min)), - "peers" => ben_bytes!(peers_v4), - "peers6" => ben_bytes!(peers_v6) - }) - .encode(); - Ok(bytes) } } @@ -185,7 +215,7 @@ impl From for responses::error::Error { impl IntoResponse for Compact { fn into_response(self) -> Response { - match self.write() { + match self.body() { Ok(bytes) => (StatusCode::OK, bytes).into_response(), Err(err) => responses::error::Error::from(CompactSerializationError::CannotWriteBytes { location: Location::caller(), @@ -244,21 +274,28 @@ mod tests { peers: vec![ // IPV4 Peer { - peer_id: "-qB00000000000000001".to_string(), + peer_id: *b"-qB00000000000000001", ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 port: 0x7070, // 28784 }, // IPV6 Peer { - peer_id: "-qB00000000000000002".to_string(), + peer_id: *b"-qB00000000000000002", ip: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), port: 0x7070, // 28784 }, ], }; + let bytes = response.body(); + // cspell:disable-next-line 
- assert_eq!(response.write(), "d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peersld2:ip15:105.105.105.1057:peer_id20:-qB000000000000000014:porti28784eed2:ip39:6969:6969:6969:6969:6969:6969:6969:69697:peer_id20:-qB000000000000000024:porti28784eeee"); + let expected_bytes = b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peersld2:ip15:105.105.105.1057:peer id20:-qB000000000000000014:porti28784eed2:ip39:6969:6969:6969:6969:6969:6969:6969:69697:peer id20:-qB000000000000000024:porti28784eeee"; + + assert_eq!( + String::from_utf8(bytes).unwrap(), + String::from_utf8(expected_bytes.to_vec()).unwrap() + ); } #[test] @@ -282,13 +319,15 @@ mod tests { ], }; - let bytes = response.write().unwrap(); + let bytes = response.body().unwrap(); - // cspell:disable-next-line - assert_eq!( - bytes, + let expected_bytes = // cspell:disable-next-line - b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peers6:iiiipp6:peers618:iiiiiiiiiiiiiiiippe" + b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peers6:iiiipp6:peers618:iiiiiiiiiiiiiiiippe"; + + assert_eq!( + String::from_utf8(bytes).unwrap(), + String::from_utf8(expected_bytes.to_vec()).unwrap() ); } } diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 18ce1b75f..c6d87f036 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -84,6 +84,11 @@ impl Id { ret.0.clone_from_slice(bytes); ret } + + #[must_use] + pub fn to_bytes(&self) -> [u8; 20] { + self.0 + } } impl From<[u8; 20]> for Id { @@ -369,6 +374,11 @@ mod test { ]); assert_eq!(id.to_string(), "009f9296009f9296009f9296009f9296009f9296"); } + + #[test] + fn should_return_the_inner_bytes() { + assert_eq!(peer::Id(*b"-qB00000000000000000").to_bytes(), *b"-qB00000000000000000"); + } } mod torrent_peer { diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index ffb857951..a10edc9e6 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -30,17 +30,15 @@ pub 
async fn assert_empty_announce_response(response: Response) { pub async fn assert_announce_response(response: Response, expected_announce_response: &Announce) { assert_eq!(response.status(), 200); - let body = response.text().await.unwrap(); - let announce_response: Announce = serde_bencode::from_str(&body) - .unwrap_or_else(|_| panic!("response body should be a valid announce response, got \"{}\"", &body)); + + let body = response.bytes().await.unwrap(); + + let announce_response: Announce = serde_bencode::from_bytes(&body) + .unwrap_or_else(|_| panic!("response body should be a valid announce response, got \"{:#?}\"", &body)); + assert_eq!(announce_response, *expected_announce_response); } -/// Sample bencoded announce response as byte array: -/// -/// ```text -/// b"d8:intervali120e12:min intervali120e8:completei2e10:incompletei0e5:peers6:~\0\0\x01\x1f\x90e6:peers60:e" -/// ``` pub async fn assert_compact_announce_response(response: Response, expected_response: &Compact) { assert_eq!(response.status(), 200); diff --git a/tests/http/asserts_warp.rs b/tests/http/asserts_warp.rs new file mode 100644 index 000000000..6bda82f6c --- /dev/null +++ b/tests/http/asserts_warp.rs @@ -0,0 +1,15 @@ +/// todo: this mod should be removed when we remove the Warp implementation for the HTTP tracker. 
+use reqwest::Response; + +use super::responses::announce_warp::WarpAnnounce; + +pub async fn assert_warp_announce_response(response: Response, expected_announce_response: &WarpAnnounce) { + assert_eq!(response.status(), 200); + + let body = response.text().await.unwrap(); + + let announce_response: WarpAnnounce = serde_bencode::from_str(&body) + .unwrap_or_else(|_| panic!("response body should be a valid announce response, got \"{:#?}\"", &body)); + + assert_eq!(announce_response, *expected_announce_response); +} diff --git a/tests/http/mod.rs b/tests/http/mod.rs index 8c1e3c995..40616025b 100644 --- a/tests/http/mod.rs +++ b/tests/http/mod.rs @@ -1,4 +1,5 @@ pub mod asserts; +pub mod asserts_warp; pub mod client; pub mod connection_info; pub mod requests; diff --git a/tests/http/responses/announce.rs b/tests/http/responses/announce.rs index e976ba9ae..8a07ebd5e 100644 --- a/tests/http/responses/announce.rs +++ b/tests/http/responses/announce.rs @@ -10,20 +10,22 @@ pub struct Announce { pub interval: u32, #[serde(rename = "min interval")] pub min_interval: u32, - pub peers: Vec, // Peers with IPV4 + pub peers: Vec, // Peers using IPV4 and IPV6 } #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct DictionaryPeer { pub ip: String, - pub peer_id: String, + #[serde(rename = "peer id")] + #[serde(with = "serde_bytes")] + pub peer_id: Vec, pub port: u16, } impl From for DictionaryPeer { fn from(peer: Peer) -> Self { DictionaryPeer { - peer_id: peer.peer_id.to_string(), + peer_id: peer.peer_id.to_bytes().to_vec(), ip: peer.peer_addr.ip().to_string(), port: peer.peer_addr.port(), } diff --git a/tests/http/responses/announce_warp.rs b/tests/http/responses/announce_warp.rs new file mode 100644 index 000000000..0fcf05eb8 --- /dev/null +++ b/tests/http/responses/announce_warp.rs @@ -0,0 +1,30 @@ +/// todo: this mod should be removed when we remove the Warp implementation for the HTTP tracker. 
+use serde::{self, Deserialize, Serialize}; +use torrust_tracker::tracker::peer::Peer; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct WarpAnnounce { + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + #[serde(rename = "min interval")] + pub min_interval: u32, + pub peers: Vec, // Peers using IPV4 +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct WarpDictionaryPeer { + pub ip: String, + pub peer_id: String, + pub port: u16, +} + +impl From for WarpDictionaryPeer { + fn from(peer: Peer) -> Self { + Self { + peer_id: peer.peer_id.to_string(), + ip: peer.peer_addr.ip().to_string(), + port: peer.peer_addr.port(), + } + } +} diff --git a/tests/http/responses/mod.rs b/tests/http/responses/mod.rs index bdc689056..aecb53fed 100644 --- a/tests/http/responses/mod.rs +++ b/tests/http/responses/mod.rs @@ -1,3 +1,4 @@ pub mod announce; +pub mod announce_warp; pub mod error; pub mod scrape; diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 413d28bcf..9e62b94a2 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -85,10 +85,12 @@ mod warp_http_tracker_server { assert_internal_server_error_response, assert_invalid_info_hash_error_response, assert_invalid_peer_id_error_response, assert_is_announce_response, }; + use crate::http::asserts_warp::assert_warp_announce_response; use crate::http::client::Client; use crate::http::requests::announce::{Compact, QueryBuilder}; use crate::http::responses; - use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; + use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList}; + use crate::http::responses::announce_warp::{WarpAnnounce, WarpDictionaryPeer}; use crate::http::server::{ start_default_http_tracker, start_http_tracker_on_reverse_proxy, start_http_tracker_with_external_ip, start_ipv6_http_tracker, start_public_http_tracker, @@ -395,15 +397,15 @@ mod warp_http_tracker_server { ) .await; - // It 
should only contain teh previously announced peer - assert_announce_response( + // It should only contain the previously announced peer + assert_warp_announce_response( response, - &Announce { + &WarpAnnounce { complete: 2, incomplete: 0, interval: http_tracker_server.tracker.config.announce_interval, min_interval: http_tracker_server.tracker.config.min_announce_interval, - peers: vec![DictionaryPeer::from(previously_announced_peer)], + peers: vec![WarpDictionaryPeer::from(previously_announced_peer)], }, ) .await; From 0df1a79f31030c30d2dd903c2b27e1bd049c4a45 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Feb 2023 19:21:43 +0000 Subject: [PATCH 337/435] fix(http): [#187] add peers using both IPv4 an IPv6 in announce response The normal (non-compact) announce response should included all peers except the peer making the request. We were excluding peers that are nos using the same IP version as the peer making the request. Peers are included in the bencoded response in the "peers" key of the main bencoded dictionary. --- src/tracker/torrent.rs | 31 ++---------- tests/api/server.rs | 2 +- tests/common/fixtures.rs | 6 +++ tests/http/server.rs | 2 +- tests/http_tracker.rs | 106 ++++++++++++++++++++++++++++++--------- tests/tracker_api.rs | 24 ++++++--- 6 files changed, 110 insertions(+), 61 deletions(-) diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index c2db6b027..3161cd36b 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -53,17 +53,14 @@ impl Entry { self.peers.values().take(MAX_SCRAPE_TORRENTS as usize).collect() } - /// Returns the list of peers for a given client. The list filters out: - /// - The client peer that is making the request to the tracker - /// - Other peers that are not using the same IP version as the client peer. + /// Returns the list of peers for a given client. + /// It filters out the input peer. 
#[must_use] pub fn get_peers_for_peer(&self, client: &Peer) -> Vec<&peer::Peer> { self.peers .values() // Take peers which are not the client peer .filter(|peer| peer.peer_addr != client.peer_addr) - // Take only peers with the same IP version as the client peer - .filter(|peer| peer.ip_version() == client.ip_version()) // Limit the number of peers on the result .take(MAX_SCRAPE_TORRENTS as usize) .collect() @@ -101,7 +98,7 @@ mod tests { mod torrent_entry { - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::ops::Sub; use std::time::Duration; @@ -268,28 +265,6 @@ mod tests { assert_eq!(peers.len(), 0); } - #[test] - fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_peers_that_do_not_use_the_same_ip_version( - ) { - let mut torrent_entry = Entry::new(); - - // Add peer 1 using IPV4 - let peer1_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); - let torrent_peer_1 = TorrentPeerBuilder::default().with_peer_address(peer1_socket_address).into(); - torrent_entry.update_peer(&torrent_peer_1); - - // Add peer 2 using IPV6 - let peer2_socket_address = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff)), 8080); - let torrent_peer_2 = TorrentPeerBuilder::default().with_peer_address(peer2_socket_address).into(); - torrent_entry.update_peer(&torrent_peer_2); - - // Get peers for peer 1 - let peers = torrent_entry.get_peers_for_peer(&torrent_peer_1); - - // Peer using IPV6 should not be included - assert_eq!(peers.len(), 0); - } - #[test] fn two_peers_with_the_same_ip_but_different_port_should_be_considered_different_peers() { let mut torrent_entry = Entry::new(); diff --git a/tests/api/server.rs b/tests/api/server.rs index c1cd0630a..0e23a4320 100644 --- a/tests/api/server.rs +++ b/tests/api/server.rs @@ -72,7 +72,7 @@ impl Server { } /// Add a torrent to the tracker - pub async fn add_torrent(&self, info_hash: &InfoHash, 
peer: &Peer) { + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; } } diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index 1ead0db0c..d4b3e9812 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -22,6 +22,12 @@ impl PeerBuilder { self } + #[allow(dead_code)] + pub fn with_peer_addr(mut self, peer_addr: &SocketAddr) -> Self { + self.peer.peer_addr = *peer_addr; + self + } + #[allow(dead_code)] pub fn with_bytes_pending_to_download(mut self, left: i64) -> Self { self.peer.left = NumberOfBytes(left); diff --git a/tests/http/server.rs b/tests/http/server.rs index e5266eee5..1c8d1cb77 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -131,7 +131,7 @@ impl Server { self.connection_info.clone() } - pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) { + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; } } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 9e62b94a2..a09802724 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -385,7 +385,9 @@ mod warp_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + http_tracker_server + .add_torrent_peer(&info_hash, &previously_announced_peer) + .await; // Announce the new Peer 2. 
This new peer is non included on the response peer list let response = Client::new(http_tracker_server.get_connection_info()) @@ -419,7 +421,7 @@ mod warp_http_tracker_server { let peer = PeerBuilder::default().build(); // Add a peer - http_tracker_server.add_torrent(&info_hash, &peer).await; + http_tracker_server.add_torrent_peer(&info_hash, &peer).await; let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -450,7 +452,9 @@ mod warp_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + http_tracker_server + .add_torrent_peer(&info_hash, &previously_announced_peer) + .await; // Announce the new Peer 2 accepting compact responses let response = Client::new(http_tracker_server.get_connection_info()) @@ -489,7 +493,9 @@ mod warp_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + http_tracker_server + .add_torrent_peer(&info_hash, &previously_announced_peer) + .await; // Announce the new Peer 2 without passing the "compact" param // By default it should respond with the compact peer list @@ -783,7 +789,7 @@ mod warp_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -821,7 +827,7 @@ mod warp_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -1001,7 +1007,7 @@ mod warp_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() 
.with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -1030,7 +1036,7 @@ mod warp_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -1156,7 +1162,7 @@ mod warp_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -1185,7 +1191,7 @@ mod warp_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -1227,7 +1233,7 @@ mod warp_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -1455,7 +1461,7 @@ mod axum_http_tracker_server { // Vuze (bittorrent client) docs: // https://wiki.vuze.com/w/Announce - use std::net::{IpAddr, Ipv6Addr}; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::str::FromStr; use local_ip_address::local_ip; @@ -1780,7 +1786,9 @@ mod axum_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + http_tracker_server + .add_torrent_peer(&info_hash, &previously_announced_peer) + .await; // Announce the new Peer 2. 
This new peer is non included on the response peer list let response = Client::new(http_tracker_server.get_connection_info()) @@ -1806,6 +1814,54 @@ mod axum_http_tracker_server { .await; } + #[tokio::test] + async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { + let http_tracker_server = start_public_http_tracker(Version::Axum).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Announce a peer using IPV4 + let peer_using_ipv4 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) + .build(); + http_tracker_server.add_torrent_peer(&info_hash, &peer_using_ipv4).await; + + // Announce a peer using IPV6 + let peer_using_ipv6 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + 8080, + )) + .build(); + http_tracker_server.add_torrent_peer(&info_hash, &peer_using_ipv6).await; + + // Announce the new Peer. + let response = Client::new(http_tracker_server.get_connection_info()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000003")) + .query(), + ) + .await; + + // The newly announced peer is not included on the response peer list, + // but all the previously announced peers should be included regardless the IP version they are using. 
+ assert_announce_response( + response, + &Announce { + complete: 3, + incomplete: 0, + interval: http_tracker_server.tracker.config.announce_interval, + min_interval: http_tracker_server.tracker.config.min_announce_interval, + peers: vec![DictionaryPeer::from(peer_using_ipv4), DictionaryPeer::from(peer_using_ipv6)], + }, + ) + .await; + } + #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { let http_tracker_server = start_public_http_tracker(Version::Axum).await; @@ -1814,7 +1870,7 @@ mod axum_http_tracker_server { let peer = PeerBuilder::default().build(); // Add a peer - http_tracker_server.add_torrent(&info_hash, &peer).await; + http_tracker_server.add_torrent_peer(&info_hash, &peer).await; let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -1845,7 +1901,9 @@ mod axum_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + http_tracker_server + .add_torrent_peer(&info_hash, &previously_announced_peer) + .await; // Announce the new Peer 2 accepting compact responses let response = Client::new(http_tracker_server.get_connection_info()) @@ -1884,7 +1942,9 @@ mod axum_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server.add_torrent(&info_hash, &previously_announced_peer).await; + http_tracker_server + .add_torrent_peer(&info_hash, &previously_announced_peer) + .await; // Announce the new Peer 2 without passing the "compact" param // By default it should respond with the compact peer list @@ -2181,7 +2241,7 @@ mod axum_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -2220,7 +2280,7 @@ mod axum_http_tracker_server { let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -2406,7 +2466,7 @@ mod axum_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -2436,7 +2496,7 @@ mod axum_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -2566,7 +2626,7 @@ mod axum_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -2596,7 +2656,7 @@ mod axum_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) @@ -2639,7 +2699,7 @@ mod axum_http_tracker_server { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); http_tracker - .add_torrent( + .add_torrent_peer( &info_hash, &PeerBuilder::default() .with_peer_id(&peer::Id(*b"-qB00000000000000001")) diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index b79e8a8af..193c6487c 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -115,7 +115,7 @@ mod tracker_apis { let api_server = start_default_api().await; api_server - .add_torrent( + .add_torrent_peer( &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), &PeerBuilder::default().into(), ) @@ -189,7 
+189,7 @@ mod tracker_apis { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - api_server.add_torrent(&info_hash, &PeerBuilder::default().into()).await; + api_server.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::empty()) @@ -216,8 +216,12 @@ mod tracker_apis { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - api_server.add_torrent(&info_hash_1, &PeerBuilder::default().into()).await; - api_server.add_torrent(&info_hash_2, &PeerBuilder::default().into()).await; + api_server + .add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()) + .await; + api_server + .add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()) + .await; let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) @@ -244,8 +248,12 @@ mod tracker_apis { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - api_server.add_torrent(&info_hash_1, &PeerBuilder::default().into()).await; - api_server.add_torrent(&info_hash_2, &PeerBuilder::default().into()).await; + api_server + .add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()) + .await; + api_server + .add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()) + .await; let response = Client::new(api_server.get_connection_info()) .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) @@ -319,7 +327,7 @@ mod tracker_apis { let peer = PeerBuilder::default().into(); - api_server.add_torrent(&info_hash, &peer).await; + api_server.add_torrent_peer(&info_hash, &peer).await; let response = Client::new(api_server.get_connection_info()) 
.get_torrent(&info_hash.to_string()) @@ -378,7 +386,7 @@ mod tracker_apis { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - api_server.add_torrent(&info_hash, &PeerBuilder::default().into()).await; + api_server.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) .get_torrent(&info_hash.to_string()) From ea8d4d8432f20a93850a90f6aee0e325e55c7dc6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 22 Feb 2023 13:13:55 +0000 Subject: [PATCH 338/435] feat(http): [#191] add route and extractor for scrape req in Axum HTTP tracker with only one infohash in the URL: http://localhost:7070/scrape?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0 It does not allow more than one infohas yet. --- .../axum_implementation/extractors/mod.rs | 1 + .../extractors/scrape_request.rs | 45 ++++++ src/http/axum_implementation/handlers/mod.rs | 1 + .../axum_implementation/handlers/scrape.rs | 19 +++ .../axum_implementation/handlers/status.rs | 2 +- src/http/axum_implementation/requests/mod.rs | 1 + .../axum_implementation/requests/scrape.rs | 137 ++++++++++++++++++ src/http/axum_implementation/routes.rs | 10 +- 8 files changed, 211 insertions(+), 5 deletions(-) create mode 100644 src/http/axum_implementation/extractors/scrape_request.rs create mode 100644 src/http/axum_implementation/handlers/scrape.rs create mode 100644 src/http/axum_implementation/requests/scrape.rs diff --git a/src/http/axum_implementation/extractors/mod.rs b/src/http/axum_implementation/extractors/mod.rs index 65b2775a9..380eeda6d 100644 --- a/src/http/axum_implementation/extractors/mod.rs +++ b/src/http/axum_implementation/extractors/mod.rs @@ -1,3 +1,4 @@ pub mod announce_request; pub mod peer_ip; pub mod remote_client_ip; +pub mod scrape_request; diff --git a/src/http/axum_implementation/extractors/scrape_request.rs 
b/src/http/axum_implementation/extractors/scrape_request.rs new file mode 100644 index 000000000..4212abfcb --- /dev/null +++ b/src/http/axum_implementation/extractors/scrape_request.rs @@ -0,0 +1,45 @@ +use std::panic::Location; + +use axum::async_trait; +use axum::extract::FromRequestParts; +use axum::http::request::Parts; +use axum::response::{IntoResponse, Response}; + +use crate::http::axum_implementation::query::Query; +use crate::http::axum_implementation::requests::scrape::{ParseScrapeQueryError, Scrape}; +use crate::http::axum_implementation::responses; + +pub struct ExtractRequest(pub Scrape); + +#[async_trait] +impl FromRequestParts for ExtractRequest +where + S: Send + Sync, +{ + type Rejection = Response; + + async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { + let raw_query = parts.uri.query(); + + if raw_query.is_none() { + return Err(responses::error::Error::from(ParseScrapeQueryError::MissingParams { + location: Location::caller(), + }) + .into_response()); + } + + let query = raw_query.unwrap().parse::(); + + if let Err(error) = query { + return Err(responses::error::Error::from(error).into_response()); + } + + let scrape_request = Scrape::try_from(query.unwrap()); + + if let Err(error) = scrape_request { + return Err(responses::error::Error::from(error).into_response()); + } + + Ok(ExtractRequest(scrape_request.unwrap())) + } +} diff --git a/src/http/axum_implementation/handlers/mod.rs b/src/http/axum_implementation/handlers/mod.rs index bff05984c..4e6849534 100644 --- a/src/http/axum_implementation/handlers/mod.rs +++ b/src/http/axum_implementation/handlers/mod.rs @@ -1,2 +1,3 @@ pub mod announce; +pub mod scrape; pub mod status; diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs new file mode 100644 index 000000000..094bf844b --- /dev/null +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -0,0 +1,19 @@ +use std::sync::Arc; + +use axum::extract::State; +use 
log::debug; + +use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; +use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; +use crate::tracker::Tracker; + +#[allow(clippy::unused_async)] +pub async fn handle( + State(_tracker): State>, + ExtractRequest(scrape_request): ExtractRequest, + _remote_client_ip: RemoteClientIp, +) -> String { + debug!("http scrape request: {:#?}", &scrape_request); + + format!("{:#?}", &scrape_request) +} diff --git a/src/http/axum_implementation/handlers/status.rs b/src/http/axum_implementation/handlers/status.rs index d4031aef5..8a058b456 100644 --- a/src/http/axum_implementation/handlers/status.rs +++ b/src/http/axum_implementation/handlers/status.rs @@ -7,6 +7,6 @@ use crate::http::axum_implementation::resources::ok::Ok; use crate::http::axum_implementation::responses::ok; #[allow(clippy::unused_async)] -pub async fn get_status_handler(remote_client_ip: RemoteClientIp) -> Json { +pub async fn handle(remote_client_ip: RemoteClientIp) -> Json { ok::response(&remote_client_ip) } diff --git a/src/http/axum_implementation/requests/mod.rs b/src/http/axum_implementation/requests/mod.rs index 74894de33..776d2dfbf 100644 --- a/src/http/axum_implementation/requests/mod.rs +++ b/src/http/axum_implementation/requests/mod.rs @@ -1 +1,2 @@ pub mod announce; +pub mod scrape; diff --git a/src/http/axum_implementation/requests/scrape.rs b/src/http/axum_implementation/requests/scrape.rs new file mode 100644 index 000000000..483738a03 --- /dev/null +++ b/src/http/axum_implementation/requests/scrape.rs @@ -0,0 +1,137 @@ +use std::panic::Location; + +use thiserror::Error; + +use crate::http::axum_implementation::query::Query; +use crate::http::axum_implementation::responses; +use crate::http::percent_encoding::percent_decode_info_hash; +use crate::located_error::{Located, LocatedError}; +use crate::protocol::info_hash::{ConversionError, InfoHash}; + +pub type NumberOfBytes = i64; + +// Query param 
name +const INFO_HASH_SCRAPE_PARAM: &str = "info_hash"; + +#[derive(Debug, PartialEq)] +pub struct Scrape { + pub info_hashes: Vec, +} + +#[derive(Error, Debug)] +pub enum ParseScrapeQueryError { + #[error("missing query params for scrape request in {location}")] + MissingParams { location: &'static Location<'static> }, + #[error("missing param {param_name} in {location}")] + MissingParam { + location: &'static Location<'static>, + param_name: String, + }, + #[error("invalid param value {param_value} for {param_name} in {location}")] + InvalidParam { + param_name: String, + param_value: String, + location: &'static Location<'static>, + }, + #[error("invalid param value {param_value} for {param_name} in {source}")] + InvalidInfoHashParam { + param_name: String, + param_value: String, + source: LocatedError<'static, ConversionError>, + }, +} + +impl From for responses::error::Error { + fn from(err: ParseScrapeQueryError) -> Self { + responses::error::Error { + failure_reason: format!("Cannot parse query params for scrape request: {err}"), + } + } +} + +impl TryFrom for Scrape { + type Error = ParseScrapeQueryError; + + fn try_from(query: Query) -> Result { + Ok(Self { + info_hashes: extract_info_hashes(&query)?, + }) + } +} + +fn extract_info_hashes(query: &Query) -> Result, ParseScrapeQueryError> { + match query.get_param(INFO_HASH_SCRAPE_PARAM) { + Some(raw_param) => { + let mut info_hashes = vec![]; + + // todo: multiple infohashes + + let info_hash = percent_decode_info_hash(&raw_param).map_err(|err| ParseScrapeQueryError::InvalidInfoHashParam { + param_name: INFO_HASH_SCRAPE_PARAM.to_owned(), + param_value: raw_param.clone(), + source: Located(err).into(), + })?; + + info_hashes.push(info_hash); + + Ok(info_hashes) + } + None => { + return Err(ParseScrapeQueryError::MissingParam { + location: Location::caller(), + param_name: INFO_HASH_SCRAPE_PARAM.to_owned(), + }) + } + } +} + +#[cfg(test)] +mod tests { + + mod scrape_request { + + use 
crate::http::axum_implementation::query::Query; + use crate::http::axum_implementation::requests::scrape::{Scrape, INFO_HASH_SCRAPE_PARAM}; + use crate::protocol::info_hash::InfoHash; + + #[test] + fn should_be_instantiated_from_the_url_query_with_only_one_infohash() { + let raw_query = Query::from(vec![( + INFO_HASH_SCRAPE_PARAM, + "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0", + )]) + .to_string(); + + let query = raw_query.parse::().unwrap(); + + let scrape_request = Scrape::try_from(query).unwrap(); + + assert_eq!( + scrape_request, + Scrape { + info_hashes: vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()], + } + ); + } + + mod when_it_is_instantiated_from_the_url_query_params { + + use crate::http::axum_implementation::query::Query; + use crate::http::axum_implementation::requests::scrape::{Scrape, INFO_HASH_SCRAPE_PARAM}; + + #[test] + fn it_should_fail_if_the_query_does_not_include_the_info_hash_param() { + let raw_query_without_info_hash = "another_param=NOT_RELEVANT"; + + assert!(Scrape::try_from(raw_query_without_info_hash.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_info_hash_param_is_invalid() { + let raw_query = Query::from(vec![(INFO_HASH_SCRAPE_PARAM, "INVALID_INFO_HASH_VALUE")]).to_string(); + + assert!(Scrape::try_from(raw_query.parse::().unwrap()).is_err()); + } + } + } +} diff --git a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs index 6138f5acf..1d4d67e73 100644 --- a/src/http/axum_implementation/routes.rs +++ b/src/http/axum_implementation/routes.rs @@ -4,15 +4,17 @@ use axum::routing::get; use axum::Router; use axum_client_ip::SecureClientIpSource; -use super::handlers::announce::handle; -use super::handlers::status::get_status_handler; +use super::handlers::{announce, scrape, status}; use crate::tracker::Tracker; pub fn router(tracker: &Arc) -> Router { Router::new() // Status - .route("/status", get(get_status_handler)) + .route("/status", 
get(status::handle)) // Announce request - .route("/announce", get(handle).with_state(tracker.clone())) + .route("/announce", get(announce::handle).with_state(tracker.clone())) + // Scrape request + .route("/scrape", get(scrape::handle).with_state(tracker.clone())) + // Add extension to get the client IP from the connection info .layer(SecureClientIpSource::ConnectInfo.into_extension()) } From 0cab696061eade23a0080c4a802e67df2c8c939a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 23 Feb 2023 18:28:31 +0000 Subject: [PATCH 339/435] feat(http): [#191] add cargo dependency: multimap THat dependency will be use to store URL query param in a MultiMap struct, becuase query params can have multiple values like this: ``` param1=value1¶m1=value2 ``` The multimaps allows to add multiple values to a HashMap. --- Cargo.lock | 10 ++++++++++ Cargo.toml | 1 + cSpell.json | 1 + 3 files changed, 12 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 05b439353..cfd8aaba8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1477,6 +1477,15 @@ dependencies = [ "syn", ] +[[package]] +name = "multimap" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" +dependencies = [ + "serde", +] + [[package]] name = "multipart" version = "0.18.0" @@ -2945,6 +2954,7 @@ dependencies = [ "local-ip-address", "log", "mockall", + "multimap", "openssl", "percent-encoding", "r2d2", diff --git a/Cargo.toml b/Cargo.toml index 917bc9e31..fa126a152 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,6 +62,7 @@ axum = "0.6.1" axum-server = { version = "0.4.4", features = ["tls-rustls"] } axum-client-ip = "0.4.0" bip_bencode = "0.4.4" +multimap = "0.8.3" [dev-dependencies] diff --git a/cSpell.json b/cSpell.json index a451d18dc..b8aceb568 100644 --- a/cSpell.json +++ b/cSpell.json @@ -37,6 +37,7 @@ "Lphant", "middlewares", "mockall", + "multimap", "myacicontext", "nanos", "nextest", From 
30cf3b9d66c4452d74719b0164b0258bd106bd50 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 23 Feb 2023 18:30:58 +0000 Subject: [PATCH 340/435] feat(http): [#192] Query struct allows multiple values for the same param The `torrust_tracker::http::axum_implementation::query` allow mutiple values for the same URL query param, for example: ``` param1=value1¶m2=value2 ``` It's needed in the `scrape` request: http://localhost:7070/scrape?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0 --- src/http/axum_implementation/query.rs | 229 ++++++++++++++++++-------- 1 file changed, 157 insertions(+), 72 deletions(-) diff --git a/src/http/axum_implementation/query.rs b/src/http/axum_implementation/query.rs index cad58c17b..8b01e9db7 100644 --- a/src/http/axum_implementation/query.rs +++ b/src/http/axum_implementation/query.rs @@ -1,19 +1,50 @@ -use std::collections::HashMap; use std::panic::Location; use std::str::FromStr; +use multimap::MultiMap; use thiserror::Error; -/// Represent a URL query component with some restrictions. -/// It does not allow duplicate param names like this: `param1=value1¶m1=value2` -/// It would take the second value for `param1`. +type ParamName = String; +type ParamValue = String; + +/// Represent a URL query component: +/// +/// ```text +/// URI = scheme ":" ["//" authority] path ["?" query] ["#" fragment] +/// ``` +#[derive(Debug)] pub struct Query { /* code-review: - - Consider using `HashMap`, because it does not allow you to add a second value for the same param name. - Consider using a third-party crate. - Conversion from/to string is not deterministic. Params can be in a different order in the query string. 
*/ - params: HashMap, + params: MultiMap, +} + +impl Query { + /// Returns only the first param value even if it has multiple values like this: + /// + /// ```text + /// param1=value1¶m1=value2 + /// ``` + /// + /// In that case `get_param("param1")` will return `value1`. + #[must_use] + pub fn get_param(&self, name: &str) -> Option { + self.params.get(name).map(|pair| pair.value.clone()) + } + + /// Returns all the param values as a vector even if it has only one value. + #[must_use] + pub fn get_param_vec(&self, name: &str) -> Option> { + self.params.get_vec(name).map(|pairs| { + let mut param_values = vec![]; + for pair in pairs { + param_values.push(pair.value.to_string()); + } + param_values + }) + } } #[derive(Error, Debug)] @@ -29,13 +60,14 @@ impl FromStr for Query { type Err = ParseQueryError; fn from_str(raw_query: &str) -> Result { - let mut params: HashMap = HashMap::new(); + let mut params: MultiMap = MultiMap::new(); let raw_params = raw_query.trim().trim_start_matches('?').split('&').collect::>(); for raw_param in raw_params { - let param: Param = raw_param.parse()?; - params.insert(param.name, param.value); + let pair: NameValuePair = raw_param.parse()?; + let param_name = pair.name.clone(); + params.insert(param_name, pair); } Ok(Self { params }) @@ -44,10 +76,10 @@ impl FromStr for Query { impl From> for Query { fn from(raw_params: Vec<(&str, &str)>) -> Self { - let mut params: HashMap = HashMap::new(); + let mut params: MultiMap = MultiMap::new(); for raw_param in raw_params { - params.insert(raw_param.0.to_owned(), raw_param.1.to_owned()); + params.insert(raw_param.0.to_owned(), NameValuePair::new(raw_param.0, raw_param.1)); } Self { params } @@ -58,8 +90,8 @@ impl std::fmt::Display for Query { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let query = self .params - .iter() - .map(|param| format!("{}", Param::new(param.0, param.1))) + .iter_all() + .map(|param| format!("{}", FieldValuePairSet::from_vec(param.1))) .collect::>() 
.join("&"); @@ -67,20 +99,22 @@ impl std::fmt::Display for Query { } } -impl Query { - #[must_use] - pub fn get_param(&self, name: &str) -> Option { - self.params.get(name).map(std::string::ToString::to_string) - } +#[derive(Debug, PartialEq, Clone)] +struct NameValuePair { + name: ParamName, + value: ParamValue, } -#[derive(Debug, PartialEq)] -struct Param { - name: String, - value: String, +impl NameValuePair { + pub fn new(name: &str, value: &str) -> Self { + Self { + name: name.to_owned(), + value: value.to_owned(), + } + } } -impl FromStr for Param { +impl FromStr for NameValuePair { type Err = ParseQueryError; fn from_str(raw_param: &str) -> Result { @@ -100,18 +134,39 @@ impl FromStr for Param { } } -impl std::fmt::Display for Param { +impl std::fmt::Display for NameValuePair { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "{}={}", self.name, self.value) } } -impl Param { - pub fn new(name: &str, value: &str) -> Self { - Self { - name: name.to_owned(), - value: value.to_owned(), +#[derive(Debug, PartialEq)] +struct FieldValuePairSet { + pairs: Vec, +} + +impl FieldValuePairSet { + fn from_vec(pair_vec: &Vec) -> Self { + let mut pairs: Vec = vec![]; + + for pair in pair_vec { + pairs.push(pair.clone()); } + + Self { pairs } + } +} + +impl std::fmt::Display for FieldValuePairSet { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let query = self + .pairs + .iter() + .map(|pair| format!("{pair}")) + .collect::>() + .join("&"); + + write!(f, "{query}") } } @@ -136,6 +191,14 @@ mod tests { assert_eq!(query.get_param("port").unwrap(), "17548"); } + #[test] + fn should_be_instantiated_from_a_string_pair_vector() { + let query = Query::from(vec![("param1", "value1"), ("param2", "value2")]); + + assert_eq!(query.get_param("param1"), Some("value1".to_string())); + assert_eq!(query.get_param("param2"), Some("value2".to_string())); + } + #[test] fn should_fail_parsing_an_invalid_query_string() { let invalid_raw_query = 
"name=value=value"; @@ -151,7 +214,7 @@ mod tests { let query = raw_query.parse::().unwrap(); - assert_eq!(query.get_param("name").unwrap(), "value"); + assert_eq!(query.get_param("name"), Some("value".to_string())); } #[test] @@ -160,61 +223,83 @@ mod tests { let query = raw_query.parse::().unwrap(); - assert_eq!(query.get_param("name").unwrap(), "value"); - } - - #[test] - fn should_be_instantiated_from_a_string_pair_vector() { - let query = Query::from(vec![("param1", "value1"), ("param2", "value2")]).to_string(); - - assert!(query == "param1=value1¶m2=value2" || query == "param2=value2¶m1=value1"); + assert_eq!(query.get_param("name"), Some("value".to_string())); } - #[test] - fn should_not_allow_more_than_one_value_for_the_same_param() { - let query = Query::from(vec![("param1", "value1"), ("param1", "value2"), ("param1", "value3")]).to_string(); - - assert_eq!(query, "param1=value3"); + mod should_allow_more_than_one_value_for_the_same_param { + use crate::http::axum_implementation::query::Query; + + #[test] + fn instantiated_from_a_vector() { + let query1 = Query::from(vec![("param1", "value1"), ("param1", "value2")]); + assert_eq!( + query1.get_param_vec("param1"), + Some(vec!["value1".to_string(), "value2".to_string()]) + ); + } + + #[test] + fn parsed_from_an_string() { + let query2 = "param1=value1¶m1=value2".parse::().unwrap(); + assert_eq!( + query2.get_param_vec("param1"), + Some(vec!["value1".to_string(), "value2".to_string()]) + ); + } } - #[test] - fn should_be_displayed() { - let query = "param1=value1¶m2=value2".parse::().unwrap().to_string(); - - assert!(query == "param1=value1¶m2=value2" || query == "param2=value2¶m1=value1"); + mod should_be_displayed { + use crate::http::axum_implementation::query::Query; + + #[test] + fn with_one_param() { + assert_eq!("param1=value1".parse::().unwrap().to_string(), "param1=value1"); + } + + #[test] + fn with_multiple_params() { + let query = "param1=value1¶m2=value2".parse::().unwrap().to_string(); + 
assert!(query == "param1=value1¶m2=value2" || query == "param2=value2¶m1=value1"); + } + + #[test] + fn with_multiple_values_for_the_same_param() { + let query = "param1=value1¶m1=value2".parse::().unwrap().to_string(); + assert!(query == "param1=value1¶m1=value2" || query == "param1=value2¶m1=value1"); + } } - } - mod url_query_param { - use crate::http::axum_implementation::query::Param; + mod param_name_value_pair { + use crate::http::axum_implementation::query::NameValuePair; - #[test] - fn should_parse_a_single_query_param() { - let raw_param = "name=value"; + #[test] + fn should_parse_a_single_query_param() { + let raw_param = "name=value"; - let param = raw_param.parse::().unwrap(); + let param = raw_param.parse::().unwrap(); - assert_eq!( - param, - Param { - name: "name".to_string(), - value: "value".to_string(), - } - ); - } + assert_eq!( + param, + NameValuePair { + name: "name".to_string(), + value: "value".to_string(), + } + ); + } - #[test] - fn should_fail_parsing_an_invalid_query_param() { - let invalid_raw_param = "name=value=value"; + #[test] + fn should_fail_parsing_an_invalid_query_param() { + let invalid_raw_param = "name=value=value"; - let query = invalid_raw_param.parse::(); + let query = invalid_raw_param.parse::(); - assert!(query.is_err()); - } + assert!(query.is_err()); + } - #[test] - fn should_be_displayed() { - assert_eq!("name=value".parse::().unwrap().to_string(), "name=value"); + #[test] + fn should_be_displayed() { + assert_eq!("name=value".parse::().unwrap().to_string(), "name=value"); + } } } } From 2de8265eaba098f5c69cd8cbfcbc37f05d958044 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 23 Feb 2023 18:33:47 +0000 Subject: [PATCH 341/435] feat(http): [#191] parse scrape req with multiple infohashes --- .../axum_implementation/requests/scrape.rs | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/http/axum_implementation/requests/scrape.rs 
b/src/http/axum_implementation/requests/scrape.rs index 483738a03..0f23039bb 100644 --- a/src/http/axum_implementation/requests/scrape.rs +++ b/src/http/axum_implementation/requests/scrape.rs @@ -60,19 +60,20 @@ impl TryFrom for Scrape { } fn extract_info_hashes(query: &Query) -> Result, ParseScrapeQueryError> { - match query.get_param(INFO_HASH_SCRAPE_PARAM) { - Some(raw_param) => { + match query.get_param_vec(INFO_HASH_SCRAPE_PARAM) { + Some(raw_params) => { let mut info_hashes = vec![]; - // todo: multiple infohashes + for raw_param in raw_params { + let info_hash = + percent_decode_info_hash(&raw_param).map_err(|err| ParseScrapeQueryError::InvalidInfoHashParam { + param_name: INFO_HASH_SCRAPE_PARAM.to_owned(), + param_value: raw_param.clone(), + source: Located(err).into(), + })?; - let info_hash = percent_decode_info_hash(&raw_param).map_err(|err| ParseScrapeQueryError::InvalidInfoHashParam { - param_name: INFO_HASH_SCRAPE_PARAM.to_owned(), - param_value: raw_param.clone(), - source: Located(err).into(), - })?; - - info_hashes.push(info_hash); + info_hashes.push(info_hash); + } Ok(info_hashes) } From 0c7735a0b14a03f1268daa41b232d0918cbfe37f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 23 Feb 2023 18:34:13 +0000 Subject: [PATCH 342/435] fix(http): typo in comment --- src/http/percent_encoding.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/http/percent_encoding.rs b/src/http/percent_encoding.rs index 9b5b79ed7..3774519fb 100644 --- a/src/http/percent_encoding.rs +++ b/src/http/percent_encoding.rs @@ -3,7 +3,7 @@ use crate::tracker::peer::{self, IdConversionError}; /// # Errors /// -/// Will return `Err` if if the decoded bytes do not represent a valid `InfoHash`. +/// Will return `Err` if the decoded bytes do not represent a valid `InfoHash`. 
pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result { let bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); InfoHash::try_from(bytes) From c4bee79c7ad15018bffbf39e69e663022aac16b6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 24 Feb 2023 16:43:06 +0000 Subject: [PATCH 343/435] feat(http): [#191] add Tracker::scrape function This function returns the data we need for a scrape response regardless the method that the client is using to communicate with the tracker (UDP or HTTP). --- .../axum_implementation/handlers/scrape.rs | 14 +- src/tracker/mod.rs | 348 +++++++++++++----- src/tracker/torrent.rs | 18 + 3 files changed, 286 insertions(+), 94 deletions(-) diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 094bf844b..2246ea7db 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -9,11 +9,21 @@ use crate::tracker::Tracker; #[allow(clippy::unused_async)] pub async fn handle( - State(_tracker): State>, + State(tracker): State>, ExtractRequest(scrape_request): ExtractRequest, _remote_client_ip: RemoteClientIp, ) -> String { debug!("http scrape request: {:#?}", &scrape_request); - format!("{:#?}", &scrape_request) + /* + todo: + - Add the service that sends the event for statistics. + - Build the HTTP bencoded response. 
+ */ + + let scrape_data = tracker.scrape(&scrape_request.info_hashes).await; + + debug!("scrape data: {:#?}", &scrape_data); + + "todo".to_string() } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index e01fe6a19..0a3bd7c0b 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -7,7 +7,7 @@ pub mod statistics; pub mod torrent; use std::collections::btree_map::Entry; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap}; use std::net::IpAddr; use std::panic::Location; use std::sync::Arc; @@ -18,7 +18,7 @@ use tokio::sync::{RwLock, RwLockReadGuard}; use self::error::Error; use self::peer::Peer; -use self::torrent::SwamStats; +use self::torrent::{SwamStats, SwarmMetadata}; use crate::config::Configuration; use crate::databases::driver::Driver; use crate::databases::{self, Database}; @@ -50,6 +50,27 @@ pub struct AnnounceData { pub interval_min: u32, } +#[derive(Debug, PartialEq, Default)] +pub struct ScrapeData { + files: HashMap, +} + +impl ScrapeData { + #[must_use] + pub fn empty() -> Self { + let files: HashMap = HashMap::new(); + Self { files } + } + + pub fn add_file(&mut self, info_hash: &InfoHash, swarm_metadata: SwarmMetadata) { + self.files.insert(*info_hash, swarm_metadata); + } + + pub fn add_file_with_no_metadata(&mut self, info_hash: &InfoHash) { + self.files.insert(*info_hash, SwarmMetadata::default()); + } +} + impl Tracker { /// # Errors /// @@ -85,8 +106,14 @@ impl Tracker { self.mode == mode::Mode::Listed || self.mode == mode::Mode::PrivateListed } - /// It handles an announce request + /// It handles an announce request. + /// + /// BEP 03: [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html). pub async fn announce(&self, info_hash: &InfoHash, peer: &mut Peer, remote_client_ip: &IpAddr) -> AnnounceData { + // code-review: maybe instead of mutating the peer we could just return + // a tuple with the new peer and the announce data: (Peer, AnnounceData). 
+ // It could even be a different struct: `StoredPeer` or `PublicPeer`. + peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.get_ext_ip())); let swam_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; @@ -101,6 +128,27 @@ impl Tracker { } } + /// It handles a scrape request. + /// + /// BEP 48: [Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). + pub async fn scrape(&self, info_hashes: &Vec) -> ScrapeData { + let mut scrape_data = ScrapeData::empty(); + + for info_hash in info_hashes { + scrape_data.add_file(info_hash, self.get_swarm_metadata(info_hash).await); + } + + scrape_data + } + + async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { + let torrents = self.get_torrents().await; + match torrents.get(info_hash) { + Some(torrent_entry) => torrent_entry.get_swarm_metadata(), + None => SwarmMetadata::default(), + } + } + /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. 
@@ -416,143 +464,259 @@ fn assign_ip_address_to_peer(remote_client_ip: &IpAddr, tracker_external_ip: Opt #[cfg(test)] mod tests { - use std::sync::Arc; - use super::statistics::Keeper; - use super::{TorrentsMetrics, Tracker}; - use crate::config::{ephemeral_configuration, Configuration}; + mod the_tracker { - pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral_configuration()) - } + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + + use crate::config::{ephemeral_configuration, Configuration}; + use crate::protocol::clock::DurationSinceUnixEpoch; + use crate::tracker::peer::{self, Peer}; + use crate::tracker::statistics::Keeper; + use crate::tracker::{TorrentsMetrics, Tracker}; + + pub fn tracker_configuration() -> Arc { + Arc::new(ephemeral_configuration()) + } - pub fn tracker_factory() -> Tracker { - // code-review: the tracker initialization is duplicated in many places. Consider make this function public. + pub fn tracker_factory() -> Tracker { + // code-review: the tracker initialization is duplicated in many places. Consider make this function public. 
- // Configuration - let configuration = tracker_configuration(); + // Configuration + let configuration = tracker_configuration(); - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - // Initialize Torrust tracker - match Tracker::new(&configuration, Some(stats_event_sender), stats_repository) { - Ok(tracker) => tracker, - Err(error) => { - panic!("{}", error) + // Initialize Torrust tracker + match Tracker::new(&configuration, Some(stats_event_sender), stats_repository) { + Ok(tracker) => tracker, + Err(error) => { + panic!("{}", error) + } } } - } - #[tokio::test] - async fn the_tracker_should_collect_torrent_metrics() { - let tracker = tracker_factory(); - - let torrents_metrics = tracker.get_torrents_metrics().await; + /// A peer that has completed downloading. + fn complete_peer() -> Peer { + Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } - assert_eq!( - torrents_metrics, - TorrentsMetrics { - seeders: 0, - completed: 0, - leechers: 0, - torrents: 0 + /// A peer that has NOT completed downloading. 
+ fn incomplete_peer() -> Peer { + Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(1000), // Still bytes to download + event: AnnounceEvent::Started, } - ); - } + } - mod the_tracker_assigning_the_ip_to_the_peer { + #[tokio::test] + async fn should_collect_torrent_metrics() { + let tracker = tracker_factory(); - use std::net::{IpAddr, Ipv4Addr}; + let torrents_metrics = tracker.get_torrents_metrics().await; - use crate::tracker::assign_ip_address_to_peer; + assert_eq!( + torrents_metrics, + TorrentsMetrics { + seeders: 0, + completed: 0, + leechers: 0, + torrents: 0 + } + ); + } - #[test] - fn should_use_the_source_ip_instead_of_the_ip_in_the_announce_request() { - let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); + mod handling_an_announce_request { + mod should_assign_the_ip_to_the_peer { - let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + use std::net::{IpAddr, Ipv4Addr}; - assert_eq!(peer_ip, remote_ip); - } + use crate::tracker::assign_ip_address_to_peer; - mod when_the_client_ip_is_a_ipv4_loopback_ip { + #[test] + fn using_the_source_ip_instead_of_the_ip_in_the_announce_request() { + let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - use std::str::FromStr; + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); - use crate::tracker::assign_ip_address_to_peer; + assert_eq!(peer_ip, remote_ip); + } - #[test] - fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { - let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + mod and_when_the_client_ip_is_a_ipv4_loopback_ip { - let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::str::FromStr; - assert_eq!(peer_ip, 
remote_ip); - } + use crate::tracker::assign_ip_address_to_peer; - #[test] - fn it_should_use_the_external_tracker_ip_in_tracker_configuration_if_it_is_defined() { - let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + #[test] + fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); - let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); - let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + assert_eq!(peer_ip, remote_ip); + } - assert_eq!(peer_ip, tracker_external_ip); - } + #[test] + fn it_should_use_the_external_tracker_ip_in_tracker_configuration_if_it_is_defined() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv6_ip( + ) { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let tracker_external_ip = + IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + } + + mod and_when_client_ip_is_a_ipv6_loopback_ip { - #[test] - fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv6_ip() - { - let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::str::FromStr; - let tracker_external_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + use crate::tracker::assign_ip_address_to_peer; - let peer_ip = 
assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + #[test] + fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); - assert_eq!(peer_ip, tracker_external_ip); + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + assert_eq!(peer_ip, remote_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_tracker_configuration_if_it_is_defined() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let tracker_external_ip = + IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv4_ip( + ) { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + } } } - mod when_client_ip_is_a_ipv6_loopback_ip { + mod handling_a_scrape_request { - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - use std::str::FromStr; + use std::net::{IpAddr, Ipv4Addr}; - use crate::tracker::assign_ip_address_to_peer; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::tests::the_tracker::{complete_peer, incomplete_peer, tracker_factory}; + use crate::tracker::{ScrapeData, SwarmMetadata}; - #[test] - fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { - let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + #[tokio::test] + async fn it_should_return_a_zeroed_swarm_metadata_for_the_requested_file_if_the_tracker_does_not_have_that_torrent() { + let tracker = tracker_factory(); - let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + 
let info_hashes = vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()]; - assert_eq!(peer_ip, remote_ip); - } + let scrape_data = tracker.scrape(&info_hashes).await; - #[test] - fn it_should_use_the_external_ip_in_tracker_configuration_if_it_is_defined() { - let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + let mut expected_scrape_data = ScrapeData::empty(); - let tracker_external_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + expected_scrape_data.add_file_with_no_metadata(&info_hashes[0]); - let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + assert_eq!(scrape_data, expected_scrape_data); + } - assert_eq!(peer_ip, tracker_external_ip); + #[tokio::test] + async fn it_should_return_the_swarm_metadata_for_the_requested_file_if_the_tracker_has_that_torrent() { + let tracker = tracker_factory(); + + let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); + + // Announce a "complete" peer for the torrent + let mut complete_peer = complete_peer(); + tracker + .announce(&info_hash, &mut complete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10))) + .await; + + // Announce an "incomplete" peer for the torrent + let mut incomplete_peer = incomplete_peer(); + tracker + .announce(&info_hash, &mut incomplete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11))) + .await; + + // Scrape + let scrape_data = tracker.scrape(&vec![info_hash]).await; + + // The expected swarm metadata for the file + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file( + &info_hash, + SwarmMetadata { + complete: 0, // the "complete" peer does not count because it was not previously known + downloaded: 0, + incomplete: 1, // the "incomplete" peer we have just announced + }, + ); + + assert_eq!(scrape_data, expected_scrape_data); } - #[test] - fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv4_ip() - { - 
let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + #[tokio::test] + async fn it_should_allow_scraping_for_multiple_torrents() { + let tracker = tracker_factory(); + + let info_hashes = vec![ + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1".parse::().unwrap(), + ]; - let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + let scrape_data = tracker.scrape(&info_hashes).await; - let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file_with_no_metadata(&info_hashes[0]); + expected_scrape_data.add_file_with_no_metadata(&info_hashes[1]); - assert_eq!(peer_ip, tracker_external_ip); + assert_eq!(scrape_data, expected_scrape_data); } } } diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 3161cd36b..34017599d 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -14,6 +14,13 @@ pub struct Entry { pub completed: u32, } +#[derive(Debug, PartialEq, Default)] +pub struct SwarmMetadata { + pub complete: u32, // The number of active peers that have completed downloading + pub downloaded: u32, // The number of peers that have ever completed downloading + pub incomplete: u32, // The number of active peers that have not completed downloading +} + impl Entry { #[must_use] pub fn new() -> Entry { @@ -74,6 +81,17 @@ impl Entry { (seeders, self.completed, leechers) } + #[must_use] + pub fn get_swarm_metadata(&self) -> SwarmMetadata { + // code-review: consider using always this function instead of `get_stats`. 
+ let (seeders, completed, leechers) = self.get_stats(); + SwarmMetadata { + complete: seeders, + downloaded: completed, + incomplete: leechers, + } + } + pub fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { let current_cutoff = Current::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); self.peers.retain(|_, peer| peer.updated > current_cutoff); From ae1a076c57bc74fbd73dc42e54df373513c642d3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 24 Feb 2023 17:23:47 +0000 Subject: [PATCH 344/435] feat(http): [#191] add scrape app service --- .../axum_implementation/extractors/peer_ip.rs | 2 +- .../axum_implementation/handlers/announce.rs | 4 ++-- .../axum_implementation/handlers/scrape.rs | 21 +++++++++++++------ .../axum_implementation/services/announce.rs | 4 ++-- src/http/axum_implementation/services/mod.rs | 1 + .../axum_implementation/services/scrape.rs | 20 ++++++++++++++++++ src/tracker/statistics.rs | 2 ++ tests/http_tracker.rs | 6 ++---- 8 files changed, 45 insertions(+), 15 deletions(-) create mode 100644 src/http/axum_implementation/services/scrape.rs diff --git a/src/http/axum_implementation/extractors/peer_ip.rs b/src/http/axum_implementation/extractors/peer_ip.rs index 9f7e92a9b..aae348d99 100644 --- a/src/http/axum_implementation/extractors/peer_ip.rs +++ b/src/http/axum_implementation/extractors/peer_ip.rs @@ -31,7 +31,7 @@ impl From for responses::error::Error { /// /// Will return an error if the peer IP cannot be obtained according to the configuration. /// For example, if the IP is extracted from an HTTP header which is missing in the request. 
-pub fn assign_ip_address_to_peer(on_reverse_proxy: bool, remote_client_ip: &RemoteClientIp) -> Result { +pub fn resolve(on_reverse_proxy: bool, remote_client_ip: &RemoteClientIp) -> Result { if on_reverse_proxy { if let Some(ip) = remote_client_ip.right_most_x_forwarded_for { Ok(ip) diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 81f57e810..d5fa7f3a4 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -7,7 +7,7 @@ use axum::response::{IntoResponse, Response}; use log::debug; use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; -use crate::http::axum_implementation::extractors::peer_ip::assign_ip_address_to_peer; +use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::requests::announce::{Announce, Compact, Event}; use crate::http::axum_implementation::responses::announce; @@ -24,7 +24,7 @@ pub async fn handle( ) -> Response { debug!("http announce request: {:#?}", announce_request); - let peer_ip = match assign_ip_address_to_peer(tracker.config.on_reverse_proxy, &remote_client_ip) { + let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, &remote_client_ip) { Ok(peer_ip) => peer_ip, Err(err) => return err, }; diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 2246ea7db..1f1d3ece9 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -1,29 +1,38 @@ use std::sync::Arc; use axum::extract::State; +use axum::http::StatusCode; +use axum::response::{IntoResponse, Response}; use log::debug; +use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use 
crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; +use crate::http::axum_implementation::services; use crate::tracker::Tracker; #[allow(clippy::unused_async)] pub async fn handle( State(tracker): State>, ExtractRequest(scrape_request): ExtractRequest, - _remote_client_ip: RemoteClientIp, -) -> String { + remote_client_ip: RemoteClientIp, +) -> Response { debug!("http scrape request: {:#?}", &scrape_request); /* todo: - - Add the service that sends the event for statistics. - - Build the HTTP bencoded response. + - [x] Add the service that sends the event for statistics. + - [ ] Build the HTTP bencoded response. */ - let scrape_data = tracker.scrape(&scrape_request.info_hashes).await; + let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, &remote_client_ip) { + Ok(peer_ip) => peer_ip, + Err(err) => return err, + }; + + let scrape_data = services::scrape::invoke(tracker.clone(), &scrape_request.info_hashes, &peer_ip).await; debug!("scrape data: {:#?}", &scrape_data); - "todo".to_string() + (StatusCode::OK, "todo").into_response() } diff --git a/src/http/axum_implementation/services/announce.rs b/src/http/axum_implementation/services/announce.rs index 6378c3008..356dbaeb9 100644 --- a/src/http/axum_implementation/services/announce.rs +++ b/src/http/axum_implementation/services/announce.rs @@ -9,7 +9,7 @@ pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) let original_peer_ip = peer.peer_addr.ip(); // The tracker could change the original peer ip - let response = tracker.announce(&info_hash, peer, &original_peer_ip).await; + let announce_data = tracker.announce(&info_hash, peer, &original_peer_ip).await; match original_peer_ip { IpAddr::V4(_) => { @@ -20,5 +20,5 @@ pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) } } - response + announce_data } diff --git a/src/http/axum_implementation/services/mod.rs b/src/http/axum_implementation/services/mod.rs index 74894de33..776d2dfbf 100644 
--- a/src/http/axum_implementation/services/mod.rs +++ b/src/http/axum_implementation/services/mod.rs @@ -1 +1,2 @@ pub mod announce; +pub mod scrape; diff --git a/src/http/axum_implementation/services/scrape.rs b/src/http/axum_implementation/services/scrape.rs new file mode 100644 index 000000000..f40b8f999 --- /dev/null +++ b/src/http/axum_implementation/services/scrape.rs @@ -0,0 +1,20 @@ +use std::net::IpAddr; +use std::sync::Arc; + +use crate::protocol::info_hash::InfoHash; +use crate::tracker::{statistics, ScrapeData, Tracker}; + +pub async fn invoke(tracker: Arc, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { + let scrape_data = tracker.scrape(info_hashes).await; + + match original_peer_ip { + IpAddr::V4(_) => { + tracker.send_stats_event(statistics::Event::Tcp4Scrape).await; + } + IpAddr::V6(_) => { + tracker.send_stats_event(statistics::Event::Tcp6Scrape).await; + } + } + + scrape_data +} diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index f9f6253fd..f9079962c 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -11,6 +11,8 @@ const CHANNEL_BUFFER_SIZE: usize = 65_535; #[derive(Debug, PartialEq, Eq)] pub enum Event { + // code-review: consider one single event for request type with data: Event::Announce { scheme: HTTPorUDP, ip_version: V4orV6 } + // Attributes are enums too. 
Tcp4Announce, Tcp4Scrape, Tcp6Announce, diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index a09802724..d324e560b 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -2354,8 +2354,7 @@ mod axum_http_tracker_server { assert_scrape_response(response, &expected_scrape_response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { let http_tracker = start_public_http_tracker(Version::Axum).await; @@ -2374,8 +2373,7 @@ mod axum_http_tracker_server { assert_eq!(stats.tcp4_scrapes_handled, 1); } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { let http_tracker = start_ipv6_http_tracker(Version::Axum).await; From 86ce93cb9e0d5113bccaeba9c16abe6ffdeafcad Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 27 Feb 2023 13:07:00 +0000 Subject: [PATCH 345/435] feat(http): [#192] scrape request for Axum HTTP tracker --- .../axum_implementation/handlers/scrape.rs | 13 +-- .../axum_implementation/requests/scrape.rs | 28 ++--- src/http/axum_implementation/responses/mod.rs | 1 + .../axum_implementation/responses/scrape.rs | 106 ++++++++++++++++++ src/tracker/mod.rs | 2 +- tests/http/asserts.rs | 46 +++++--- tests/http_tracker.rs | 30 +++-- 7 files changed, 162 insertions(+), 64 deletions(-) create mode 100644 src/http/axum_implementation/responses/scrape.rs diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 1f1d3ece9..51b6fa84d 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -1,14 +1,13 @@ use std::sync::Arc; use axum::extract::State; -use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; use log::debug; use crate::http::axum_implementation::extractors::peer_ip; use 
crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; -use crate::http::axum_implementation::services; +use crate::http::axum_implementation::{responses, services}; use crate::tracker::Tracker; #[allow(clippy::unused_async)] @@ -19,12 +18,6 @@ pub async fn handle( ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); - /* - todo: - - [x] Add the service that sends the event for statistics. - - [ ] Build the HTTP bencoded response. - */ - let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, &remote_client_ip) { Ok(peer_ip) => peer_ip, Err(err) => return err, @@ -32,7 +25,5 @@ pub async fn handle( let scrape_data = services::scrape::invoke(tracker.clone(), &scrape_request.info_hashes, &peer_ip).await; - debug!("scrape data: {:#?}", &scrape_data); - - (StatusCode::OK, "todo").into_response() + responses::scrape::Bencoded::from(scrape_data).into_response() } diff --git a/src/http/axum_implementation/requests/scrape.rs b/src/http/axum_implementation/requests/scrape.rs index 0f23039bb..da50d4be5 100644 --- a/src/http/axum_implementation/requests/scrape.rs +++ b/src/http/axum_implementation/requests/scrape.rs @@ -10,8 +10,8 @@ use crate::protocol::info_hash::{ConversionError, InfoHash}; pub type NumberOfBytes = i64; -// Query param name -const INFO_HASH_SCRAPE_PARAM: &str = "info_hash"; +// Query param names +const INFO_HASH: &str = "info_hash"; #[derive(Debug, PartialEq)] pub struct Scrape { @@ -27,12 +27,6 @@ pub enum ParseScrapeQueryError { location: &'static Location<'static>, param_name: String, }, - #[error("invalid param value {param_value} for {param_name} in {location}")] - InvalidParam { - param_name: String, - param_value: String, - location: &'static Location<'static>, - }, #[error("invalid param value {param_value} for {param_name} in {source}")] InvalidInfoHashParam { param_name: String, @@ -60,14 +54,14 @@ impl 
TryFrom for Scrape { } fn extract_info_hashes(query: &Query) -> Result, ParseScrapeQueryError> { - match query.get_param_vec(INFO_HASH_SCRAPE_PARAM) { + match query.get_param_vec(INFO_HASH) { Some(raw_params) => { let mut info_hashes = vec![]; for raw_param in raw_params { let info_hash = percent_decode_info_hash(&raw_param).map_err(|err| ParseScrapeQueryError::InvalidInfoHashParam { - param_name: INFO_HASH_SCRAPE_PARAM.to_owned(), + param_name: INFO_HASH.to_owned(), param_value: raw_param.clone(), source: Located(err).into(), })?; @@ -80,7 +74,7 @@ fn extract_info_hashes(query: &Query) -> Result, ParseScrapeQueryE None => { return Err(ParseScrapeQueryError::MissingParam { location: Location::caller(), - param_name: INFO_HASH_SCRAPE_PARAM.to_owned(), + param_name: INFO_HASH.to_owned(), }) } } @@ -92,16 +86,12 @@ mod tests { mod scrape_request { use crate::http::axum_implementation::query::Query; - use crate::http::axum_implementation::requests::scrape::{Scrape, INFO_HASH_SCRAPE_PARAM}; + use crate::http::axum_implementation::requests::scrape::{Scrape, INFO_HASH}; use crate::protocol::info_hash::InfoHash; #[test] fn should_be_instantiated_from_the_url_query_with_only_one_infohash() { - let raw_query = Query::from(vec![( - INFO_HASH_SCRAPE_PARAM, - "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0", - )]) - .to_string(); + let raw_query = Query::from(vec![(INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0")]).to_string(); let query = raw_query.parse::().unwrap(); @@ -118,7 +108,7 @@ mod tests { mod when_it_is_instantiated_from_the_url_query_params { use crate::http::axum_implementation::query::Query; - use crate::http::axum_implementation::requests::scrape::{Scrape, INFO_HASH_SCRAPE_PARAM}; + use crate::http::axum_implementation::requests::scrape::{Scrape, INFO_HASH}; #[test] fn it_should_fail_if_the_query_does_not_include_the_info_hash_param() { @@ -129,7 +119,7 @@ mod tests { #[test] fn it_should_fail_if_the_info_hash_param_is_invalid() { - let 
raw_query = Query::from(vec![(INFO_HASH_SCRAPE_PARAM, "INVALID_INFO_HASH_VALUE")]).to_string(); + let raw_query = Query::from(vec![(INFO_HASH, "INVALID_INFO_HASH_VALUE")]).to_string(); assert!(Scrape::try_from(raw_query.parse::().unwrap()).is_err()); } diff --git a/src/http/axum_implementation/responses/mod.rs b/src/http/axum_implementation/responses/mod.rs index ad7d0a78c..7e8666934 100644 --- a/src/http/axum_implementation/responses/mod.rs +++ b/src/http/axum_implementation/responses/mod.rs @@ -1,3 +1,4 @@ pub mod announce; pub mod error; pub mod ok; +pub mod scrape; diff --git a/src/http/axum_implementation/responses/scrape.rs b/src/http/axum_implementation/responses/scrape.rs new file mode 100644 index 000000000..3fc34a0e5 --- /dev/null +++ b/src/http/axum_implementation/responses/scrape.rs @@ -0,0 +1,106 @@ +use std::borrow::Cow; + +use axum::http::StatusCode; +use axum::response::{IntoResponse, Response}; +use bip_bencode::{ben_int, ben_map, BMutAccess}; + +use crate::tracker::ScrapeData; + +#[derive(Debug, PartialEq, Default)] +pub struct Bencoded { + scrape_data: ScrapeData, +} + +impl Bencoded { + /// # Panics + /// + /// Will return an error if it can't access the bencode as a mutable `BDictAccess`. + #[must_use] + pub fn body(&self) -> Vec { + let mut scrape_list = ben_map!(); + + let scrape_list_mut = scrape_list.dict_mut().unwrap(); + + for (info_hash, value) in &self.scrape_data.files { + scrape_list_mut.insert( + Cow::from(info_hash.bytes().to_vec()), + ben_map! { + "complete" => ben_int!(i64::from(value.complete)), + "downloaded" => ben_int!(i64::from(value.downloaded)), + "incomplete" => ben_int!(i64::from(value.incomplete)) + }, + ); + } + + (ben_map! 
{ + "files" => scrape_list + }) + .encode() + } +} + +impl From for Bencoded { + fn from(scrape_data: ScrapeData) -> Self { + Self { scrape_data } + } +} + +impl IntoResponse for Bencoded { + fn into_response(self) -> Response { + (StatusCode::OK, self.body()).into_response() + } +} + +#[cfg(test)] +mod tests { + + mod scrape_response { + use crate::http::axum_implementation::responses::scrape::Bencoded; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::torrent::SwarmMetadata; + use crate::tracker::ScrapeData; + + fn sample_scrape_data() -> ScrapeData { + let info_hash = InfoHash([0x69; 20]); + let mut scrape_data = ScrapeData::empty(); + scrape_data.add_file( + &info_hash, + SwarmMetadata { + complete: 1, + downloaded: 2, + incomplete: 3, + }, + ); + scrape_data + } + + #[test] + fn should_be_converted_from_scrape_data() { + let response = Bencoded::from(sample_scrape_data()); + + assert_eq!( + response, + Bencoded { + scrape_data: sample_scrape_data() + } + ); + } + + #[test] + fn should_be_bencoded() { + let response = Bencoded { + scrape_data: sample_scrape_data(), + }; + + let bytes = response.body(); + + // cspell:disable-next-line + let expected_bytes = b"d5:filesd20:iiiiiiiiiiiiiiiiiiiid8:completei1e10:downloadedi2e10:incompletei3eeee"; + + assert_eq!( + String::from_utf8(bytes).unwrap(), + String::from_utf8(expected_bytes.to_vec()).unwrap() + ); + } + } +} diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 0a3bd7c0b..3e5e97439 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -52,7 +52,7 @@ pub struct AnnounceData { #[derive(Debug, PartialEq, Default)] pub struct ScrapeData { - files: HashMap, + pub files: HashMap, } impl ScrapeData { diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index a10edc9e6..cd45571da 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -78,6 +78,36 @@ pub async fn assert_is_announce_response(response: Response) { // Error responses +// Specific errors for announce request + 
+pub async fn assert_missing_query_params_for_announce_request_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + "missing query params for announce request", + Location::caller(), + ); +} + +pub async fn assert_bad_announce_request_error_response(response: Response, failure: &str) { + assert_cannot_parse_query_params_error_response(response, &format!(" for announce request: {failure}")).await; +} + +// Specific errors for scrape request + +pub async fn assert_missing_query_params_for_scrape_request_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + "missing query params for scrape request", + Location::caller(), + ); +} + +// Other errors + pub async fn assert_internal_server_error_response(response: Response) { assert_eq!(response.status(), 200); @@ -156,22 +186,6 @@ pub async fn assert_invalid_remote_address_on_xff_header_error_response(response ); } -// Specific errors for announce request - -pub async fn assert_missing_query_params_for_announce_request_error_response(response: Response) { - assert_eq!(response.status(), 200); - - assert_bencoded_error( - &response.text().await.unwrap(), - "missing query params for announce request", - Location::caller(), - ); -} - -pub async fn assert_bad_announce_request_error_response(response: Response, failure: &str) { - assert_cannot_parse_query_params_error_response(response, &format!(" for announce request: {failure}")).await; -} - pub async fn assert_cannot_parse_query_param_error_response(response: Response, failure: &str) { assert_cannot_parse_query_params_error_response(response, &format!(": {failure}")).await; } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index d324e560b..a341e13ed 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -2198,24 +2198,25 @@ mod axum_http_tracker_server { use 
torrust_tracker::tracker::peer; use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; - use crate::http::asserts::{assert_internal_server_error_response, assert_scrape_response}; + use crate::http::asserts::{ + assert_cannot_parse_query_params_error_response, assert_missing_query_params_for_scrape_request_error_response, + assert_scrape_response, + }; use crate::http::client::Client; use crate::http::requests; use crate::http::requests::scrape::QueryBuilder; use crate::http::responses::scrape::{self, File, ResponseBuilder}; use crate::http::server::{start_ipv6_http_tracker, start_public_http_tracker}; - //#[tokio::test] - #[allow(dead_code)] - async fn should_fail_when_the_request_is_empty() { + #[tokio::test] + async fn should_fail_when_the_url_query_component_is_empty() { let http_tracker_server = start_public_http_tracker(Version::Axum).await; let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; - assert_internal_server_error_response(response).await; + assert_missing_query_params_for_scrape_request_error_response(response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { let http_tracker_server = start_public_http_tracker(Version::Axum).await; @@ -2228,13 +2229,11 @@ mod axum_http_tracker_server { .get(&format!("announce?{params}")) .await; - // code-review: it's not returning the invalid info hash error - assert_internal_server_error_response(response).await; + assert_cannot_parse_query_params_error_response(response, "").await; } } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { let http_tracker = start_public_http_tracker(Version::Axum).await; @@ -2272,8 +2271,7 @@ mod axum_http_tracker_server { assert_scrape_response(response, &expected_scrape_response).await; } - //#[tokio::test] - #[allow(dead_code)] + 
#[tokio::test] async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { let http_tracker = start_public_http_tracker(Version::Axum).await; @@ -2311,8 +2309,7 @@ mod axum_http_tracker_server { assert_scrape_response(response, &expected_scrape_response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { let http_tracker = start_public_http_tracker(Version::Axum).await; @@ -2329,8 +2326,7 @@ mod axum_http_tracker_server { assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_accept_multiple_infohashes() { let http_tracker = start_public_http_tracker(Version::Axum).await; From 4b3f9793970b9724d9757952cb09b9e0f95101fe Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 27 Feb 2023 13:46:17 +0000 Subject: [PATCH 346/435] refactor(udp): [#192] use new tracker::scrape method in UDP tracker --- src/tracker/torrent.rs | 4 ++-- src/udp/handlers.rs | 48 ++++++++++++++++++------------------------ 2 files changed, 22 insertions(+), 30 deletions(-) diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 34017599d..dc41b083e 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -16,9 +16,9 @@ pub struct Entry { #[derive(Debug, PartialEq, Default)] pub struct SwarmMetadata { - pub complete: u32, // The number of active peers that have completed downloading + pub complete: u32, // The number of active peers that have completed downloading (seeders) pub downloaded: u32, // The number of peers that have ever completed downloading - pub incomplete: u32, // The number of active peers that have not completed downloading + pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) } impl Entry { diff --git a/src/udp/handlers.rs 
b/src/udp/handlers.rs index 8978beb70..6c54a6106 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -182,51 +182,43 @@ pub async fn handle_announce( /// # Errors /// /// This function dose not ever return an error. -/// -/// TODO: refactor this, db lock can be a lot shorter pub async fn handle_scrape( remote_addr: SocketAddr, request: &ScrapeRequest, tracker: Arc, ) -> Result { - let db = tracker.get_torrents().await; + // Convert from aquatic infohashes + let mut info_hashes = vec![]; + for info_hash in &request.info_hashes { + info_hashes.push(InfoHash(info_hash.0)); + } + + let scrape_data = tracker.scrape(&info_hashes).await; let mut torrent_stats: Vec = Vec::new(); - for info_hash in &request.info_hashes { - let info_hash = InfoHash(info_hash.0); - - let scrape_entry = match db.get(&info_hash) { - Some(torrent_info) => { - if tracker.authenticate_request(&info_hash, &None).await.is_ok() { - let (seeders, completed, leechers) = torrent_info.get_stats(); - - #[allow(clippy::cast_possible_truncation)] - TorrentScrapeStatistics { - seeders: NumberOfPeers(i64::from(seeders) as i32), - completed: NumberOfDownloads(i64::from(completed) as i32), - leechers: NumberOfPeers(i64::from(leechers) as i32), - } - } else { - TorrentScrapeStatistics { - seeders: NumberOfPeers(0), - completed: NumberOfDownloads(0), - leechers: NumberOfPeers(0), - } - } + for file in &scrape_data.files { + let info_hash = file.0; + let swarm_metadata = file.1; + + let scrape_entry = if tracker.authenticate_request(info_hash, &None).await.is_ok() { + #[allow(clippy::cast_possible_truncation)] + TorrentScrapeStatistics { + seeders: NumberOfPeers(i64::from(swarm_metadata.complete) as i32), + completed: NumberOfDownloads(i64::from(swarm_metadata.downloaded) as i32), + leechers: NumberOfPeers(i64::from(swarm_metadata.incomplete) as i32), } - None => TorrentScrapeStatistics { + } else { + TorrentScrapeStatistics { seeders: NumberOfPeers(0), completed: NumberOfDownloads(0), leechers: 
NumberOfPeers(0), - }, + } }; torrent_stats.push(scrape_entry); } - drop(db); - // send stats event match remote_addr { SocketAddr::V4(_) => { From 7cdd63ee42b4868734038280c6a4f83e07c511ad Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 27 Feb 2023 18:52:25 +0000 Subject: [PATCH 347/435] refactor: [#171] use KeyId in auth:Key The struct `KeyId` was extracted to wrap the primitive type but it was not being used in the `auth::Key` struct. --- src/apis/resources/auth_key.rs | 14 +++---- src/databases/mod.rs | 3 ++ src/databases/mysql.rs | 8 ++-- src/databases/sqlite.rs | 11 +++--- src/http/warp_implementation/filters.rs | 16 ++++++-- src/http/warp_implementation/handlers.rs | 13 ++++--- src/http/warp_implementation/routes.rs | 6 +-- src/tracker/auth.rs | 49 +++++++++++++----------- src/tracker/error.rs | 4 +- src/tracker/mod.rs | 24 +++++++----- tests/tracker_api.rs | 12 +++--- 11 files changed, 91 insertions(+), 69 deletions(-) diff --git a/src/apis/resources/auth_key.rs b/src/apis/resources/auth_key.rs index d5c08f496..207a0c482 100644 --- a/src/apis/resources/auth_key.rs +++ b/src/apis/resources/auth_key.rs @@ -3,18 +3,18 @@ use std::convert::From; use serde::{Deserialize, Serialize}; use crate::protocol::clock::DurationSinceUnixEpoch; -use crate::tracker::auth; +use crate::tracker::auth::{self, KeyId}; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct AuthKey { - pub key: String, + pub key: String, // todo: rename to `id` pub valid_until: Option, } impl From for auth::Key { fn from(auth_key_resource: AuthKey) -> Self { auth::Key { - key: auth_key_resource.key, + id: auth_key_resource.key.parse::().unwrap(), valid_until: auth_key_resource .valid_until .map(|valid_until| DurationSinceUnixEpoch::new(valid_until, 0)), @@ -25,7 +25,7 @@ impl From for auth::Key { impl From for AuthKey { fn from(auth_key: auth::Key) -> Self { AuthKey { - key: auth_key.key, + key: auth_key.id.to_string(), valid_until: auth_key.valid_until.map(|valid_until| 
valid_until.as_secs()), } } @@ -37,7 +37,7 @@ mod tests { use super::AuthKey; use crate::protocol::clock::{Current, TimeNow}; - use crate::tracker::auth; + use crate::tracker::auth::{self, KeyId}; #[test] fn it_should_be_convertible_into_an_auth_key() { @@ -51,7 +51,7 @@ mod tests { assert_eq!( auth::Key::from(auth_key_resource), auth::Key { - key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line + id: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()) } ); @@ -62,7 +62,7 @@ mod tests { let duration_in_secs = 60; let auth_key = auth::Key { - key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line + id: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()), }; diff --git a/src/databases/mod.rs b/src/databases/mod.rs index 809decc2c..70cc9eb75 100644 --- a/src/databases/mod.rs +++ b/src/databases/mod.rs @@ -63,16 +63,19 @@ pub trait Database: Sync + Send { async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error>; + // todo: replace type `&str` with `&InfoHash` async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result, Error>; async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; + // todo: replace type `&str` with `&KeyId` async fn get_key_from_keys(&self, key: &str) -> Result, Error>; async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result; + // todo: replace type `&str` with `&KeyId` async fn remove_key_from_keys(&self, key: &str) -> Result; async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index ac54ebb82..532ba1dcb 100644 --- a/src/databases/mysql.rs +++ 
b/src/databases/mysql.rs @@ -12,7 +12,7 @@ use super::driver::Driver; use crate::databases::{Database, Error}; use crate::protocol::common::AUTH_KEY_LENGTH; use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth; +use crate::tracker::auth::{self, KeyId}; const DRIVER: Driver = Driver::MySQL; @@ -117,7 +117,7 @@ impl Database for Mysql { let keys = conn.query_map( "SELECT `key`, valid_until FROM `keys`", |(key, valid_until): (String, i64)| auth::Key { - key, + id: key.parse::().unwrap(), valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }, )?; @@ -192,7 +192,7 @@ impl Database for Mysql { let key = query?; Ok(key.map(|(key, expiry)| auth::Key { - key, + id: key.parse::().unwrap(), valid_until: Some(Duration::from_secs(expiry.unsigned_abs())), })) } @@ -200,7 +200,7 @@ impl Database for Mysql { async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; - let key = auth_key.key.to_string(); + let key = auth_key.id.to_string(); let valid_until = auth_key.valid_until.unwrap_or(Duration::ZERO).as_secs().to_string(); conn.exec_drop( diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 3425b15c8..d6915c850 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -9,7 +9,7 @@ use super::driver::Driver; use crate::databases::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth; +use crate::tracker::auth::{self, KeyId}; const DRIVER: Driver = Driver::Sqlite3; @@ -108,11 +108,11 @@ impl Database for Sqlite { let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?; let keys_iter = stmt.query_map([], |row| { - let key = row.get(0)?; + let key: String = row.get(0)?; let valid_until: i64 = row.get(1)?; Ok(auth::Key { - key, + id: key.parse::().unwrap(), valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }) })?; @@ -211,8 +211,9 @@ 
impl Database for Sqlite { Ok(key.map(|f| { let expiry: i64 = f.get(1).unwrap(); + let id: String = f.get(0).unwrap(); auth::Key { - key: f.get(0).unwrap(), + id: id.parse::().unwrap(), valid_until: Some(DurationSinceUnixEpoch::from_secs(expiry.unsigned_abs())), } })) @@ -223,7 +224,7 @@ impl Database for Sqlite { let insert = conn.execute( "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", - [auth_key.key.to_string(), auth_key.valid_until.unwrap().as_secs().to_string()], + [auth_key.id.to_string(), auth_key.valid_until.unwrap().as_secs().to_string()], )?; if insert == 0 { diff --git a/src/http/warp_implementation/filters.rs b/src/http/warp_implementation/filters.rs index fc8ef20bc..eb7abcd4d 100644 --- a/src/http/warp_implementation/filters.rs +++ b/src/http/warp_implementation/filters.rs @@ -1,6 +1,7 @@ use std::convert::Infallible; use std::net::{IpAddr, SocketAddr}; use std::panic::Location; +use std::str::FromStr; use std::sync::Arc; use warp::{reject, Filter, Rejection}; @@ -11,7 +12,8 @@ use super::{request, WebResult}; use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; -use crate::tracker::{self, auth, peer}; +use crate::tracker::auth::KeyId; +use crate::tracker::{self, peer}; /// Pass Arc along #[must_use] @@ -35,10 +37,16 @@ pub fn with_peer_id() -> impl Filter + /// Pass Arc along #[must_use] -pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { +pub fn with_auth_key_id() -> impl Filter,), Error = Infallible> + Clone { warp::path::param::() - .map(|key: String| auth::Key::from_string(&key)) - .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) + .map(|key: String| { + let key_id = KeyId::from_str(&key); + match key_id { + Ok(id) => Some(id), + Err(_) => None, + } + }) + .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) } /// Check for `PeerAddress` diff --git 
a/src/http/warp_implementation/handlers.rs b/src/http/warp_implementation/handlers.rs index 400cc5762..6019bf016 100644 --- a/src/http/warp_implementation/handlers.rs +++ b/src/http/warp_implementation/handlers.rs @@ -12,6 +12,7 @@ use super::error::Error; use super::{request, response, WebResult}; use crate::http::warp_implementation::peer_builder; use crate::protocol::info_hash::InfoHash; +use crate::tracker::auth::KeyId; use crate::tracker::{self, auth, peer, statistics, torrent}; /// Authenticate `InfoHash` using optional `auth::Key` @@ -21,11 +22,11 @@ use crate::tracker::{self, auth, peer, statistics, torrent}; /// Will return `ServerError` that wraps the `tracker::error::Error` if unable to `authenticate_request`. pub async fn authenticate( info_hash: &InfoHash, - auth_key: &Option, + auth_key_id: &Option, tracker: Arc, ) -> Result<(), Error> { tracker - .authenticate_request(info_hash, auth_key) + .authenticate_request(info_hash, auth_key_id) .await .map_err(|e| Error::TrackerError { source: (Arc::new(e) as Arc).into(), @@ -37,7 +38,7 @@ pub async fn authenticate( /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_announce_response`. pub async fn handle_announce( announce_request: request::Announce, - auth_key: Option, + auth_key_id: Option, tracker: Arc, ) -> WebResult { debug!("http announce request: {:#?}", announce_request); @@ -45,7 +46,7 @@ pub async fn handle_announce( let info_hash = announce_request.info_hash; let remote_client_ip = announce_request.peer_addr; - authenticate(&info_hash, &auth_key, tracker.clone()).await?; + authenticate(&info_hash, &auth_key_id, tracker.clone()).await?; let mut peer = peer_builder::from_request(&announce_request, &remote_client_ip); @@ -77,7 +78,7 @@ pub async fn handle_announce( /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. 
pub async fn handle_scrape( scrape_request: request::Scrape, - auth_key: Option, + auth_key_id: Option, tracker: Arc, ) -> WebResult { let mut files: HashMap = HashMap::new(); @@ -86,7 +87,7 @@ pub async fn handle_scrape( for info_hash in &scrape_request.info_hashes { let scrape_entry = match db.get(info_hash) { Some(torrent_info) => { - if authenticate(info_hash, &auth_key, tracker.clone()).await.is_ok() { + if authenticate(info_hash, &auth_key_id, tracker.clone()).await.is_ok() { let (seeders, completed, leechers) = torrent_info.get_stats(); response::ScrapeEntry { complete: seeders, diff --git a/src/http/warp_implementation/routes.rs b/src/http/warp_implementation/routes.rs index c46c502e4..2ee60e8c9 100644 --- a/src/http/warp_implementation/routes.rs +++ b/src/http/warp_implementation/routes.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use warp::{Filter, Rejection}; -use super::filters::{with_announce_request, with_auth_key, with_scrape_request, with_tracker}; +use super::filters::{with_announce_request, with_auth_key_id, with_scrape_request, with_tracker}; use super::handlers::{handle_announce, handle_scrape, send_error}; use crate::tracker; @@ -20,7 +20,7 @@ fn announce(tracker: Arc) -> impl Filter) -> impl Filter Key { - let key: String = thread_rng() + let random_id: String = thread_rng() .sample_iter(&Alphanumeric) .take(AUTH_KEY_LENGTH) .map(char::from) .collect(); - debug!("Generated key: {}, valid for: {:?} seconds", key, lifetime); + debug!("Generated key: {}, valid for: {:?} seconds", random_id, lifetime); Key { - key, + id: random_id.parse::().unwrap(), valid_until: Some(Current::add(&lifetime).unwrap()), } } @@ -54,16 +54,14 @@ pub fn verify(auth_key: &Key) -> Result<(), Error> { } None => Err(Error::UnableToReadKey { location: Location::caller(), - key: Box::new(auth_key.clone()), + key_id: Box::new(auth_key.id.clone()), }), } } #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub struct Key { - // todo: replace key field definition with: - 
// pub key: KeyId, - pub key: String, + pub id: KeyId, pub valid_until: Option, } @@ -72,7 +70,7 @@ impl std::fmt::Display for Key { write!( f, "key: `{}`, valid until `{}`", - self.key, + self.id, match self.valid_until { Some(duration) => format!( "{}", @@ -91,20 +89,29 @@ impl std::fmt::Display for Key { } impl Key { + /// # Panics + /// + /// Will panic if bytes cannot be converted into a valid `KeyId`. #[must_use] pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { if let Ok(key) = String::from_utf8(Vec::from(key_buffer)) { - Some(Key { key, valid_until: None }) + Some(Key { + id: key.parse::().unwrap(), + valid_until: None, + }) } else { None } } + /// # Panics + /// + /// Will panic if string cannot be converted into a valid `KeyId`. #[must_use] pub fn from_string(key: &str) -> Option { if key.len() == AUTH_KEY_LENGTH { Some(Key { - key: key.to_string(), + id: key.parse::().unwrap(), valid_until: None, }) } else { @@ -112,18 +119,13 @@ impl Key { } } - /// # Panics - /// - /// Will fail if the key id is not a valid key id. #[must_use] pub fn id(&self) -> KeyId { - // todo: replace the type of field `key` with type `KeyId`. - // The constructor should fail if an invalid KeyId is provided. 
- KeyId::from_str(&self.key).unwrap() + self.id.clone() } } -#[derive(Debug, Display, PartialEq, Clone)] +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, Display, Hash)] pub struct KeyId(String); #[derive(Debug, PartialEq, Eq)] @@ -148,10 +150,10 @@ pub enum Error { KeyVerificationError { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, - #[error("Failed to read key: {key}, {location}")] + #[error("Failed to read key: {key_id}, {location}")] UnableToReadKey { location: &'static Location<'static>, - key: Box, + key_id: Box, }, #[error("Key has expired, {location}")] KeyExpired { location: &'static Location<'static> }, @@ -171,7 +173,7 @@ mod tests { use std::time::Duration; use crate::protocol::clock::{Current, StoppedTime}; - use crate::tracker::auth; + use crate::tracker::auth::{self, KeyId}; #[test] fn auth_key_from_buffer() { @@ -181,7 +183,10 @@ mod tests { ]); assert!(auth_key.is_some()); - assert_eq!(auth_key.unwrap().key, "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"); + assert_eq!( + auth_key.unwrap().id, + "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse::().unwrap() + ); } #[test] @@ -190,7 +195,7 @@ mod tests { let auth_key = auth::Key::from_string(key_string); assert!(auth_key.is_some()); - assert_eq!(auth_key.unwrap().key, key_string); + assert_eq!(auth_key.unwrap().id, key_string.parse::().unwrap()); } #[test] diff --git a/src/tracker/error.rs b/src/tracker/error.rs index 51bcbf3bb..acc85a1c2 100644 --- a/src/tracker/error.rs +++ b/src/tracker/error.rs @@ -4,9 +4,9 @@ use crate::located_error::LocatedError; #[derive(thiserror::Error, Debug, Clone)] pub enum Error { - #[error("The supplied key: {key:?}, is not valid: {source}")] + #[error("The supplied key: {key_id:?}, is not valid: {source}")] PeerKeyNotValid { - key: super::auth::Key, + key_id: super::auth::KeyId, source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, #[error("The peer is not authenticated, {location}")] diff --git a/src/tracker/mod.rs 
b/src/tracker/mod.rs index 3e5e97439..147c889ac 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -16,6 +16,7 @@ use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; +use self::auth::KeyId; use self::error::Error; use self::peer::Peer; use self::torrent::{SwamStats, SwarmMetadata}; @@ -27,7 +28,7 @@ use crate::protocol::info_hash::InfoHash; pub struct Tracker { pub config: Arc, mode: mode::Mode, - keys: RwLock>, + keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, stats_event_sender: Option>, @@ -155,28 +156,31 @@ impl Tracker { pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { let auth_key = auth::generate(lifetime); self.database.add_key_to_keys(&auth_key).await?; - self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); + self.keys.write().await.insert(auth_key.id.clone(), auth_key.clone()); Ok(auth_key) } /// # Errors /// /// Will return a `database::Error` if unable to remove the `key` to the database. + /// + /// # Panics + /// + /// Will panic if key cannot be converted into a valid `KeyId`. pub async fn remove_auth_key(&self, key: &str) -> Result<(), databases::error::Error> { self.database.remove_key_from_keys(key).await?; - self.keys.write().await.remove(key); + self.keys.write().await.remove(&key.parse::().unwrap()); Ok(()) } /// # Errors /// /// Will return a `key::Error` if unable to get any `auth_key`. 
- pub async fn verify_auth_key(&self, auth_key: &auth::Key) -> Result<(), auth::Error> { - // todo: use auth::KeyId for the function argument `auth_key` - match self.keys.read().await.get(&auth_key.key) { + pub async fn verify_auth_key(&self, key_id: &KeyId) -> Result<(), auth::Error> { + match self.keys.read().await.get(key_id) { None => Err(auth::Error::UnableToReadKey { location: Location::caller(), - key: Box::new(auth_key.clone()), + key_id: Box::new(key_id.clone()), }), Some(key) => auth::verify(key), } @@ -192,7 +196,7 @@ impl Tracker { keys.clear(); for key in keys_from_database { - keys.insert(key.key.clone(), key); + keys.insert(key.id.clone(), key); } Ok(()) @@ -283,7 +287,7 @@ impl Tracker { /// Will return a `torrent::Error::PeerNotAuthenticated` if the `key` is `None`. /// /// Will return a `torrent::Error::TorrentNotWhitelisted` if the the Tracker is in listed mode and the `info_hash` is not whitelisted. - pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), Error> { + pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), Error> { // no authentication needed in public mode if self.is_public() { return Ok(()); @@ -295,7 +299,7 @@ impl Tracker { Some(key) => { if let Err(e) = self.verify_auth_key(key).await { return Err(Error::PeerKeyNotValid { - key: key.clone(), + key_id: key.clone(), source: (Arc::new(e) as Arc).into(), }); } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 193c6487c..bec22e2b4 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -638,7 +638,7 @@ mod tracker_apis { mod for_key_resources { use std::time::Duration; - use torrust_tracker::tracker::auth::Key; + use torrust_tracker::tracker::auth::KeyId; use crate::api::asserts::{ assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, @@ -665,7 +665,7 @@ mod tracker_apis { // Verify the key with the tracker assert!(api_server 
.tracker - .verify_auth_key(&Key::from(auth_key_resource)) + .verify_auth_key(&auth_key_resource.key.parse::().unwrap()) .await .is_ok()); } @@ -734,7 +734,7 @@ mod tracker_apis { .unwrap(); let response = Client::new(api_server.get_connection_info()) - .delete_auth_key(&auth_key.key) + .delete_auth_key(&auth_key.id.to_string()) .await; assert_ok(response).await; @@ -777,7 +777,7 @@ mod tracker_apis { force_database_error(&api_server.tracker); let response = Client::new(api_server.get_connection_info()) - .delete_auth_key(&auth_key.key) + .delete_auth_key(&auth_key.id.to_string()) .await; assert_failed_to_delete_key(response).await; @@ -797,7 +797,7 @@ mod tracker_apis { .unwrap(); let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .delete_auth_key(&auth_key.key) + .delete_auth_key(&auth_key.id.to_string()) .await; assert_token_not_valid(response).await; @@ -810,7 +810,7 @@ mod tracker_apis { .unwrap(); let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) - .delete_auth_key(&auth_key.key) + .delete_auth_key(&auth_key.id.to_string()) .await; assert_unauthorized(response).await; From 28e655fbd698f64b71c16d78bd3dbd211419d47d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 27 Feb 2023 19:03:16 +0000 Subject: [PATCH 348/435] refactor: [#171] rename auth::Key to auth::ExpiringKey --- src/apis/resources/auth_key.rs | 14 +++++++------- src/databases/mod.rs | 6 +++--- src/databases/mysql.rs | 10 +++++----- src/databases/sqlite.rs | 12 ++++++------ src/tracker/auth.rs | 24 ++++++++++++------------ src/tracker/mod.rs | 4 ++-- 6 files changed, 35 insertions(+), 35 deletions(-) diff --git a/src/apis/resources/auth_key.rs b/src/apis/resources/auth_key.rs index 207a0c482..e9989ca75 100644 --- a/src/apis/resources/auth_key.rs +++ b/src/apis/resources/auth_key.rs @@ -11,9 +11,9 @@ pub struct AuthKey { pub valid_until: Option, } -impl From for auth::Key { +impl From for auth::ExpiringKey { fn 
from(auth_key_resource: AuthKey) -> Self { - auth::Key { + auth::ExpiringKey { id: auth_key_resource.key.parse::().unwrap(), valid_until: auth_key_resource .valid_until @@ -22,8 +22,8 @@ impl From for auth::Key { } } -impl From for AuthKey { - fn from(auth_key: auth::Key) -> Self { +impl From for AuthKey { + fn from(auth_key: auth::ExpiringKey) -> Self { AuthKey { key: auth_key.id.to_string(), valid_until: auth_key.valid_until.map(|valid_until| valid_until.as_secs()), @@ -49,8 +49,8 @@ mod tests { }; assert_eq!( - auth::Key::from(auth_key_resource), - auth::Key { + auth::ExpiringKey::from(auth_key_resource), + auth::ExpiringKey { id: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()) } @@ -61,7 +61,7 @@ mod tests { fn it_should_be_convertible_from_an_auth_key() { let duration_in_secs = 60; - let auth_key = auth::Key { + let auth_key = auth::ExpiringKey { id: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()), }; diff --git a/src/databases/mod.rs b/src/databases/mod.rs index 70cc9eb75..038be0ea3 100644 --- a/src/databases/mod.rs +++ b/src/databases/mod.rs @@ -57,7 +57,7 @@ pub trait Database: Sync + Send { async fn load_persistent_torrents(&self) -> Result, Error>; - async fn load_keys(&self) -> Result, Error>; + async fn load_keys(&self) -> Result, Error>; async fn load_whitelist(&self) -> Result, Error>; @@ -71,9 +71,9 @@ pub trait Database: Sync + Send { async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; // todo: replace type `&str` with `&KeyId` - async fn get_key_from_keys(&self, key: &str) -> Result, Error>; + async fn get_key_from_keys(&self, key: &str) -> Result, Error>; - async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result; + async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result; // todo: 
replace type `&str` with `&KeyId` async fn remove_key_from_keys(&self, key: &str) -> Result; diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 532ba1dcb..0d545aaa9 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -111,12 +111,12 @@ impl Database for Mysql { Ok(torrents) } - async fn load_keys(&self) -> Result, Error> { + async fn load_keys(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let keys = conn.query_map( "SELECT `key`, valid_until FROM `keys`", - |(key, valid_until): (String, i64)| auth::Key { + |(key, valid_until): (String, i64)| auth::ExpiringKey { id: key.parse::().unwrap(), valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }, @@ -183,7 +183,7 @@ impl Database for Mysql { Ok(1) } - async fn get_key_from_keys(&self, key: &str) -> Result, Error> { + async fn get_key_from_keys(&self, key: &str) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let query = @@ -191,13 +191,13 @@ impl Database for Mysql { let key = query?; - Ok(key.map(|(key, expiry)| auth::Key { + Ok(key.map(|(key, expiry)| auth::ExpiringKey { id: key.parse::().unwrap(), valid_until: Some(Duration::from_secs(expiry.unsigned_abs())), })) } - async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { + async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let key = auth_key.id.to_string(); diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index d6915c850..ab0addf4b 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -102,7 +102,7 @@ impl Database for Sqlite { Ok(torrents) } - async fn load_keys(&self) -> Result, Error> { + async fn load_keys(&self) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?; @@ -111,13 +111,13 @@ impl Database for Sqlite { let key: String = 
row.get(0)?; let valid_until: i64 = row.get(1)?; - Ok(auth::Key { + Ok(auth::ExpiringKey { id: key.parse::().unwrap(), valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }) })?; - let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); + let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); Ok(keys) } @@ -200,7 +200,7 @@ impl Database for Sqlite { } } - async fn get_key_from_keys(&self, key: &str) -> Result, Error> { + async fn get_key_from_keys(&self, key: &str) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; @@ -212,14 +212,14 @@ impl Database for Sqlite { Ok(key.map(|f| { let expiry: i64 = f.get(1).unwrap(); let id: String = f.get(0).unwrap(); - auth::Key { + auth::ExpiringKey { id: id.parse::().unwrap(), valid_until: Some(DurationSinceUnixEpoch::from_secs(expiry.unsigned_abs())), } })) } - async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { + async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let insert = conn.execute( diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 53304657a..22f734e48 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -19,7 +19,7 @@ use crate::protocol::common::AUTH_KEY_LENGTH; /// # Panics /// /// It would panic if the `lifetime: Duration` + Duration is more than `Duration::MAX`. 
-pub fn generate(lifetime: Duration) -> Key { +pub fn generate(lifetime: Duration) -> ExpiringKey { let random_id: String = thread_rng() .sample_iter(&Alphanumeric) .take(AUTH_KEY_LENGTH) @@ -28,7 +28,7 @@ pub fn generate(lifetime: Duration) -> Key { debug!("Generated key: {}, valid for: {:?} seconds", random_id, lifetime); - Key { + ExpiringKey { id: random_id.parse::().unwrap(), valid_until: Some(Current::add(&lifetime).unwrap()), } @@ -39,7 +39,7 @@ pub fn generate(lifetime: Duration) -> Key { /// Will return `Error::KeyExpired` if `auth_key.valid_until` is past the `current_time`. /// /// Will return `Error::KeyInvalid` if `auth_key.valid_until` is past the `None`. -pub fn verify(auth_key: &Key) -> Result<(), Error> { +pub fn verify(auth_key: &ExpiringKey) -> Result<(), Error> { let current_time: DurationSinceUnixEpoch = Current::now(); match auth_key.valid_until { @@ -60,12 +60,12 @@ pub fn verify(auth_key: &Key) -> Result<(), Error> { } #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] -pub struct Key { +pub struct ExpiringKey { pub id: KeyId, pub valid_until: Option, } -impl std::fmt::Display for Key { +impl std::fmt::Display for ExpiringKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, @@ -88,14 +88,14 @@ impl std::fmt::Display for Key { } } -impl Key { +impl ExpiringKey { /// # Panics /// /// Will panic if bytes cannot be converted into a valid `KeyId`. #[must_use] - pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { + pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { if let Ok(key) = String::from_utf8(Vec::from(key_buffer)) { - Some(Key { + Some(ExpiringKey { id: key.parse::().unwrap(), valid_until: None, }) @@ -108,9 +108,9 @@ impl Key { /// /// Will panic if string cannot be converted into a valid `KeyId`. 
#[must_use] - pub fn from_string(key: &str) -> Option { + pub fn from_string(key: &str) -> Option { if key.len() == AUTH_KEY_LENGTH { - Some(Key { + Some(ExpiringKey { id: key.parse::().unwrap(), valid_until: None, }) @@ -177,7 +177,7 @@ mod tests { #[test] fn auth_key_from_buffer() { - let auth_key = auth::Key::from_buffer([ + let auth_key = auth::ExpiringKey::from_buffer([ 89, 90, 83, 108, 52, 108, 77, 90, 117, 112, 82, 117, 79, 112, 83, 82, 67, 51, 107, 114, 73, 75, 82, 53, 66, 80, 66, 49, 52, 110, 114, 74, ]); @@ -192,7 +192,7 @@ mod tests { #[test] fn auth_key_from_string() { let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key = auth::Key::from_string(key_string); + let auth_key = auth::ExpiringKey::from_string(key_string); assert!(auth_key.is_some()); assert_eq!(auth_key.unwrap().id, key_string.parse::().unwrap()); diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 147c889ac..0fb434aea 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -28,7 +28,7 @@ use crate::protocol::info_hash::InfoHash; pub struct Tracker { pub config: Arc, mode: mode::Mode, - keys: RwLock>, + keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, stats_event_sender: Option>, @@ -153,7 +153,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. - pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { + pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { let auth_key = auth::generate(lifetime); self.database.add_key_to_keys(&auth_key).await?; self.keys.write().await.insert(auth_key.id.clone(), auth_key.clone()); From f3afab1af86f6e4afd786b1e8d7f8ca721001d6d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 28 Feb 2023 15:26:27 +0000 Subject: [PATCH 349/435] feat(http): [#195] announce request in private mode for the new Auxm HTTP tracker implementation. 
--- .../axum_implementation/handlers/announce.rs | 43 ++++++++++++++++--- src/http/axum_implementation/handlers/auth.rs | 41 ++++++++++++++++++ src/http/axum_implementation/handlers/mod.rs | 1 + src/http/axum_implementation/routes.rs | 3 +- tests/http/asserts.rs | 22 +++------- tests/http/asserts_warp.rs | 19 ++++++++ tests/http_tracker.rs | 28 +++++------- 7 files changed, 117 insertions(+), 40 deletions(-) create mode 100644 src/http/axum_implementation/handlers/auth.rs diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index d5fa7f3a4..3ad11df51 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -1,39 +1,70 @@ use std::net::{IpAddr, SocketAddr}; +use std::panic::Location; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use axum::extract::State; +use axum::extract::{Path, State}; use axum::response::{IntoResponse, Response}; use log::debug; use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; +use crate::http::axum_implementation::handlers::auth; use crate::http::axum_implementation::requests::announce::{Announce, Compact, Event}; -use crate::http::axum_implementation::responses::announce; +use crate::http::axum_implementation::responses::{self, announce}; use crate::http::axum_implementation::services; use crate::protocol::clock::{Current, Time}; +use crate::tracker::auth::KeyId; use crate::tracker::peer::Peer; use crate::tracker::Tracker; #[allow(clippy::unused_async)] -pub async fn handle( +pub async fn handle_without_key( State(tracker): State>, ExtractRequest(announce_request): ExtractRequest, remote_client_ip: RemoteClientIp, ) -> Response { debug!("http announce request: {:#?}", announce_request); - let peer_ip = match 
peer_ip::resolve(tracker.config.on_reverse_proxy, &remote_client_ip) { + if tracker.is_private() { + return responses::error::Error::from(auth::Error::MissingAuthKey { + location: Location::caller(), + }) + .into_response(); + } + + handle(&tracker, &announce_request, &remote_client_ip).await +} + +#[allow(clippy::unused_async)] +pub async fn handle_with_key( + State(tracker): State>, + ExtractRequest(announce_request): ExtractRequest, + Path(key_id): Path, + remote_client_ip: RemoteClientIp, +) -> Response { + debug!("http announce request: {:#?}", announce_request); + + match auth::authenticate(&key_id, &tracker).await { + Ok(_) => (), + Err(error) => return responses::error::Error::from(error).into_response(), + } + + handle(&tracker, &announce_request, &remote_client_ip).await +} + +async fn handle(tracker: &Arc, announce_request: &Announce, remote_client_ip: &RemoteClientIp) -> Response { + let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, remote_client_ip) { Ok(peer_ip) => peer_ip, Err(err) => return err, }; - let mut peer = peer_from_request(&announce_request, &peer_ip); + let mut peer = peer_from_request(announce_request, &peer_ip); let announce_data = services::announce::invoke(tracker.clone(), announce_request.info_hash, &mut peer).await; - match announce_request.compact { + match &announce_request.compact { Some(compact) => match compact { Compact::Accepted => announce::Compact::from(announce_data).into_response(), Compact::NotAccepted => announce::NonCompact::from(announce_data).into_response(), diff --git a/src/http/axum_implementation/handlers/auth.rs b/src/http/axum_implementation/handlers/auth.rs new file mode 100644 index 000000000..13f5b27e6 --- /dev/null +++ b/src/http/axum_implementation/handlers/auth.rs @@ -0,0 +1,41 @@ +use std::panic::Location; +use std::sync::Arc; + +use thiserror::Error; + +use crate::http::axum_implementation::responses; +use crate::tracker::auth::{self, KeyId}; +use crate::tracker::Tracker; + 
+#[derive(Debug, Error)] +pub enum Error { + #[error("Missing authentication key for private tracker. Error in {location}")] + MissingAuthKey { location: &'static Location<'static> }, +} + +/// # Errors +/// +/// Will return an error if the the authentication key cannot be verified. +pub async fn authenticate(key_id: &KeyId, tracker: &Arc) -> Result<(), auth::Error> { + if tracker.is_private() { + tracker.verify_auth_key(key_id).await + } else { + Ok(()) + } +} + +impl From for responses::error::Error { + fn from(err: Error) -> Self { + responses::error::Error { + failure_reason: format!("Authentication error: {err}"), + } + } +} + +impl From for responses::error::Error { + fn from(err: auth::Error) -> Self { + responses::error::Error { + failure_reason: format!("Authentication error: {err}"), + } + } +} diff --git a/src/http/axum_implementation/handlers/mod.rs b/src/http/axum_implementation/handlers/mod.rs index 4e6849534..0d8aa7f52 100644 --- a/src/http/axum_implementation/handlers/mod.rs +++ b/src/http/axum_implementation/handlers/mod.rs @@ -1,3 +1,4 @@ pub mod announce; +pub mod auth; pub mod scrape; pub mod status; diff --git a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs index 1d4d67e73..646dd0aa3 100644 --- a/src/http/axum_implementation/routes.rs +++ b/src/http/axum_implementation/routes.rs @@ -12,7 +12,8 @@ pub fn router(tracker: &Arc) -> Router { // Status .route("/status", get(status::handle)) // Announce request - .route("/announce", get(announce::handle).with_state(tracker.clone())) + .route("/announce", get(announce::handle_without_key).with_state(tracker.clone())) + .route("/announce/:key", get(announce::handle_with_key).with_state(tracker.clone())) // Scrape request .route("/scrape", get(scrape::handle).with_state(tracker.clone())) // Add extension to get the client IP from the connection info diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index cd45571da..0d5441f89 100644 --- a/tests/http/asserts.rs 
+++ b/tests/http/asserts.rs @@ -140,22 +140,6 @@ pub async fn assert_torrent_not_in_whitelist_error_response(response: Response) assert_bencoded_error(&response.text().await.unwrap(), "is not whitelisted", Location::caller()); } -pub async fn assert_peer_not_authenticated_error_response(response: Response) { - assert_eq!(response.status(), 200); - - assert_bencoded_error( - &response.text().await.unwrap(), - "The peer is not authenticated", - Location::caller(), - ); -} - -pub async fn assert_invalid_authentication_key_error_response(response: Response) { - assert_eq!(response.status(), 200); - - assert_bencoded_error(&response.text().await.unwrap(), "is not valid", Location::caller()); -} - pub async fn assert_could_not_find_remote_address_on_xff_header_error_response(response: Response) { assert_eq!(response.status(), 200); @@ -199,3 +183,9 @@ pub async fn assert_cannot_parse_query_params_error_response(response: Response, Location::caller(), ); } + +pub async fn assert_authentication_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error(&response.text().await.unwrap(), "Authentication error", Location::caller()); +} diff --git a/tests/http/asserts_warp.rs b/tests/http/asserts_warp.rs index 6bda82f6c..d1a936efa 100644 --- a/tests/http/asserts_warp.rs +++ b/tests/http/asserts_warp.rs @@ -1,7 +1,10 @@ +use std::panic::Location; + /// todo: this mod should be removed when we remove the Warp implementation for the HTTP tracker. 
use reqwest::Response; use super::responses::announce_warp::WarpAnnounce; +use crate::http::asserts::assert_bencoded_error; pub async fn assert_warp_announce_response(response: Response, expected_announce_response: &WarpAnnounce) { assert_eq!(response.status(), 200); @@ -13,3 +16,19 @@ pub async fn assert_warp_announce_response(response: Response, expected_announce assert_eq!(announce_response, *expected_announce_response); } + +pub async fn assert_warp_peer_not_authenticated_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + "The peer is not authenticated", + Location::caller(), + ); +} + +pub async fn assert_warp_invalid_authentication_key_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error(&response.text().await.unwrap(), "is not valid", Location::caller()); +} diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index a341e13ed..28ed252e9 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1085,9 +1085,9 @@ mod warp_http_tracker_server { use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::KeyId; - use crate::http::asserts::{ - assert_invalid_authentication_key_error_response, assert_is_announce_response, - assert_peer_not_authenticated_error_response, + use crate::http::asserts::assert_is_announce_response; + use crate::http::asserts_warp::{ + assert_warp_invalid_authentication_key_error_response, assert_warp_peer_not_authenticated_error_response, }; use crate::http::client::Client; use crate::http::requests::announce::QueryBuilder; @@ -1120,7 +1120,7 @@ mod warp_http_tracker_server { .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; - assert_peer_not_authenticated_error_response(response).await; + assert_warp_peer_not_authenticated_error_response(response).await; } #[tokio::test] @@ -1134,7 +1134,7 @@ mod warp_http_tracker_server { 
.announce(&QueryBuilder::default().query()) .await; - assert_invalid_authentication_key_error_response(response).await; + assert_warp_invalid_authentication_key_error_response(response).await; } } @@ -2539,16 +2539,12 @@ mod axum_http_tracker_server { use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::KeyId; - use crate::http::asserts::{ - assert_invalid_authentication_key_error_response, assert_is_announce_response, - assert_peer_not_authenticated_error_response, - }; + use crate::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; use crate::http::client::Client; use crate::http::requests::announce::QueryBuilder; use crate::http::server::start_private_http_tracker; - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_respond_to_authenticated_peers() { let http_tracker_server = start_private_http_tracker(Version::Axum).await; @@ -2565,8 +2561,7 @@ mod axum_http_tracker_server { assert_is_announce_response(response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { let http_tracker_server = start_private_http_tracker(Version::Axum).await; @@ -2576,11 +2571,10 @@ mod axum_http_tracker_server { .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; - assert_peer_not_authenticated_error_response(response).await; + assert_authentication_error_response(response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_if_the_peer_authentication_key_is_not_valid() { let http_tracker_server = start_private_http_tracker(Version::Axum).await; @@ -2591,7 +2585,7 @@ mod axum_http_tracker_server { .announce(&QueryBuilder::default().query()) .await; - assert_invalid_authentication_key_error_response(response).await; + assert_authentication_error_response(response).await; } } From 468009da1f22fecc00b83b6408e1c010e1a15fdb Mon Sep 17 00:00:00 
2001 From: Jose Celano Date: Tue, 28 Feb 2023 16:50:35 +0000 Subject: [PATCH 350/435] feat(http): [#196] scrape request in private mode --- .../axum_implementation/handlers/scrape.rs | 48 +++++++++++++++++-- src/http/axum_implementation/routes.rs | 3 +- .../axum_implementation/services/scrape.rs | 20 ++++++-- src/tracker/mod.rs | 11 +++++ tests/http_tracker.rs | 10 ++-- 5 files changed, 78 insertions(+), 14 deletions(-) diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 51b6fa84d..37e9a9062 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -1,29 +1,69 @@ use std::sync::Arc; -use axum::extract::State; +use axum::extract::{Path, State}; use axum::response::{IntoResponse, Response}; use log::debug; use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; +use crate::http::axum_implementation::handlers::auth; +use crate::http::axum_implementation::requests::scrape::Scrape; use crate::http::axum_implementation::{responses, services}; +use crate::tracker::auth::KeyId; use crate::tracker::Tracker; #[allow(clippy::unused_async)] -pub async fn handle( +pub async fn handle_without_key( State(tracker): State>, ExtractRequest(scrape_request): ExtractRequest, remote_client_ip: RemoteClientIp, ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); - let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, &remote_client_ip) { + if tracker.is_private() { + return handle_fake_scrape(&tracker, &scrape_request, &remote_client_ip).await; + } + + handle_real_scrape(&tracker, &scrape_request, &remote_client_ip).await +} + +#[allow(clippy::unused_async)] +pub async fn handle_with_key( + State(tracker): State>, + ExtractRequest(scrape_request): ExtractRequest, + 
Path(key_id): Path, + remote_client_ip: RemoteClientIp, +) -> Response { + debug!("http scrape request: {:#?}", &scrape_request); + + match auth::authenticate(&key_id, &tracker).await { + Ok(_) => (), + Err(_) => return handle_fake_scrape(&tracker, &scrape_request, &remote_client_ip).await, + } + + handle_real_scrape(&tracker, &scrape_request, &remote_client_ip).await +} + +async fn handle_real_scrape(tracker: &Arc, scrape_request: &Scrape, remote_client_ip: &RemoteClientIp) -> Response { + let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, remote_client_ip) { + Ok(peer_ip) => peer_ip, + Err(err) => return err, + }; + + let scrape_data = services::scrape::invoke(tracker, &scrape_request.info_hashes, &peer_ip).await; + + responses::scrape::Bencoded::from(scrape_data).into_response() +} + +/// When authentication fails in `private` mode the tracker returns empty swarm metadata for all the requested infohashes. +async fn handle_fake_scrape(tracker: &Arc, scrape_request: &Scrape, remote_client_ip: &RemoteClientIp) -> Response { + let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, remote_client_ip) { Ok(peer_ip) => peer_ip, Err(err) => return err, }; - let scrape_data = services::scrape::invoke(tracker.clone(), &scrape_request.info_hashes, &peer_ip).await; + let scrape_data = services::scrape::fake_invoke(tracker, &scrape_request.info_hashes, &peer_ip).await; responses::scrape::Bencoded::from(scrape_data).into_response() } diff --git a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs index 646dd0aa3..21b7260ae 100644 --- a/src/http/axum_implementation/routes.rs +++ b/src/http/axum_implementation/routes.rs @@ -15,7 +15,8 @@ pub fn router(tracker: &Arc) -> Router { .route("/announce", get(announce::handle_without_key).with_state(tracker.clone())) .route("/announce/:key", get(announce::handle_with_key).with_state(tracker.clone())) // Scrape request - .route("/scrape", 
get(scrape::handle).with_state(tracker.clone())) + .route("/scrape", get(scrape::handle_without_key).with_state(tracker.clone())) + .route("/scrape/:key", get(scrape::handle_with_key).with_state(tracker.clone())) // Add extension to get the client IP from the connection info .layer(SecureClientIpSource::ConnectInfo.into_extension()) } diff --git a/src/http/axum_implementation/services/scrape.rs b/src/http/axum_implementation/services/scrape.rs index f40b8f999..30f00a47b 100644 --- a/src/http/axum_implementation/services/scrape.rs +++ b/src/http/axum_implementation/services/scrape.rs @@ -4,9 +4,25 @@ use std::sync::Arc; use crate::protocol::info_hash::InfoHash; use crate::tracker::{statistics, ScrapeData, Tracker}; -pub async fn invoke(tracker: Arc, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { +pub async fn invoke(tracker: &Arc, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { let scrape_data = tracker.scrape(info_hashes).await; + send_scrape_event(original_peer_ip, tracker).await; + + scrape_data +} + +/// When the peer is not authenticated and the tracker is running in `private` mode, +/// the tracker returns empty stats for all the torrents. 
+pub async fn fake_invoke(tracker: &Arc, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { + let scrape_data = tracker.empty_scrape_for(info_hashes); + + send_scrape_event(original_peer_ip, tracker).await; + + scrape_data +} + +async fn send_scrape_event(original_peer_ip: &IpAddr, tracker: &Arc) { match original_peer_ip { IpAddr::V4(_) => { tracker.send_stats_event(statistics::Event::Tcp4Scrape).await; @@ -15,6 +31,4 @@ pub async fn invoke(tracker: Arc, info_hashes: &Vec, original tracker.send_stats_event(statistics::Event::Tcp6Scrape).await; } } - - scrape_data } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 0fb434aea..2604c5045 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -142,6 +142,17 @@ impl Tracker { scrape_data } + // It return empty swarm metadata for all the infohashes. + pub fn empty_scrape_for(&self, info_hashes: &Vec) -> ScrapeData { + let mut scrape_data = ScrapeData::empty(); + + for info_hash in info_hashes { + scrape_data.add_file(info_hash, SwarmMetadata::default()); + } + + scrape_data + } + async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { let torrents = self.get_torrents().await; match torrents.get(info_hash) { diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 28ed252e9..f60c755e0 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -2606,8 +2606,7 @@ mod axum_http_tracker_server { use crate::http::responses::scrape::{File, ResponseBuilder}; use crate::http::server::start_private_http_tracker; - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { let http_tracker = start_private_http_tracker(Version::Axum).await; @@ -2636,8 +2635,7 @@ mod axum_http_tracker_server { assert_scrape_response(response, &expected_scrape_response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { 
let http_tracker = start_private_http_tracker(Version::Axum).await; @@ -2677,10 +2675,10 @@ mod axum_http_tracker_server { assert_scrape_response(response, &expected_scrape_response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { // There is not authentication error + // code-review: should this really be this way? let http_tracker = start_private_http_tracker(Version::Axum).await; From 95a69e5d9bd616c0507b90d80d5817fcdde36068 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 28 Feb 2023 17:51:55 +0000 Subject: [PATCH 351/435] feat(http): [#196] return auth error when the key query param cannot be parsed Returns an specific authentication error when the peer uses a key with an invalid format. --- .../axum_implementation/handlers/announce.rs | 11 +++++- src/http/axum_implementation/handlers/auth.rs | 13 +++++++ .../axum_implementation/handlers/scrape.rs | 12 ++++++- tests/http_tracker.rs | 34 +++++++++++++++++-- 4 files changed, 66 insertions(+), 4 deletions(-) diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 3ad11df51..b9b964605 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -7,6 +7,7 @@ use axum::extract::{Path, State}; use axum::response::{IntoResponse, Response}; use log::debug; +use super::auth::KeyIdParam; use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; @@ -41,11 +42,19 @@ pub async fn handle_without_key( pub async fn handle_with_key( State(tracker): State>, ExtractRequest(announce_request): ExtractRequest, - Path(key_id): Path, + Path(key_id_param): Path, remote_client_ip: RemoteClientIp, ) -> Response { 
debug!("http announce request: {:#?}", announce_request); + let Ok(key_id) = key_id_param.value().parse::() else { + return responses::error::Error::from( + auth::Error::InvalidKeyFormat { + location: Location::caller() + }) + .into_response() + }; + match auth::authenticate(&key_id, &tracker).await { Ok(_) => (), Err(error) => return responses::error::Error::from(error).into_response(), diff --git a/src/http/axum_implementation/handlers/auth.rs b/src/http/axum_implementation/handlers/auth.rs index 13f5b27e6..3b9aebc23 100644 --- a/src/http/axum_implementation/handlers/auth.rs +++ b/src/http/axum_implementation/handlers/auth.rs @@ -1,16 +1,29 @@ use std::panic::Location; use std::sync::Arc; +use serde::Deserialize; use thiserror::Error; use crate::http::axum_implementation::responses; use crate::tracker::auth::{self, KeyId}; use crate::tracker::Tracker; +#[derive(Deserialize)] +pub struct KeyIdParam(String); + +impl KeyIdParam { + #[must_use] + pub fn value(&self) -> String { + self.0.clone() + } +} + #[derive(Debug, Error)] pub enum Error { #[error("Missing authentication key for private tracker. Error in {location}")] MissingAuthKey { location: &'static Location<'static> }, + #[error("Invalid format authentication key. 
Error in {location}")] + InvalidKeyFormat { location: &'static Location<'static> }, } /// # Errors diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 37e9a9062..814cdbfa4 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -1,9 +1,11 @@ +use std::panic::Location; use std::sync::Arc; use axum::extract::{Path, State}; use axum::response::{IntoResponse, Response}; use log::debug; +use super::auth::KeyIdParam; use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; @@ -32,11 +34,19 @@ pub async fn handle_without_key( pub async fn handle_with_key( State(tracker): State>, ExtractRequest(scrape_request): ExtractRequest, - Path(key_id): Path, + Path(key_id_param): Path, remote_client_ip: RemoteClientIp, ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); + let Ok(key_id) = key_id_param.value().parse::() else { + return responses::error::Error::from( + auth::Error::InvalidKeyFormat { + location: Location::caller() + }) + .into_response() + }; + match auth::authenticate(&key_id, &tracker).await { Ok(_) => (), Err(_) => return handle_fake_scrape(&tracker, &scrape_request, &remote_client_ip).await, diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index f60c755e0..0536ab0b7 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -2575,7 +2575,22 @@ mod axum_http_tracker_server { } #[tokio::test] - async fn should_fail_if_the_peer_authentication_key_is_not_valid() { + async fn should_fail_if_the_key_query_param_cannot_be_parsed() { + let http_tracker_server = start_private_http_tracker(Version::Axum).await; + + let invalid_key_id = "INVALID_KEY_ID"; + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!( + 
"announce/{invalid_key_id}?info_hash=%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0" + )) + .await; + + assert_authentication_error_response(response).await; + } + + #[tokio::test] + async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { let http_tracker_server = start_private_http_tracker(Version::Axum).await; // The tracker does not have this key @@ -2600,12 +2615,27 @@ mod axum_http_tracker_server { use torrust_tracker::tracker::peer; use crate::common::fixtures::PeerBuilder; - use crate::http::asserts::assert_scrape_response; + use crate::http::asserts::{assert_authentication_error_response, assert_scrape_response}; use crate::http::client::Client; use crate::http::requests; use crate::http::responses::scrape::{File, ResponseBuilder}; use crate::http::server::start_private_http_tracker; + #[tokio::test] + async fn should_fail_if_the_key_query_param_cannot_be_parsed() { + let http_tracker_server = start_private_http_tracker(Version::Axum).await; + + let invalid_key_id = "INVALID_KEY_ID"; + + let response = Client::new(http_tracker_server.get_connection_info()) + .get(&format!( + "scrape/{invalid_key_id}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" + )) + .await; + + assert_authentication_error_response(response).await; + } + #[tokio::test] async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { let http_tracker = start_private_http_tracker(Version::Axum).await; From ea249733f4f74ae1c35e47fda965d97b83681f11 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 1 Mar 2023 11:20:20 +0000 Subject: [PATCH 352/435] refactor(http): [#200] move function to Tracker --- src/http/axum_implementation/handlers/announce.rs | 2 +- src/http/axum_implementation/handlers/auth.rs | 15 +-------------- src/http/axum_implementation/handlers/scrape.rs | 2 +- src/tracker/mod.rs | 11 
+++++++++++ 4 files changed, 14 insertions(+), 16 deletions(-) diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index b9b964605..9f39a5bdf 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -55,7 +55,7 @@ pub async fn handle_with_key( .into_response() }; - match auth::authenticate(&key_id, &tracker).await { + match tracker.authenticate(&key_id).await { Ok(_) => (), Err(error) => return responses::error::Error::from(error).into_response(), } diff --git a/src/http/axum_implementation/handlers/auth.rs b/src/http/axum_implementation/handlers/auth.rs index 3b9aebc23..366526664 100644 --- a/src/http/axum_implementation/handlers/auth.rs +++ b/src/http/axum_implementation/handlers/auth.rs @@ -1,12 +1,10 @@ use std::panic::Location; -use std::sync::Arc; use serde::Deserialize; use thiserror::Error; use crate::http::axum_implementation::responses; -use crate::tracker::auth::{self, KeyId}; -use crate::tracker::Tracker; +use crate::tracker::auth; #[derive(Deserialize)] pub struct KeyIdParam(String); @@ -26,17 +24,6 @@ pub enum Error { InvalidKeyFormat { location: &'static Location<'static> }, } -/// # Errors -/// -/// Will return an error if the the authentication key cannot be verified. 
-pub async fn authenticate(key_id: &KeyId, tracker: &Arc) -> Result<(), auth::Error> { - if tracker.is_private() { - tracker.verify_auth_key(key_id).await - } else { - Ok(()) - } -} - impl From for responses::error::Error { fn from(err: Error) -> Self { responses::error::Error { diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 814cdbfa4..6edf2fdb8 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -47,7 +47,7 @@ pub async fn handle_with_key( .into_response() }; - match auth::authenticate(&key_id, &tracker).await { + match tracker.authenticate(&key_id).await { Ok(_) => (), Err(_) => return handle_fake_scrape(&tracker, &scrape_request, &remote_client_ip).await, } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 2604c5045..31eeef6dc 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -334,6 +334,17 @@ impl Tracker { Ok(()) } + /// # Errors + /// + /// Will return an error if the the authentication key cannot be verified. 
+ pub async fn authenticate(&self, key_id: &KeyId) -> Result<(), auth::Error> { + if self.is_private() { + self.verify_auth_key(key_id).await + } else { + Ok(()) + } + } + /// Loading the torrents from database into memory /// /// # Errors From cd14c6b334c4a129ebf997f7095f3fe0cdd9e62b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 1 Mar 2023 11:58:13 +0000 Subject: [PATCH 353/435] feat(http): [#200] announce request in listed mode --- .../axum_implementation/handlers/announce.rs | 8 +++++- src/http/axum_implementation/handlers/mod.rs | 11 ++++++++ .../axum_implementation/handlers/scrape.rs | 3 ++- src/tracker/mod.rs | 25 +++++++++++++++++++ tests/http_tracker.rs | 6 ++--- 5 files changed, 47 insertions(+), 6 deletions(-) diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 9f39a5bdf..e4cd476fa 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -28,7 +28,7 @@ pub async fn handle_without_key( ) -> Response { debug!("http announce request: {:#?}", announce_request); - if tracker.is_private() { + if tracker.requires_authentication() { return responses::error::Error::from(auth::Error::MissingAuthKey { location: Location::caller(), }) @@ -47,6 +47,7 @@ pub async fn handle_with_key( ) -> Response { debug!("http announce request: {:#?}", announce_request); + // todo: extract to Axum extractor. Duplicate code in `scrape` handler. 
let Ok(key_id) = key_id_param.value().parse::() else { return responses::error::Error::from( auth::Error::InvalidKeyFormat { @@ -64,6 +65,11 @@ pub async fn handle_with_key( } async fn handle(tracker: &Arc, announce_request: &Announce, remote_client_ip: &RemoteClientIp) -> Response { + match tracker.authorize(&announce_request.info_hash).await { + Ok(_) => (), + Err(error) => return responses::error::Error::from(error).into_response(), + } + let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, remote_client_ip) { Ok(peer_ip) => peer_ip, Err(err) => return err, diff --git a/src/http/axum_implementation/handlers/mod.rs b/src/http/axum_implementation/handlers/mod.rs index 0d8aa7f52..7cc5022e6 100644 --- a/src/http/axum_implementation/handlers/mod.rs +++ b/src/http/axum_implementation/handlers/mod.rs @@ -1,4 +1,15 @@ +use super::responses; +use crate::tracker::error::Error; + pub mod announce; pub mod auth; pub mod scrape; pub mod status; + +impl From for responses::error::Error { + fn from(err: Error) -> Self { + responses::error::Error { + failure_reason: format!("Tracker error: {err}"), + } + } +} diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 6edf2fdb8..649d630b0 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -23,7 +23,7 @@ pub async fn handle_without_key( ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); - if tracker.is_private() { + if tracker.requires_authentication() { return handle_fake_scrape(&tracker, &scrape_request, &remote_client_ip).await; } @@ -39,6 +39,7 @@ pub async fn handle_with_key( ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); + // todo: extract to Axum extractor. Duplicate code in `announce` handler. 
let Ok(key_id) = key_id_param.value().parse::() else { return responses::error::Error::from( auth::Error::InvalidKeyFormat { diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 31eeef6dc..7733940c9 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -107,6 +107,10 @@ impl Tracker { self.mode == mode::Mode::Listed || self.mode == mode::Mode::PrivateListed } + pub fn requires_authentication(&self) -> bool { + self.is_private() + } + /// It handles an announce request. /// /// BEP 03: [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html). @@ -345,6 +349,27 @@ impl Tracker { } } + /// The only authorization process is the whitelist. + /// + /// # Errors + /// + /// Will return an error if the tracker is running in `listed` mode + /// and the infohash is not whitelisted. + pub async fn authorize(&self, info_hash: &InfoHash) -> Result<(), Error> { + if !self.is_whitelisted() { + return Ok(()); + } + + if self.is_info_hash_whitelisted(info_hash).await { + return Ok(()); + } + + return Err(Error::TorrentNotWhitelisted { + info_hash: *info_hash, + location: Location::caller(), + }); + } + /// Loading the torrents from database into memory /// /// # Errors diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 0536ab0b7..60ccae06b 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -2403,8 +2403,7 @@ mod axum_http_tracker_server { use crate::http::requests::announce::QueryBuilder; use crate::http::server::start_whitelisted_http_tracker; - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { let http_tracker_server = start_whitelisted_http_tracker(Version::Axum).await; @@ -2417,8 +2416,7 @@ mod axum_http_tracker_server { assert_torrent_not_in_whitelist_error_response(response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_allow_announcing_a_whitelisted_torrent() { let http_tracker_server = 
start_whitelisted_http_tracker(Version::Axum).await; From a8e02b35985921284f121f63182526df45017106 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 1 Mar 2023 12:31:35 +0000 Subject: [PATCH 354/435] feat(http): [#201] scrape request in listed mode --- src/tracker/mod.rs | 6 +++++- src/tracker/torrent.rs | 7 +++++++ tests/http_tracker.rs | 6 ++---- 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 7733940c9..a3e0ed4fc 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -140,7 +140,11 @@ impl Tracker { let mut scrape_data = ScrapeData::empty(); for info_hash in info_hashes { - scrape_data.add_file(info_hash, self.get_swarm_metadata(info_hash).await); + let swarm_metadata = match self.authorize(info_hash).await { + Ok(_) => self.get_swarm_metadata(info_hash).await, + Err(_) => SwarmMetadata::zeroed(), + }; + scrape_data.add_file(info_hash, swarm_metadata); } scrape_data diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index dc41b083e..aa155dfac 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -21,6 +21,13 @@ pub struct SwarmMetadata { pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) } +impl SwarmMetadata { + #[must_use] + pub fn zeroed() -> Self { + Self::default() + } +} + impl Entry { #[must_use] pub fn new() -> Entry { diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 60ccae06b..96062b46e 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -2450,8 +2450,7 @@ mod axum_http_tracker_server { use crate::http::responses::scrape::{File, ResponseBuilder}; use crate::http::server::start_whitelisted_http_tracker; - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { let http_tracker = start_whitelisted_http_tracker(Version::Axum).await; @@ -2480,8 +2479,7 @@ mod axum_http_tracker_server { 
assert_scrape_response(response, &expected_scrape_response).await; } - //#[tokio::test] - #[allow(dead_code)] + #[tokio::test] async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { let http_tracker = start_whitelisted_http_tracker(Version::Axum).await; From 2ffbf3d550b2bb09df1afba3454e96283b702b6d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 1 Mar 2023 14:25:59 +0000 Subject: [PATCH 355/435] refactor(http): [#204] remove temporary status endpoint It was only added to test the initial HTTP scaffolding for the Axum implmentation. --- src/http/axum_implementation/handlers/mod.rs | 1 - .../axum_implementation/handlers/status.rs | 12 -- src/http/axum_implementation/mod.rs | 1 - src/http/axum_implementation/resources/mod.rs | 1 - src/http/axum_implementation/resources/ok.rs | 8 - src/http/axum_implementation/responses/mod.rs | 1 - src/http/axum_implementation/responses/ok.rs | 11 -- src/http/axum_implementation/routes.rs | 4 +- tests/http_tracker.rs | 139 +----------------- 9 files changed, 2 insertions(+), 176 deletions(-) delete mode 100644 src/http/axum_implementation/handlers/status.rs delete mode 100644 src/http/axum_implementation/resources/mod.rs delete mode 100644 src/http/axum_implementation/resources/ok.rs delete mode 100644 src/http/axum_implementation/responses/ok.rs diff --git a/src/http/axum_implementation/handlers/mod.rs b/src/http/axum_implementation/handlers/mod.rs index 7cc5022e6..e6b13ae91 100644 --- a/src/http/axum_implementation/handlers/mod.rs +++ b/src/http/axum_implementation/handlers/mod.rs @@ -4,7 +4,6 @@ use crate::tracker::error::Error; pub mod announce; pub mod auth; pub mod scrape; -pub mod status; impl From for responses::error::Error { fn from(err: Error) -> Self { diff --git a/src/http/axum_implementation/handlers/status.rs b/src/http/axum_implementation/handlers/status.rs deleted file mode 100644 index 8a058b456..000000000 --- a/src/http/axum_implementation/handlers/status.rs +++ /dev/null @@ -1,12 
+0,0 @@ -/// Temporary handler for testing and debugging the new Axum implementation -/// It should be removed once the migration to Axum is finished. -use axum::response::Json; - -use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; -use crate::http::axum_implementation::resources::ok::Ok; -use crate::http::axum_implementation::responses::ok; - -#[allow(clippy::unused_async)] -pub async fn handle(remote_client_ip: RemoteClientIp) -> Json { - ok::response(&remote_client_ip) -} diff --git a/src/http/axum_implementation/mod.rs b/src/http/axum_implementation/mod.rs index d8431457a..ecc60e1f8 100644 --- a/src/http/axum_implementation/mod.rs +++ b/src/http/axum_implementation/mod.rs @@ -2,7 +2,6 @@ pub mod extractors; pub mod handlers; pub mod query; pub mod requests; -pub mod resources; pub mod responses; pub mod routes; pub mod server; diff --git a/src/http/axum_implementation/resources/mod.rs b/src/http/axum_implementation/resources/mod.rs deleted file mode 100644 index a493c2ac2..000000000 --- a/src/http/axum_implementation/resources/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod ok; diff --git a/src/http/axum_implementation/resources/ok.rs b/src/http/axum_implementation/resources/ok.rs deleted file mode 100644 index f941b9fb3..000000000 --- a/src/http/axum_implementation/resources/ok.rs +++ /dev/null @@ -1,8 +0,0 @@ -use serde::{Deserialize, Serialize}; - -use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; - -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] -pub struct Ok { - pub remote_client_ip: RemoteClientIp, -} diff --git a/src/http/axum_implementation/responses/mod.rs b/src/http/axum_implementation/responses/mod.rs index 7e8666934..bdc689056 100644 --- a/src/http/axum_implementation/responses/mod.rs +++ b/src/http/axum_implementation/responses/mod.rs @@ -1,4 +1,3 @@ pub mod announce; pub mod error; -pub mod ok; pub mod scrape; diff --git a/src/http/axum_implementation/responses/ok.rs 
b/src/http/axum_implementation/responses/ok.rs deleted file mode 100644 index dfd062b51..000000000 --- a/src/http/axum_implementation/responses/ok.rs +++ /dev/null @@ -1,11 +0,0 @@ -use axum::Json; - -use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; -use crate::http::axum_implementation::resources::ok::Ok; - -#[must_use] -pub fn response(remote_client_ip: &RemoteClientIp) -> Json { - Json(Ok { - remote_client_ip: remote_client_ip.clone(), - }) -} diff --git a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs index 21b7260ae..af987ece2 100644 --- a/src/http/axum_implementation/routes.rs +++ b/src/http/axum_implementation/routes.rs @@ -4,13 +4,11 @@ use axum::routing::get; use axum::Router; use axum_client_ip::SecureClientIpSource; -use super::handlers::{announce, scrape, status}; +use super::handlers::{announce, scrape}; use crate::tracker::Tracker; pub fn router(tracker: &Arc) -> Router { Router::new() - // Status - .route("/status", get(status::handle)) // Announce request .route("/announce", get(announce::handle_without_key).with_state(tracker.clone())) .route("/announce/:key", get(announce::handle_with_key).with_state(tracker.clone())) diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 96062b46e..2360df9ab 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -5,7 +5,7 @@ /// cargo test `warp_http_tracker_server` -- --nocapture /// ``` /// -/// Axum version ()WIP): +/// Axum version (WIP): /// ```text /// cargo test `warp_http_tracker_server` -- --nocapture /// ``` @@ -1271,143 +1271,6 @@ mod axum_http_tracker_server { // WIP: migration HTTP from Warp to Axum - use local_ip_address::local_ip; - use torrust_tracker::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; - use torrust_tracker::http::axum_implementation::resources::ok::Ok; - use torrust_tracker::http::Version; - - use crate::http::client::Client; - use 
crate::http::server::start_default_http_tracker; - - #[tokio::test] - async fn should_return_the_status() { - // This is a temporary test to test the new Axum HTTP tracker server scaffolding - - let http_tracker_server = start_default_http_tracker(Version::Axum).await; - - let client_ip = local_ip().unwrap(); - - let response = Client::bind(http_tracker_server.get_connection_info(), client_ip) - .get("status") - .await; - - let ok: Ok = serde_json::from_str(&response.text().await.unwrap()).unwrap(); - - assert_eq!( - ok, - Ok { - remote_client_ip: RemoteClientIp { - right_most_x_forwarded_for: None, - connection_info_ip: Some(client_ip) - } - } - ); - } - - mod should_get_the_remote_client_ip_from_the_http_request { - - // Temporary tests to test that the new Axum HTTP tracker gets the right remote client IP. - // Once the implementation is finished, test for announce request will cover these cases. - - use std::net::IpAddr; - use std::str::FromStr; - - use local_ip_address::local_ip; - use torrust_tracker::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; - use torrust_tracker::http::axum_implementation::resources::ok::Ok; - use torrust_tracker::http::Version; - - use crate::http::client::Client; - use crate::http::server::{start_http_tracker_on_reverse_proxy, start_public_http_tracker}; - - #[tokio::test] - async fn when_the_client_ip_is_a_local_ip_it_should_assign_that_ip() { - let http_tracker_server = start_public_http_tracker(Version::Axum).await; - - let client_ip = local_ip().unwrap(); - - let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); - - let response = client.get("status").await; - - let ok: Ok = serde_json::from_str(&response.text().await.unwrap()).unwrap(); - - assert_eq!( - ok, - Ok { - remote_client_ip: RemoteClientIp { - right_most_x_forwarded_for: None, - connection_info_ip: Some(client_ip) - } - } - ); - } - - #[tokio::test] - async fn 
when_the_client_ip_is_a_loopback_ipv4_it_should_assign_that_ip() { - let http_tracker_server = start_public_http_tracker(Version::Axum).await; - - let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); - let client_ip = loopback_ip; - - let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); - - let response = client.get("status").await; - - let ok: Ok = serde_json::from_str(&response.text().await.unwrap()).unwrap(); - - assert_eq!( - ok, - Ok { - remote_client_ip: RemoteClientIp { - right_most_x_forwarded_for: None, - connection_info_ip: Some(client_ip) - } - } - ); - } - - #[tokio::test] - async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_as_secure_ip_the_right_most_ip_in_the_x_forwarded_for_http_header( - ) { - /* - client <-> http proxy <-> tracker <-> Internet - ip: header: config: remote client ip: - 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 - */ - - let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; - - let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); - let client_ip = loopback_ip; - - let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); - - let left_most_ip = IpAddr::from_str("203.0.113.195").unwrap(); - let right_most_ip = IpAddr::from_str("150.172.238.178").unwrap(); - - let response = client - .get_with_header( - "status", - "X-Forwarded-For", - &format!("{left_most_ip},2001:db8:85a3:8d3:1319:8a2e:370:7348,{right_most_ip}"), - ) - .await; - - let ok: Ok = serde_json::from_str(&response.text().await.unwrap()).unwrap(); - - assert_eq!( - ok, - Ok { - remote_client_ip: RemoteClientIp { - right_most_x_forwarded_for: Some(right_most_ip), - connection_info_ip: Some(client_ip) - } - } - ); - } - } - mod for_all_config_modes { mod and_running_on_reverse_proxy { From a9e3a33c0592f642f77faca84d84c46d25ca527a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 1 Mar 2023 18:10:49 +0000 Subject: [PATCH 
356/435] refactor(http): extract Axum extractor for the URL path param key --- .../axum_implementation/extractors/key.rs | 55 +++++++++++++++++++ .../axum_implementation/extractors/mod.rs | 1 + .../axum_implementation/handlers/announce.rs | 16 +----- src/http/axum_implementation/handlers/auth.rs | 6 +- .../axum_implementation/handlers/scrape.rs | 18 +----- 5 files changed, 66 insertions(+), 30 deletions(-) create mode 100644 src/http/axum_implementation/extractors/key.rs diff --git a/src/http/axum_implementation/extractors/key.rs b/src/http/axum_implementation/extractors/key.rs new file mode 100644 index 000000000..6cc2f13e8 --- /dev/null +++ b/src/http/axum_implementation/extractors/key.rs @@ -0,0 +1,55 @@ +use std::panic::Location; + +use axum::async_trait; +use axum::extract::{FromRequestParts, Path}; +use axum::http::request::Parts; +use axum::response::{IntoResponse, Response}; + +use crate::http::axum_implementation::handlers::auth::{self, KeyIdParam}; +use crate::http::axum_implementation::responses; +use crate::tracker::auth::KeyId; + +pub struct ExtractKeyId(pub KeyId); + +#[async_trait] +impl FromRequestParts for ExtractKeyId +where + S: Send + Sync, +{ + type Rejection = Response; + + async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + match Path::::from_request_parts(parts, state).await { + Ok(key_id_param) => { + let Ok(key_id) = key_id_param.0.value().parse::() else { + return Err(responses::error::Error::from( + auth::Error::InvalidKeyFormat { + location: Location::caller() + }) + .into_response()) + }; + Ok(ExtractKeyId(key_id)) + } + Err(rejection) => match rejection { + axum::extract::rejection::PathRejection::FailedToDeserializePathParams(_) => { + return Err(responses::error::Error::from(auth::Error::InvalidKeyFormat { + location: Location::caller(), + }) + .into_response()) + } + axum::extract::rejection::PathRejection::MissingPathParams(_) => { + return Err(responses::error::Error::from(auth::Error::MissingAuthKey { + 
location: Location::caller(), + }) + .into_response()) + } + _ => { + return Err(responses::error::Error::from(auth::Error::CannotExtractKeyParam { + location: Location::caller(), + }) + .into_response()) + } + }, + } + } +} diff --git a/src/http/axum_implementation/extractors/mod.rs b/src/http/axum_implementation/extractors/mod.rs index 380eeda6d..e6d9e8c67 100644 --- a/src/http/axum_implementation/extractors/mod.rs +++ b/src/http/axum_implementation/extractors/mod.rs @@ -1,4 +1,5 @@ pub mod announce_request; +pub mod key; pub mod peer_ip; pub mod remote_client_ip; pub mod scrape_request; diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index e4cd476fa..93dbc8115 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -3,12 +3,12 @@ use std::panic::Location; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use axum::extract::{Path, State}; +use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; -use super::auth::KeyIdParam; use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; +use crate::http::axum_implementation::extractors::key::ExtractKeyId; use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::handlers::auth; @@ -16,7 +16,6 @@ use crate::http::axum_implementation::requests::announce::{Announce, Compact, Ev use crate::http::axum_implementation::responses::{self, announce}; use crate::http::axum_implementation::services; use crate::protocol::clock::{Current, Time}; -use crate::tracker::auth::KeyId; use crate::tracker::peer::Peer; use crate::tracker::Tracker; @@ -42,20 +41,11 @@ pub async fn handle_without_key( pub async fn handle_with_key( State(tracker): State>, ExtractRequest(announce_request): ExtractRequest, - 
Path(key_id_param): Path, + ExtractKeyId(key_id): ExtractKeyId, remote_client_ip: RemoteClientIp, ) -> Response { debug!("http announce request: {:#?}", announce_request); - // todo: extract to Axum extractor. Duplicate code in `scrape` handler. - let Ok(key_id) = key_id_param.value().parse::() else { - return responses::error::Error::from( - auth::Error::InvalidKeyFormat { - location: Location::caller() - }) - .into_response() - }; - match tracker.authenticate(&key_id).await { Ok(_) => (), Err(error) => return responses::error::Error::from(error).into_response(), diff --git a/src/http/axum_implementation/handlers/auth.rs b/src/http/axum_implementation/handlers/auth.rs index 366526664..5673ea851 100644 --- a/src/http/axum_implementation/handlers/auth.rs +++ b/src/http/axum_implementation/handlers/auth.rs @@ -18,10 +18,12 @@ impl KeyIdParam { #[derive(Debug, Error)] pub enum Error { - #[error("Missing authentication key for private tracker. Error in {location}")] + #[error("Missing authentication key param for private tracker. Error in {location}")] MissingAuthKey { location: &'static Location<'static> }, - #[error("Invalid format authentication key. Error in {location}")] + #[error("Invalid format for authentication key param. Error in {location}")] InvalidKeyFormat { location: &'static Location<'static> }, + #[error("Cannot extract authentication key param from URL path. 
Error in {location}")] + CannotExtractKeyParam { location: &'static Location<'static> }, } impl From for responses::error::Error { diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 649d630b0..19d902f8e 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -1,18 +1,15 @@ -use std::panic::Location; use std::sync::Arc; -use axum::extract::{Path, State}; +use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; -use super::auth::KeyIdParam; +use crate::http::axum_implementation::extractors::key::ExtractKeyId; use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; -use crate::http::axum_implementation::handlers::auth; use crate::http::axum_implementation::requests::scrape::Scrape; use crate::http::axum_implementation::{responses, services}; -use crate::tracker::auth::KeyId; use crate::tracker::Tracker; #[allow(clippy::unused_async)] @@ -34,20 +31,11 @@ pub async fn handle_without_key( pub async fn handle_with_key( State(tracker): State>, ExtractRequest(scrape_request): ExtractRequest, - Path(key_id_param): Path, + ExtractKeyId(key_id): ExtractKeyId, remote_client_ip: RemoteClientIp, ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); - // todo: extract to Axum extractor. Duplicate code in `announce` handler. 
- let Ok(key_id) = key_id_param.value().parse::() else { - return responses::error::Error::from( - auth::Error::InvalidKeyFormat { - location: Location::caller() - }) - .into_response() - }; - match tracker.authenticate(&key_id).await { Ok(_) => (), Err(_) => return handle_fake_scrape(&tracker, &scrape_request, &remote_client_ip).await, From 2ddefa87fa65db43a3747a328a28510e2ca6798f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 2 Mar 2023 11:45:54 +0000 Subject: [PATCH 357/435] feat: add .coverage dir to .gitignore See: https://github.com/torrust/torrust-tracker/discussions/209 --- .gitignore | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/.gitignore b/.gitignore index d574298da..6b58dcb45 100644 --- a/.gitignore +++ b/.gitignore @@ -1,12 +1,11 @@ .env -/target **/*.rs.bk -/database.json.bz2 -/database.db +/.coverage/ /.idea/ +/.vscode/launch.json /config.toml /data.db -/.vscode/launch.json +/database.db +/database.json.bz2 /storage/ - - +/target From ee5b0886ec510b1c775a7664c58ca8070c2e8074 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 2 Mar 2023 11:49:40 +0000 Subject: [PATCH 358/435] docs(http): add comment for duplicate struct info --- src/tracker/torrent.rs | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index aa155dfac..23ca6886e 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -14,6 +14,8 @@ pub struct Entry { pub completed: u32, } +/// Swarm metadata dictionary in the scrape response. +/// BEP 48: #[derive(Debug, PartialEq, Default)] pub struct SwarmMetadata { pub complete: u32, // The number of active peers that have completed downloading (seeders) @@ -28,6 +30,14 @@ impl SwarmMetadata { } } +/// Swarm statistics. Alternative struct for swarm metadata in scrape response. 
+#[derive(Debug)] +pub struct SwamStats { + pub completed: u32, // The number of peers that have ever completed downloading + pub seeders: u32, // The number of active peers that have completed downloading (seeders) + pub leechers: u32, // The number of active peers that have not completed downloading (leechers) +} + impl Entry { #[must_use] pub fn new() -> Entry { @@ -111,13 +121,6 @@ impl Default for Entry { } } -#[derive(Debug)] -pub struct SwamStats { - pub completed: u32, - pub seeders: u32, - pub leechers: u32, -} - #[cfg(test)] mod tests { From fca5353fb825736bc2d86ed477fd25c306245a8b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 2 Mar 2023 16:43:56 +0000 Subject: [PATCH 359/435] test(tracker): [#207] add test for Tracker::announce --- src/tracker/mod.rs | 388 +++++++++++++++++++++++++++++------------ src/tracker/torrent.rs | 2 +- 2 files changed, 273 insertions(+), 117 deletions(-) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index a3e0ed4fc..c2d66244a 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -44,6 +44,7 @@ pub struct TorrentsMetrics { pub torrents: u64, } +#[derive(Debug, PartialEq, Default)] pub struct AnnounceData { pub peers: Vec, pub swam_stats: SwamStats, @@ -523,12 +524,14 @@ mod tests { mod the_tracker { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::str::FromStr; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use crate::config::{ephemeral_configuration, Configuration}; use crate::protocol::clock::DurationSinceUnixEpoch; + use crate::protocol::info_hash::InfoHash; use crate::tracker::peer::{self, Peer}; use crate::tracker::statistics::Keeper; use crate::tracker::{TorrentsMetrics, Tracker}; @@ -555,6 +558,62 @@ mod tests { } } + fn info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() + } + + // The client peer IP + fn peer_ip() -> IpAddr { + IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) + } + + /// Sample peer whose state is not 
relevant for the tests + fn sample_peer() -> Peer { + complete_peer() + } + + /// Sample peer when for tests that need more than one peer + fn peer1() -> Peer { + Peer { + peer_id: peer::Id(*b"-qB00000000000000001"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Completed, + } + } + + /// Sample peer when for tests that need more than one peer + fn peer2() -> Peer { + Peer { + peer_id: peer::Id(*b"-qB00000000000000002"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8082), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Completed, + } + } + + fn seeder() -> Peer { + complete_peer() + } + + fn leecher() -> Peer { + incomplete_peer() + } + + fn started_peer() -> Peer { + incomplete_peer() + } + + fn completed_peer() -> Peer { + complete_peer() + } + /// A peer that has completed downloading. 
fn complete_peer() -> Peer { Peer { @@ -598,181 +657,278 @@ mod tests { ); } - mod handling_an_announce_request { - mod should_assign_the_ip_to_the_peer { - - use std::net::{IpAddr, Ipv4Addr}; - - use crate::tracker::assign_ip_address_to_peer; - - #[test] - fn using_the_source_ip_instead_of_the_ip_in_the_announce_request() { - let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); + mod for_all_config_modes { - let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + mod handling_an_announce_request { - assert_eq!(peer_ip, remote_ip); - } + use crate::tracker::tests::the_tracker::{info_hash, peer1, peer2, peer_ip, sample_peer, tracker_factory}; - mod and_when_the_client_ip_is_a_ipv4_loopback_ip { + mod should_assign_the_ip_to_the_peer { - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - use std::str::FromStr; + use std::net::{IpAddr, Ipv4Addr}; use crate::tracker::assign_ip_address_to_peer; #[test] - fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { - let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + fn using_the_source_ip_instead_of_the_ip_in_the_announce_request() { + let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); let peer_ip = assign_ip_address_to_peer(&remote_ip, None); assert_eq!(peer_ip, remote_ip); } - #[test] - fn it_should_use_the_external_tracker_ip_in_tracker_configuration_if_it_is_defined() { - let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + mod and_when_the_client_ip_is_a_ipv4_loopback_ip { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::str::FromStr; + + use crate::tracker::assign_ip_address_to_peer; + + #[test] + fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + assert_eq!(peer_ip, remote_ip); + } + + #[test] + fn it_should_use_the_external_tracker_ip_in_tracker_configuration_if_it_is_defined() { + let remote_ip = 
IpAddr::V4(Ipv4Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv6_ip( + ) { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); - let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + let tracker_external_ip = + IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); - let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); - assert_eq!(peer_ip, tracker_external_ip); + assert_eq!(peer_ip, tracker_external_ip); + } } - #[test] - fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv6_ip( - ) { - let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + mod and_when_client_ip_is_a_ipv6_loopback_ip { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::str::FromStr; + + use crate::tracker::assign_ip_address_to_peer; + + #[test] + fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + assert_eq!(peer_ip, remote_ip); + } - let tracker_external_ip = - IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + #[test] + fn it_should_use_the_external_ip_in_tracker_configuration_if_it_is_defined() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); - let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + let tracker_external_ip = + IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); - assert_eq!(peer_ip, 
tracker_external_ip); + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv4_ip( + ) { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } } } - mod and_when_client_ip_is_a_ipv6_loopback_ip { + #[tokio::test] + async fn it_should_return_the_announce_data_with_an_empty_peer_list_when_it_is_the_first_announced_peer() { + let tracker = tracker_factory(); - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - use std::str::FromStr; + let mut peer = sample_peer(); - use crate::tracker::assign_ip_address_to_peer; + let announce_data = tracker.announce(&info_hash(), &mut peer, &peer_ip()).await; - #[test] - fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { - let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + assert_eq!(announce_data.peers, vec![]); + } - let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + #[tokio::test] + async fn it_should_return_the_announce_data_with_the_previously_announced_peers() { + let tracker = tracker_factory(); - assert_eq!(peer_ip, remote_ip); + let mut previously_announced_peer = peer1(); + tracker + .announce(&info_hash(), &mut previously_announced_peer, &peer_ip()) + .await; + + let mut peer = peer2(); + let announce_data = tracker.announce(&info_hash(), &mut peer, &peer_ip()).await; + + assert_eq!(announce_data.peers, vec![previously_announced_peer]); + } + + mod it_should_update_the_swarm_stats_for_the_torrent { + + use crate::tracker::tests::the_tracker::{ + completed_peer, info_hash, leecher, peer_ip, seeder, started_peer, tracker_factory, + }; + + #[tokio::test] + async fn 
when_the_peer_is_a_seeder() { + let tracker = tracker_factory(); + + let mut peer = seeder(); + + let announce_data = tracker.announce(&info_hash(), &mut peer, &peer_ip()).await; + + assert_eq!(announce_data.swam_stats.seeders, 1); } - #[test] - fn it_should_use_the_external_ip_in_tracker_configuration_if_it_is_defined() { - let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + #[tokio::test] + async fn when_the_peer_is_a_leecher() { + let tracker = tracker_factory(); - let tracker_external_ip = - IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + let mut peer = leecher(); - let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + let announce_data = tracker.announce(&info_hash(), &mut peer, &peer_ip()).await; - assert_eq!(peer_ip, tracker_external_ip); + assert_eq!(announce_data.swam_stats.leechers, 1); } - #[test] - fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv4_ip( - ) { - let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + #[tokio::test] + async fn when_a_previously_announced_started_peer_has_completed_downloading() { + let tracker = tracker_factory(); - let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + // We have to announce with "started" event because peer does not count if peer was not previously known + let mut started_peer = started_peer(); + tracker.announce(&info_hash(), &mut started_peer, &peer_ip()).await; - let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + let mut completed_peer = completed_peer(); + let announce_data = tracker.announce(&info_hash(), &mut completed_peer, &peer_ip()).await; - assert_eq!(peer_ip, tracker_external_ip); + assert_eq!(announce_data.swam_stats.completed, 1); } } } - } - mod handling_a_scrape_request { + mod handling_a_scrape_request { - use std::net::{IpAddr, Ipv4Addr}; + use std::net::{IpAddr, Ipv4Addr}; - use 
crate::protocol::info_hash::InfoHash; - use crate::tracker::tests::the_tracker::{complete_peer, incomplete_peer, tracker_factory}; - use crate::tracker::{ScrapeData, SwarmMetadata}; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::tests::the_tracker::{complete_peer, incomplete_peer, tracker_factory}; + use crate::tracker::{ScrapeData, SwarmMetadata}; - #[tokio::test] - async fn it_should_return_a_zeroed_swarm_metadata_for_the_requested_file_if_the_tracker_does_not_have_that_torrent() { - let tracker = tracker_factory(); + #[tokio::test] + async fn it_should_return_a_zeroed_swarm_metadata_for_the_requested_file_if_the_tracker_does_not_have_that_torrent( + ) { + let tracker = tracker_factory(); - let info_hashes = vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()]; + let info_hashes = vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()]; - let scrape_data = tracker.scrape(&info_hashes).await; + let scrape_data = tracker.scrape(&info_hashes).await; - let mut expected_scrape_data = ScrapeData::empty(); + let mut expected_scrape_data = ScrapeData::empty(); - expected_scrape_data.add_file_with_no_metadata(&info_hashes[0]); + expected_scrape_data.add_file_with_no_metadata(&info_hashes[0]); - assert_eq!(scrape_data, expected_scrape_data); - } + assert_eq!(scrape_data, expected_scrape_data); + } - #[tokio::test] - async fn it_should_return_the_swarm_metadata_for_the_requested_file_if_the_tracker_has_that_torrent() { - let tracker = tracker_factory(); - - let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); - - // Announce a "complete" peer for the torrent - let mut complete_peer = complete_peer(); - tracker - .announce(&info_hash, &mut complete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10))) - .await; - - // Announce an "incomplete" peer for the torrent - let mut incomplete_peer = incomplete_peer(); - tracker - .announce(&info_hash, &mut incomplete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11))) - 
.await; - - // Scrape - let scrape_data = tracker.scrape(&vec![info_hash]).await; - - // The expected swarm metadata for the file - let mut expected_scrape_data = ScrapeData::empty(); - expected_scrape_data.add_file( - &info_hash, - SwarmMetadata { - complete: 0, // the "complete" peer does not count because it was not previously known - downloaded: 0, - incomplete: 1, // the "incomplete" peer we have just announced - }, - ); - - assert_eq!(scrape_data, expected_scrape_data); - } + #[tokio::test] + async fn it_should_return_the_swarm_metadata_for_the_requested_file_if_the_tracker_has_that_torrent() { + let tracker = tracker_factory(); + + let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); + + // Announce a "complete" peer for the torrent + let mut complete_peer = complete_peer(); + tracker + .announce(&info_hash, &mut complete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10))) + .await; + + // Announce an "incomplete" peer for the torrent + let mut incomplete_peer = incomplete_peer(); + tracker + .announce(&info_hash, &mut incomplete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11))) + .await; + + // Scrape + let scrape_data = tracker.scrape(&vec![info_hash]).await; + + // The expected swarm metadata for the file + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file( + &info_hash, + SwarmMetadata { + complete: 0, // the "complete" peer does not count because it was not previously known + downloaded: 0, + incomplete: 1, // the "incomplete" peer we have just announced + }, + ); + + assert_eq!(scrape_data, expected_scrape_data); + } - #[tokio::test] - async fn it_should_allow_scraping_for_multiple_torrents() { - let tracker = tracker_factory(); + #[tokio::test] + async fn it_should_allow_scraping_for_multiple_torrents() { + let tracker = tracker_factory(); - let info_hashes = vec![ - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), - "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1".parse::().unwrap(), - 
]; + let info_hashes = vec![ + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1".parse::().unwrap(), + ]; - let scrape_data = tracker.scrape(&info_hashes).await; + let scrape_data = tracker.scrape(&info_hashes).await; - let mut expected_scrape_data = ScrapeData::empty(); - expected_scrape_data.add_file_with_no_metadata(&info_hashes[0]); - expected_scrape_data.add_file_with_no_metadata(&info_hashes[1]); + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file_with_no_metadata(&info_hashes[0]); + expected_scrape_data.add_file_with_no_metadata(&info_hashes[1]); - assert_eq!(scrape_data, expected_scrape_data); + assert_eq!(scrape_data, expected_scrape_data); + } } } + + mod configured_as_whitelisted { + + mod handling_an_announce_request {} + + mod handling_an_scrape_request {} + } + + mod configured_as_private { + + mod handling_an_announce_request {} + + mod handling_an_scrape_request {} + } + + mod configured_as_private_and_whitelisted { + + mod handling_an_announce_request {} + + mod handling_an_scrape_request {} + } } } diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 23ca6886e..3362234f0 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -31,7 +31,7 @@ impl SwarmMetadata { } /// Swarm statistics. Alternative struct for swarm metadata in scrape response. 
-#[derive(Debug)] +#[derive(Debug, PartialEq, Default)] pub struct SwamStats { pub completed: u32, // The number of peers that have ever completed downloading pub seeders: u32, // The number of active peers that have completed downloading (seeders) From d50372fbf064413e2d05aac9a5be2d4b13136083 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 2 Mar 2023 16:44:55 +0000 Subject: [PATCH 360/435] feat(cargo): add cargo alias to generate coverage reports --- .cargo/config.toml | 3 +++ cSpell.json | 1 + 2 files changed, 4 insertions(+) create mode 100644 .cargo/config.toml diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 000000000..2fb2fe92d --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,3 @@ +[alias] +cov = "llvm-cov --lcov --output-path=./coverage/lcov.info" +cov-html = "llvm-cov --html" diff --git a/cSpell.json b/cSpell.json index b8aceb568..4a9b11ce9 100644 --- a/cSpell.json +++ b/cSpell.json @@ -31,6 +31,7 @@ "infohashes", "infoschema", "intervali", + "lcov", "leecher", "leechers", "libtorrent", From 1e7eff59d35e50cea6d110bdf7528f02af3f011d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 2 Mar 2023 17:14:26 +0000 Subject: [PATCH 361/435] docs(tracker): add code-review --- src/tracker/mod.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index c2d66244a..d638f6601 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -120,6 +120,17 @@ impl Tracker { // a tuple with the new peer and the announce data: (Peer, AnnounceData). // It could even be a different struct: `StoredPeer` or `PublicPeer`. + // code-review: in the `scrape` function we perform an authorization check. + // We check if the torrent is whitelisted. Should we also check authorization here? + // I think so because the `Tracker` has the responsibility for checking authentication and authorization. 
+ // The `Tracker` has delegated that responsibility to the handlers + // (because we want to return a friendly error response) but that does not mean we should + // double-check authorization at this domain level too. + // I would propose to return a `Result` here. + // Besides, regarding authentication the `Tracker` is also responsible for authentication but + // we are actually handling authentication at the handlers level. So I would extract that + // responsibility into another authentication service. + peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.get_ext_ip())); let swam_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; @@ -614,7 +625,9 @@ mod tests { complete_peer() } - /// A peer that has completed downloading. + /// A peer that counts as `complete` is swarm metadata + /// IMPORTANT!: it only counts if the it has been announce at least once before + /// announcing the `AnnounceEvent::Completed` event. fn complete_peer() -> Peer { Peer { peer_id: peer::Id(*b"-qB00000000000000000"), @@ -627,7 +640,7 @@ mod tests { } } - /// A peer that has NOT completed downloading. 
+ /// A peer that counts as `incomplete` is swarm metadata fn incomplete_peer() -> Peer { Peer { peer_id: peer::Id(*b"-qB00000000000000000"), From ed58a32ce32d0644804e0f97120bf5e32d4b3c82 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 2 Mar 2023 21:23:05 +0000 Subject: [PATCH 362/435] test(tracker): [#207] add test for Tracker::scrape --- .../axum_implementation/services/scrape.rs | 4 +- src/tracker/mod.rs | 146 ++++++++++++------ 2 files changed, 99 insertions(+), 51 deletions(-) diff --git a/src/http/axum_implementation/services/scrape.rs b/src/http/axum_implementation/services/scrape.rs index 30f00a47b..923acf3c4 100644 --- a/src/http/axum_implementation/services/scrape.rs +++ b/src/http/axum_implementation/services/scrape.rs @@ -15,11 +15,9 @@ pub async fn invoke(tracker: &Arc, info_hashes: &Vec, origina /// When the peer is not authenticated and the tracker is running in `private` mode, /// the tracker returns empty stats for all the torrents. pub async fn fake_invoke(tracker: &Arc, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { - let scrape_data = tracker.empty_scrape_for(info_hashes); - send_scrape_event(original_peer_ip, tracker).await; - scrape_data + ScrapeData::zeroed(info_hashes) } async fn send_scrape_event(original_peer_ip: &IpAddr, tracker: &Arc) { diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index d638f6601..6fa1d38fc 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -64,12 +64,23 @@ impl ScrapeData { Self { files } } + #[must_use] + pub fn zeroed(info_hashes: &Vec) -> Self { + let mut scrape_data = Self::empty(); + + for info_hash in info_hashes { + scrape_data.add_file(info_hash, SwarmMetadata::zeroed()); + } + + scrape_data + } + pub fn add_file(&mut self, info_hash: &InfoHash, swarm_metadata: SwarmMetadata) { self.files.insert(*info_hash, swarm_metadata); } - pub fn add_file_with_no_metadata(&mut self, info_hash: &InfoHash) { - self.files.insert(*info_hash, SwarmMetadata::default()); + pub fn 
add_file_with_zeroed_metadata(&mut self, info_hash: &InfoHash) { + self.files.insert(*info_hash, SwarmMetadata::zeroed()); } } @@ -162,17 +173,6 @@ impl Tracker { scrape_data } - // It return empty swarm metadata for all the infohashes. - pub fn empty_scrape_for(&self, info_hashes: &Vec) -> ScrapeData { - let mut scrape_data = ScrapeData::empty(); - - for info_hash in info_hashes { - scrape_data.add_file(info_hash, SwarmMetadata::default()); - } - - scrape_data - } - async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { let torrents = self.get_torrents().await; match torrents.get(info_hash) { @@ -543,25 +543,31 @@ mod tests { use crate::config::{ephemeral_configuration, Configuration}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; + use crate::tracker::mode::Mode; use crate::tracker::peer::{self, Peer}; use crate::tracker::statistics::Keeper; use crate::tracker::{TorrentsMetrics, Tracker}; - pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral_configuration()) + pub fn public_tracker() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Public; + tracker_factory(configuration) } - pub fn tracker_factory() -> Tracker { - // code-review: the tracker initialization is duplicated in many places. Consider make this function public. + pub fn whitelisted_tracker() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Listed; + tracker_factory(configuration) + } - // Configuration - let configuration = tracker_configuration(); + pub fn tracker_factory(configuration: Configuration) -> Tracker { + // code-review: the tracker initialization is duplicated in many places. Consider make this function public. 
// Initialize stats tracker let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); // Initialize Torrust tracker - match Tracker::new(&configuration, Some(stats_event_sender), stats_repository) { + match Tracker::new(&Arc::new(configuration), Some(stats_event_sender), stats_repository) { Ok(tracker) => tracker, Err(error) => { panic!("{}", error) @@ -569,7 +575,7 @@ mod tests { } } - fn info_hash() -> InfoHash { + fn sample_info_hash() -> InfoHash { "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() } @@ -584,7 +590,7 @@ mod tests { } /// Sample peer when for tests that need more than one peer - fn peer1() -> Peer { + fn sample_peer_1() -> Peer { Peer { peer_id: peer::Id(*b"-qB00000000000000001"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), @@ -597,7 +603,7 @@ mod tests { } /// Sample peer when for tests that need more than one peer - fn peer2() -> Peer { + fn sample_peer_2() -> Peer { Peer { peer_id: peer::Id(*b"-qB00000000000000002"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8082), @@ -655,7 +661,7 @@ mod tests { #[tokio::test] async fn should_collect_torrent_metrics() { - let tracker = tracker_factory(); + let tracker = public_tracker(); let torrents_metrics = tracker.get_torrents_metrics().await; @@ -674,7 +680,9 @@ mod tests { mod handling_an_announce_request { - use crate::tracker::tests::the_tracker::{info_hash, peer1, peer2, peer_ip, sample_peer, tracker_factory}; + use crate::tracker::tests::the_tracker::{ + peer_ip, public_tracker, sample_info_hash, sample_peer, sample_peer_1, sample_peer_2, + }; mod should_assign_the_ip_to_the_peer { @@ -776,26 +784,26 @@ mod tests { #[tokio::test] async fn it_should_return_the_announce_data_with_an_empty_peer_list_when_it_is_the_first_announced_peer() { - let tracker = tracker_factory(); + let tracker = public_tracker(); let mut peer = sample_peer(); - let announce_data = tracker.announce(&info_hash(), &mut peer, &peer_ip()).await; 
+ let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; assert_eq!(announce_data.peers, vec![]); } #[tokio::test] async fn it_should_return_the_announce_data_with_the_previously_announced_peers() { - let tracker = tracker_factory(); + let tracker = public_tracker(); - let mut previously_announced_peer = peer1(); + let mut previously_announced_peer = sample_peer_1(); tracker - .announce(&info_hash(), &mut previously_announced_peer, &peer_ip()) + .announce(&sample_info_hash(), &mut previously_announced_peer, &peer_ip()) .await; - let mut peer = peer2(); - let announce_data = tracker.announce(&info_hash(), &mut peer, &peer_ip()).await; + let mut peer = sample_peer_2(); + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; assert_eq!(announce_data.peers, vec![previously_announced_peer]); } @@ -803,41 +811,41 @@ mod tests { mod it_should_update_the_swarm_stats_for_the_torrent { use crate::tracker::tests::the_tracker::{ - completed_peer, info_hash, leecher, peer_ip, seeder, started_peer, tracker_factory, + completed_peer, leecher, peer_ip, public_tracker, sample_info_hash, seeder, started_peer, }; #[tokio::test] async fn when_the_peer_is_a_seeder() { - let tracker = tracker_factory(); + let tracker = public_tracker(); let mut peer = seeder(); - let announce_data = tracker.announce(&info_hash(), &mut peer, &peer_ip()).await; + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; assert_eq!(announce_data.swam_stats.seeders, 1); } #[tokio::test] async fn when_the_peer_is_a_leecher() { - let tracker = tracker_factory(); + let tracker = public_tracker(); let mut peer = leecher(); - let announce_data = tracker.announce(&info_hash(), &mut peer, &peer_ip()).await; + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; assert_eq!(announce_data.swam_stats.leechers, 1); } #[tokio::test] async fn 
when_a_previously_announced_started_peer_has_completed_downloading() { - let tracker = tracker_factory(); + let tracker = public_tracker(); // We have to announce with "started" event because peer does not count if peer was not previously known let mut started_peer = started_peer(); - tracker.announce(&info_hash(), &mut started_peer, &peer_ip()).await; + tracker.announce(&sample_info_hash(), &mut started_peer, &peer_ip()).await; let mut completed_peer = completed_peer(); - let announce_data = tracker.announce(&info_hash(), &mut completed_peer, &peer_ip()).await; + let announce_data = tracker.announce(&sample_info_hash(), &mut completed_peer, &peer_ip()).await; assert_eq!(announce_data.swam_stats.completed, 1); } @@ -849,13 +857,13 @@ mod tests { use std::net::{IpAddr, Ipv4Addr}; use crate::protocol::info_hash::InfoHash; - use crate::tracker::tests::the_tracker::{complete_peer, incomplete_peer, tracker_factory}; + use crate::tracker::tests::the_tracker::{complete_peer, incomplete_peer, public_tracker}; use crate::tracker::{ScrapeData, SwarmMetadata}; #[tokio::test] async fn it_should_return_a_zeroed_swarm_metadata_for_the_requested_file_if_the_tracker_does_not_have_that_torrent( ) { - let tracker = tracker_factory(); + let tracker = public_tracker(); let info_hashes = vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()]; @@ -863,14 +871,14 @@ mod tests { let mut expected_scrape_data = ScrapeData::empty(); - expected_scrape_data.add_file_with_no_metadata(&info_hashes[0]); + expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[0]); assert_eq!(scrape_data, expected_scrape_data); } #[tokio::test] async fn it_should_return_the_swarm_metadata_for_the_requested_file_if_the_tracker_has_that_torrent() { - let tracker = tracker_factory(); + let tracker = public_tracker(); let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); @@ -905,7 +913,7 @@ mod tests { #[tokio::test] async fn 
it_should_allow_scraping_for_multiple_torrents() { - let tracker = tracker_factory(); + let tracker = public_tracker(); let info_hashes = vec![ "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), @@ -915,8 +923,8 @@ mod tests { let scrape_data = tracker.scrape(&info_hashes).await; let mut expected_scrape_data = ScrapeData::empty(); - expected_scrape_data.add_file_with_no_metadata(&info_hashes[0]); - expected_scrape_data.add_file_with_no_metadata(&info_hashes[1]); + expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[0]); + expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[1]); assert_eq!(scrape_data, expected_scrape_data); } @@ -927,7 +935,49 @@ mod tests { mod handling_an_announce_request {} - mod handling_an_scrape_request {} + mod handling_an_scrape_request { + + use crate::protocol::info_hash::InfoHash; + use crate::tracker::tests::the_tracker::{ + complete_peer, incomplete_peer, peer_ip, sample_info_hash, whitelisted_tracker, + }; + use crate::tracker::torrent::SwarmMetadata; + use crate::tracker::ScrapeData; + + #[test] + fn it_should_be_able_to_build_a_zeroed_scrape_data_for_a_list_of_info_hashes() { + // Zeroed scrape data is used when the authentication for the scrape request fails. 
+ + let sample_info_hash = sample_info_hash(); + + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file_with_zeroed_metadata(&sample_info_hash); + + assert_eq!(ScrapeData::zeroed(&vec![sample_info_hash]), expected_scrape_data); + } + + #[tokio::test] + async fn it_should_return_the_zeroed_swarm_metadata_for_the_requested_file_if_it_is_not_whitelisted() { + let tracker = whitelisted_tracker(); + + let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); + + let mut peer = incomplete_peer(); + tracker.announce(&info_hash, &mut peer, &peer_ip()).await; + + // Announce twice to force non zeroed swarm metadata + let mut peer = complete_peer(); + tracker.announce(&info_hash, &mut peer, &peer_ip()).await; + + let scrape_data = tracker.scrape(&vec![info_hash]).await; + + // The expected zeroed swarm metadata for the file + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file(&info_hash, SwarmMetadata::zeroed()); + + assert_eq!(scrape_data, expected_scrape_data); + } + } } mod configured_as_private { From d1a7b7fb2493877cb6db814d54fb5709b8f7d5be Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 2 Mar 2023 21:39:05 +0000 Subject: [PATCH 363/435] fix(cargo): fix output path in cargo allias --- .cargo/config.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index 2fb2fe92d..e3d31cf7f 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,3 +1,3 @@ [alias] -cov = "llvm-cov --lcov --output-path=./coverage/lcov.info" +cov = "llvm-cov --lcov --output-path=./.coverage/lcov.info" cov-html = "llvm-cov --html" From fad683425c284a9c29ee6fa7f597e30767668950 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 3 Mar 2023 13:29:45 +0000 Subject: [PATCH 364/435] test(tracker): [#207] add tests for HTTP authentication in Tracker The `Tracker` is responsible for the authentication in the HTTP protocol. 
--- src/tracker/auth.rs | 3 ++ src/tracker/mod.rs | 103 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 106 insertions(+) diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 22f734e48..01de7a619 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -62,6 +62,9 @@ pub fn verify(auth_key: &ExpiringKey) -> Result<(), Error> { #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub struct ExpiringKey { pub id: KeyId, + // todo: we can remove the `Option`. An `ExpiringKey` that does not expire + // is a `KeyId`. In other words, all `ExpiringKeys` must have an + // expiration time. pub valid_until: Option, } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 6fa1d38fc..b8dadab8f 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -199,6 +199,7 @@ impl Tracker { /// /// Will panic if key cannot be converted into a valid `KeyId`. pub async fn remove_auth_key(&self, key: &str) -> Result<(), databases::error::Error> { + // todo: change argument `key: &str` to `key_id: &KeyId` self.database.remove_key_from_keys(key).await?; self.keys.write().await.remove(&key.parse::().unwrap()); Ok(()) @@ -208,6 +209,8 @@ impl Tracker { /// /// Will return a `key::Error` if unable to get any `auth_key`. pub async fn verify_auth_key(&self, key_id: &KeyId) -> Result<(), auth::Error> { + // code-review: this function is public only because it's used in a test. + // We should change the test and make it private. match self.keys.read().await.get(key_id) { None => Err(auth::Error::UnableToReadKey { location: Location::caller(), @@ -319,6 +322,12 @@ impl Tracker { /// /// Will return a `torrent::Error::TorrentNotWhitelisted` if the the Tracker is in listed mode and the `info_hash` is not whitelisted. pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), Error> { + // todo: this is a deprecated method. + // We're splitting authentication and authorization responsibilities. 
+ // Use `authenticate` and `authorize` instead. + + // Authentication + // no authentication needed in public mode if self.is_public() { return Ok(()); @@ -343,6 +352,8 @@ impl Tracker { } } + // Authorization + // check if info_hash is whitelisted if self.is_whitelisted() && !self.is_info_hash_whitelisted(info_hash).await { return Err(Error::TorrentNotWhitelisted { @@ -554,6 +565,12 @@ mod tests { tracker_factory(configuration) } + pub fn private_tracker() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Private; + tracker_factory(configuration) + } + pub fn whitelisted_tracker() -> Tracker { let mut configuration = ephemeral_configuration(); configuration.mode = Mode::Listed; @@ -982,6 +999,92 @@ mod tests { mod configured_as_private { + mod handling_authentication { + use std::str::FromStr; + use std::time::Duration; + + use crate::tracker::auth; + use crate::tracker::tests::the_tracker::private_tracker; + + #[tokio::test] + async fn it_should_generate_the_expiring_authentication_keys() { + let tracker = private_tracker(); + + let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + + assert_eq!(key.valid_until.unwrap(), Duration::from_secs(100)); + } + + #[tokio::test] + async fn it_should_authenticate_a_peer_by_using_a_key() { + let tracker = private_tracker(); + + let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + + let result = tracker.authenticate(&key.id()).await; + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn it_should_fail_authenticating_a_peer_when_it_uses_an_unregistered_key() { + let tracker = private_tracker(); + + let unregistered_key_id = auth::KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let result = tracker.authenticate(&unregistered_key_id).await; + + assert!(result.is_err()); + } + + #[tokio::test] + async fn it_should_verify_a_valid_authentication_key() { + // todo: this should not be tested directly because + 
// `verify_auth_key` should be a private method. + let tracker = private_tracker(); + + let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + + assert!(tracker.verify_auth_key(&key.id()).await.is_ok()); + } + + #[tokio::test] + async fn it_should_fail_verifying_an_unregistered_authentication_key() { + let tracker = private_tracker(); + + let unregistered_key_id = auth::KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + assert!(tracker.verify_auth_key(&unregistered_key_id).await.is_err()); + } + + #[tokio::test] + async fn it_should_remove_an_authentication_key() { + let tracker = private_tracker(); + + let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + + let result = tracker.remove_auth_key(&key.id().to_string()).await; + + assert!(result.is_ok()); + assert!(tracker.verify_auth_key(&key.id()).await.is_err()); + } + + #[tokio::test] + async fn it_should_load_authentication_keys_from_the_database() { + let tracker = private_tracker(); + + let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + + // Remove the newly generated key in memory + tracker.keys.write().await.remove(&key.id()); + + let result = tracker.load_keys().await; + + assert!(result.is_ok()); + assert!(tracker.verify_auth_key(&key.id()).await.is_ok()); + } + } + mod handling_an_announce_request {} mod handling_an_scrape_request {} From 40ff2498cf3045512daa308021fffd15b2729299 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 3 Mar 2023 13:53:23 +0000 Subject: [PATCH 365/435] test(tracker): [#207] add test for authorization (whitelist) in Tracker --- src/tracker/mod.rs | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index b8dadab8f..e16fefa4f 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -950,6 +950,33 @@ mod tests { mod configured_as_whitelisted { + mod handling_authorization { + use 
crate::tracker::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; + + #[tokio::test] + async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { + let tracker = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + let result = tracker.add_torrent_to_whitelist(&info_hash).await; + assert!(result.is_ok()); + + let result = tracker.authorize(&info_hash).await; + assert!(result.is_ok()); + } + + #[tokio::test] + async fn it_should_not_authorize_the_announce_and_scrape_actions_on_not_whitelisted_torrents() { + let tracker = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + let result = tracker.authorize(&info_hash).await; + assert!(result.is_err()); + } + } + mod handling_an_announce_request {} mod handling_an_scrape_request { From af949af4b8d11826836a3e1153cb542359bcd404 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 3 Mar 2023 16:07:47 +0000 Subject: [PATCH 366/435] test(tracker): [#207] add tests for torrent persistence in Tracker --- src/tracker/mod.rs | 49 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index e16fefa4f..5de9d6f3c 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -404,6 +404,7 @@ impl Tracker { /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. 
pub async fn load_persistent_torrents(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; + let mut torrents = self.torrents.write().await; for (info_hash, completed) in persistent_torrents { @@ -577,6 +578,12 @@ mod tests { tracker_factory(configuration) } + pub fn tracker_persisting_torrents_in_database() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.persistent_torrent_completed_stat = true; + tracker_factory(configuration) + } + pub fn tracker_factory(configuration: Configuration) -> Tracker { // code-review: the tracker initialization is duplicated in many places. Consider make this function public. @@ -1123,5 +1130,47 @@ mod tests { mod handling_an_scrape_request {} } + + mod handling_torrent_persistence { + use aquatic_udp_protocol::AnnounceEvent; + + use crate::tracker::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database}; + + #[tokio::test] + async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { + let tracker = tracker_persisting_torrents_in_database(); + + let info_hash = sample_info_hash(); + + let mut peer = sample_peer(); + + peer.event = AnnounceEvent::Started; + let swarm_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + assert_eq!(swarm_stats.completed, 0); + + peer.event = AnnounceEvent::Completed; + let swarm_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + assert_eq!(swarm_stats.completed, 1); + + let torrents = tracker.get_all_torrent_peers(&info_hash).await; + assert_eq!(torrents.len(), 1); + + // Remove the newly updated torrent from memory + tracker.torrents.write().await.remove(&info_hash); + + tracker.load_persistent_torrents().await.unwrap(); + + let torrents = tracker.get_torrents().await; + assert!(torrents.contains_key(&info_hash)); + + let torrent_entry = 
torrents.get(&info_hash).unwrap(); + + // It persists the number of completed peers. + assert_eq!(torrent_entry.completed, 1); + + // It does not persist the peers + assert!(torrent_entry.peers.is_empty()); + } + } } } From 7fb92b5c0ac390465159e1567dc1ff92b9e15ced Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 3 Mar 2023 17:25:11 +0000 Subject: [PATCH 367/435] test(tracker): [#207] add tests for whitelist in Tracker --- src/apis/handlers.rs | 4 +- src/setup.rs | 7 ++- src/tracker/mod.rs | 114 ++++++++++++++++++++++++++++++++++++++--- src/tracker/torrent.rs | 4 +- 4 files changed, 116 insertions(+), 13 deletions(-) diff --git a/src/apis/handlers.rs b/src/apis/handlers.rs index 38959edbe..f7b5e562c 100644 --- a/src/apis/handlers.rs +++ b/src/apis/handlers.rs @@ -86,7 +86,7 @@ pub async fn remove_torrent_from_whitelist_handler( } pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { - match tracker.load_whitelist().await { + match tracker.load_whitelist_from_database().await { Ok(_) => ok_response(), Err(e) => failed_to_reload_whitelist_response(e), } @@ -117,7 +117,7 @@ pub async fn delete_auth_key_handler( } pub async fn reload_keys_handler(State(tracker): State>) -> Response { - match tracker.load_keys().await { + match tracker.load_keys_from_database().await { Ok(_) => ok_response(), Err(e) => failed_to_reload_keys_response(e), } diff --git a/src/setup.rs b/src/setup.rs index 3461667cc..98d311178 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -16,13 +16,16 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Ve // Load peer keys if tracker.is_private() { - tracker.load_keys().await.expect("Could not retrieve keys from database."); + tracker + .load_keys_from_database() + .await + .expect("Could not retrieve keys from database."); } // Load whitelisted torrents if tracker.is_whitelisted() { tracker - .load_whitelist() + .load_whitelist_from_database() .await .expect("Could not load whitelist from database."); } diff --git 
a/src/tracker/mod.rs b/src/tracker/mod.rs index 5de9d6f3c..3048ded35 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -38,6 +38,9 @@ pub struct Tracker { #[derive(Debug, PartialEq, Default)] pub struct TorrentsMetrics { + // code-review: consider using `SwamStats` for + // `seeders`, `completed`, and `leechers` attributes. + // pub swam_stats: SwamStats; pub seeders: u64, pub completed: u64, pub leechers: u64, @@ -223,7 +226,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to `load_keys` from the database. - pub async fn load_keys(&self) -> Result<(), databases::error::Error> { + pub async fn load_keys_from_database(&self) -> Result<(), databases::error::Error> { let keys_from_database = self.database.load_keys().await?; let mut keys = self.keys.write().await; @@ -301,7 +304,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. - pub async fn load_whitelist(&self) -> Result<(), databases::error::Error> { + pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> { let whitelisted_torrents_from_database = self.database.load_whitelist().await?; let mut whitelist = self.whitelist.write().await; @@ -402,7 +405,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. 
- pub async fn load_persistent_torrents(&self) -> Result<(), databases::error::Error> { + pub async fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; let mut torrents = self.torrents.write().await; @@ -700,6 +703,55 @@ mod tests { ); } + #[tokio::test] + async fn it_should_return_all_the_peers_for_a_given_torrent() { + let tracker = public_tracker(); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + + let peers = tracker.get_all_torrent_peers(&info_hash).await; + + assert_eq!(peers, vec![peer]); + } + + #[tokio::test] + async fn it_should_return_all_the_peers_for_a_given_torrent_excluding_a_given_peer() { + let tracker = public_tracker(); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + + let peers = tracker.get_peers_for_peer(&info_hash, &peer).await; + + assert_eq!(peers, vec![]); + } + + #[tokio::test] + async fn it_should_return_the_torrent_metrics() { + let tracker = public_tracker(); + + tracker + .update_torrent_with_peer_and_get_stats(&sample_info_hash(), &leecher()) + .await; + + let torrent_metrics = tracker.get_torrents_metrics().await; + + assert_eq!( + torrent_metrics, + TorrentsMetrics { + seeders: 0, + completed: 0, + leechers: 1, + torrents: 1, + } + ); + } + mod for_all_config_modes { mod handling_an_announce_request { @@ -984,6 +1036,55 @@ mod tests { } } + mod handling_the_torrent_whitelist { + use crate::tracker::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; + + #[tokio::test] + async fn it_should_add_a_torrent_to_the_whitelist() { + let tracker = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + assert!(tracker.is_info_hash_whitelisted(&info_hash).await); 
+ } + + #[tokio::test] + async fn it_should_remove_a_torrent_from_the_whitelist() { + let tracker = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + tracker.remove_torrent_from_whitelist(&info_hash).await.unwrap(); + + assert!(!tracker.is_info_hash_whitelisted(&info_hash).await); + } + + mod persistence { + use crate::tracker::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; + + #[tokio::test] + async fn it_should_load_the_whitelist_from_the_database() { + let tracker = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + // Remove torrent from the in-memory whitelist + tracker.whitelist.write().await.remove(&info_hash); + assert!(!tracker.is_info_hash_whitelisted(&info_hash).await); + + tracker.load_whitelist_from_database().await.unwrap(); + + assert!(tracker.is_info_hash_whitelisted(&info_hash).await); + } + } + } + mod handling_an_announce_request {} mod handling_an_scrape_request { @@ -1112,7 +1213,7 @@ mod tests { // Remove the newly generated key in memory tracker.keys.write().await.remove(&key.id()); - let result = tracker.load_keys().await; + let result = tracker.load_keys_from_database().await; assert!(result.is_ok()); assert!(tracker.verify_auth_key(&key.id()).await.is_ok()); @@ -1152,13 +1253,10 @@ mod tests { let swarm_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; assert_eq!(swarm_stats.completed, 1); - let torrents = tracker.get_all_torrent_peers(&info_hash).await; - assert_eq!(torrents.len(), 1); - // Remove the newly updated torrent from memory tracker.torrents.write().await.remove(&info_hash); - tracker.load_persistent_torrents().await.unwrap(); + tracker.load_torrents_from_database().await.unwrap(); let torrents = tracker.get_torrents().await; assert!(torrents.contains_key(&info_hash)); diff --git a/src/tracker/torrent.rs 
b/src/tracker/torrent.rs index 3362234f0..8675490e2 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -14,6 +14,7 @@ pub struct Entry { pub completed: u32, } +/// Swarm statistics for one torrent. /// Swarm metadata dictionary in the scrape response. /// BEP 48: #[derive(Debug, PartialEq, Default)] @@ -30,7 +31,8 @@ impl SwarmMetadata { } } -/// Swarm statistics. Alternative struct for swarm metadata in scrape response. +/// Swarm statistics for one torrent. +/// Alternative struct for swarm metadata in scrape response. #[derive(Debug, PartialEq, Default)] pub struct SwamStats { pub completed: u32, // The number of peers that have ever completed downloading From d0c30540e11a6dd71cefb8d4736b23adf9b87fe0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 3 Mar 2023 17:50:27 +0000 Subject: [PATCH 368/435] fix(tracker): typo, rename SwamStats to SwarmStats --- .../axum_implementation/responses/announce.rs | 8 +++---- src/http/warp_implementation/handlers.rs | 4 ++-- src/tracker/mod.rs | 22 +++++++++---------- src/tracker/torrent.rs | 2 +- src/udp/handlers.rs | 8 +++---- 5 files changed, 22 insertions(+), 22 deletions(-) diff --git a/src/http/axum_implementation/responses/announce.rs b/src/http/axum_implementation/responses/announce.rs index a91266490..81651767b 100644 --- a/src/http/axum_implementation/responses/announce.rs +++ b/src/http/axum_implementation/responses/announce.rs @@ -90,8 +90,8 @@ impl From for NonCompact { Self { interval: domain_announce_response.interval, interval_min: domain_announce_response.interval_min, - complete: domain_announce_response.swam_stats.seeders, - incomplete: domain_announce_response.swam_stats.leechers, + complete: domain_announce_response.swarm_stats.seeders, + incomplete: domain_announce_response.swarm_stats.leechers, peers, } } @@ -237,8 +237,8 @@ impl From for Compact { Self { interval: domain_announce_response.interval, interval_min: domain_announce_response.interval_min, - complete: 
domain_announce_response.swam_stats.seeders, - incomplete: domain_announce_response.swam_stats.leechers, + complete: domain_announce_response.swarm_stats.seeders, + incomplete: domain_announce_response.swarm_stats.leechers, peers, } } diff --git a/src/http/warp_implementation/handlers.rs b/src/http/warp_implementation/handlers.rs index 6019bf016..b803a594f 100644 --- a/src/http/warp_implementation/handlers.rs +++ b/src/http/warp_implementation/handlers.rs @@ -66,7 +66,7 @@ pub async fn handle_announce( send_announce_response( &announce_request, - &response.swam_stats, + &response.swarm_stats, &response.peers, tracker.config.announce_interval, tracker.config.min_announce_interval, @@ -129,7 +129,7 @@ pub async fn handle_scrape( #[allow(clippy::ptr_arg)] fn send_announce_response( announce_request: &request::Announce, - torrent_stats: &torrent::SwamStats, + torrent_stats: &torrent::SwarmStats, peers: &Vec, interval: u32, interval_min: u32, diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 3048ded35..fbda95354 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -19,7 +19,7 @@ use tokio::sync::{RwLock, RwLockReadGuard}; use self::auth::KeyId; use self::error::Error; use self::peer::Peer; -use self::torrent::{SwamStats, SwarmMetadata}; +use self::torrent::{SwarmMetadata, SwarmStats}; use crate::config::Configuration; use crate::databases::driver::Driver; use crate::databases::{self, Database}; @@ -38,9 +38,9 @@ pub struct Tracker { #[derive(Debug, PartialEq, Default)] pub struct TorrentsMetrics { - // code-review: consider using `SwamStats` for + // code-review: consider using `SwarmStats` for // `seeders`, `completed`, and `leechers` attributes. 
- // pub swam_stats: SwamStats; + // pub swarm_stats: SwarmStats; pub seeders: u64, pub completed: u64, pub leechers: u64, @@ -50,7 +50,7 @@ pub struct TorrentsMetrics { #[derive(Debug, PartialEq, Default)] pub struct AnnounceData { pub peers: Vec, - pub swam_stats: SwamStats, + pub swarm_stats: SwarmStats, pub interval: u32, pub interval_min: u32, } @@ -147,13 +147,13 @@ impl Tracker { peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.get_ext_ip())); - let swam_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + let swarm_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; let peers = self.get_peers_for_peer(info_hash, peer).await; AnnounceData { peers, - swam_stats, + swarm_stats, interval: self.config.announce_interval, interval_min: self.config.min_announce_interval, } @@ -446,7 +446,7 @@ impl Tracker { } } - pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::SwamStats { + pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::SwarmStats { // code-review: consider splitting the function in two (command and query segregation). 
// `update_torrent_with_peer` and `get_stats` @@ -469,7 +469,7 @@ impl Tracker { let (seeders, completed, leechers) = torrent_entry.get_stats(); - torrent::SwamStats { + torrent::SwarmStats { completed, seeders, leechers, @@ -898,7 +898,7 @@ mod tests { let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; - assert_eq!(announce_data.swam_stats.seeders, 1); + assert_eq!(announce_data.swarm_stats.seeders, 1); } #[tokio::test] @@ -909,7 +909,7 @@ mod tests { let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; - assert_eq!(announce_data.swam_stats.leechers, 1); + assert_eq!(announce_data.swarm_stats.leechers, 1); } #[tokio::test] @@ -923,7 +923,7 @@ mod tests { let mut completed_peer = completed_peer(); let announce_data = tracker.announce(&sample_info_hash(), &mut completed_peer, &peer_ip()).await; - assert_eq!(announce_data.swam_stats.completed, 1); + assert_eq!(announce_data.swarm_stats.completed, 1); } } } diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 8675490e2..4a871aa89 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -34,7 +34,7 @@ impl SwarmMetadata { /// Swarm statistics for one torrent. /// Alternative struct for swarm metadata in scrape response. 
#[derive(Debug, PartialEq, Default)] -pub struct SwamStats { +pub struct SwarmStats { pub completed: u32, // The number of peers that have ever completed downloading pub seeders: u32, // The number of active peers that have completed downloading (seeders) pub leechers: u32, // The number of active peers that have not completed downloading (leechers) diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 6c54a6106..8fda77fb4 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -136,8 +136,8 @@ pub async fn handle_announce( Response::from(AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), - leechers: NumberOfPeers(i64::from(response.swam_stats.leechers) as i32), - seeders: NumberOfPeers(i64::from(response.swam_stats.seeders) as i32), + leechers: NumberOfPeers(i64::from(response.swarm_stats.leechers) as i32), + seeders: NumberOfPeers(i64::from(response.swarm_stats.seeders) as i32), peers: response .peers .iter() @@ -157,8 +157,8 @@ pub async fn handle_announce( Response::from(AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), - leechers: NumberOfPeers(i64::from(response.swam_stats.leechers) as i32), - seeders: NumberOfPeers(i64::from(response.swam_stats.seeders) as i32), + leechers: NumberOfPeers(i64::from(response.swarm_stats.leechers) as i32), + seeders: NumberOfPeers(i64::from(response.swarm_stats.seeders) as i32), peers: response .peers .iter() From 12c8cf9916492962e1be19f6aae7fc8aa826ece9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 5 Mar 2023 11:39:49 +0000 Subject: [PATCH 369/435] refactor: ExpiringKey has always an expiration date After introducing the `KeyId` we no longer needed to have keys with no expiration date. A key without an expiration date is a `KeyId`. 
So all `ExpiringKeys` have an expiration date. --- src/apis/resources/auth_key.rs | 19 ++++--- src/databases/mysql.rs | 6 +-- src/databases/sqlite.rs | 6 +-- src/tracker/auth.rs | 98 +++++----------------------------- src/tracker/mod.rs | 2 +- 5 files changed, 32 insertions(+), 99 deletions(-) diff --git a/src/apis/resources/auth_key.rs b/src/apis/resources/auth_key.rs index e9989ca75..289e704b6 100644 --- a/src/apis/resources/auth_key.rs +++ b/src/apis/resources/auth_key.rs @@ -7,17 +7,20 @@ use crate::tracker::auth::{self, KeyId}; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct AuthKey { - pub key: String, // todo: rename to `id` - pub valid_until: Option, + pub key: String, // todo: rename to `id` (API breaking change!) + pub valid_until: Option, // todo: `auth::ExpiringKey` has now always a value (API breaking change!) } impl From for auth::ExpiringKey { fn from(auth_key_resource: AuthKey) -> Self { + let valid_until = match auth_key_resource.valid_until { + Some(valid_until) => DurationSinceUnixEpoch::from_secs(valid_until), + None => DurationSinceUnixEpoch::from_secs(0), + }; + auth::ExpiringKey { id: auth_key_resource.key.parse::().unwrap(), - valid_until: auth_key_resource - .valid_until - .map(|valid_until| DurationSinceUnixEpoch::new(valid_until, 0)), + valid_until, } } } @@ -26,7 +29,7 @@ impl From for AuthKey { fn from(auth_key: auth::ExpiringKey) -> Self { AuthKey { key: auth_key.id.to_string(), - valid_until: auth_key.valid_until.map(|valid_until| valid_until.as_secs()), + valid_until: Some(auth_key.valid_until.as_secs()), } } } @@ -52,7 +55,7 @@ mod tests { auth::ExpiringKey::from(auth_key_resource), auth::ExpiringKey { id: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line - valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()) + valid_until: Current::add(&Duration::new(duration_in_secs, 0)).unwrap() } ); } @@ -63,7 +66,7 @@ mod tests { let auth_key = auth::ExpiringKey { id: 
"IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line - valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()), + valid_until: Current::add(&Duration::new(duration_in_secs, 0)).unwrap(), }; assert_eq!( diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 0d545aaa9..cbd5f3df9 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -118,7 +118,7 @@ impl Database for Mysql { "SELECT `key`, valid_until FROM `keys`", |(key, valid_until): (String, i64)| auth::ExpiringKey { id: key.parse::().unwrap(), - valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), + valid_until: Duration::from_secs(valid_until.unsigned_abs()), }, )?; @@ -193,7 +193,7 @@ impl Database for Mysql { Ok(key.map(|(key, expiry)| auth::ExpiringKey { id: key.parse::().unwrap(), - valid_until: Some(Duration::from_secs(expiry.unsigned_abs())), + valid_until: Duration::from_secs(expiry.unsigned_abs()), })) } @@ -201,7 +201,7 @@ impl Database for Mysql { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let key = auth_key.id.to_string(); - let valid_until = auth_key.valid_until.unwrap_or(Duration::ZERO).as_secs().to_string(); + let valid_until = auth_key.valid_until.as_secs().to_string(); conn.exec_drop( "INSERT INTO `keys` (`key`, valid_until) VALUES (:key, :valid_until)", diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index ab0addf4b..974f172e0 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -113,7 +113,7 @@ impl Database for Sqlite { Ok(auth::ExpiringKey { id: key.parse::().unwrap(), - valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), + valid_until: DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs()), }) })?; @@ -214,7 +214,7 @@ impl Database for Sqlite { let id: String = f.get(0).unwrap(); auth::ExpiringKey { id: id.parse::().unwrap(), - valid_until: Some(DurationSinceUnixEpoch::from_secs(expiry.unsigned_abs())), + valid_until: 
DurationSinceUnixEpoch::from_secs(expiry.unsigned_abs()), } })) } @@ -224,7 +224,7 @@ impl Database for Sqlite { let insert = conn.execute( "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", - [auth_key.id.to_string(), auth_key.valid_until.unwrap().as_secs().to_string()], + [auth_key.id.to_string(), auth_key.valid_until.as_secs().to_string()], )?; if insert == 0 { diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 01de7a619..f8e1b3440 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -30,7 +30,7 @@ pub fn generate(lifetime: Duration) -> ExpiringKey { ExpiringKey { id: random_id.parse::().unwrap(), - valid_until: Some(Current::add(&lifetime).unwrap()), + valid_until: Current::add(&lifetime).unwrap(), } } @@ -42,30 +42,19 @@ pub fn generate(lifetime: Duration) -> ExpiringKey { pub fn verify(auth_key: &ExpiringKey) -> Result<(), Error> { let current_time: DurationSinceUnixEpoch = Current::now(); - match auth_key.valid_until { - Some(valid_until) => { - if valid_until < current_time { - Err(Error::KeyExpired { - location: Location::caller(), - }) - } else { - Ok(()) - } - } - None => Err(Error::UnableToReadKey { + if auth_key.valid_until < current_time { + Err(Error::KeyExpired { location: Location::caller(), - key_id: Box::new(auth_key.id.clone()), - }), + }) + } else { + Ok(()) } } #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub struct ExpiringKey { pub id: KeyId, - // todo: we can remove the `Option`. An `ExpiringKey` that does not expire - // is a `KeyId`. In other words, all `ExpiringKeys` must have an - // expiration time. 
- pub valid_until: Option, + pub valid_until: DurationSinceUnixEpoch, } impl std::fmt::Display for ExpiringKey { @@ -74,54 +63,18 @@ impl std::fmt::Display for ExpiringKey { f, "key: `{}`, valid until `{}`", self.id, - match self.valid_until { - Some(duration) => format!( - "{}", - DateTime::::from_utc( - NaiveDateTime::from_timestamp( - i64::try_from(duration.as_secs()).expect("Overflow of i64 seconds, very future!"), - duration.subsec_nanos(), - ), - Utc - ) + DateTime::::from_utc( + NaiveDateTime::from_timestamp( + i64::try_from(self.valid_until.as_secs()).expect("Overflow of i64 seconds, very future!"), + self.valid_until.subsec_nanos(), ), - None => "Empty!?".to_string(), - } + Utc + ) ) } } impl ExpiringKey { - /// # Panics - /// - /// Will panic if bytes cannot be converted into a valid `KeyId`. - #[must_use] - pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { - if let Ok(key) = String::from_utf8(Vec::from(key_buffer)) { - Some(ExpiringKey { - id: key.parse::().unwrap(), - valid_until: None, - }) - } else { - None - } - } - - /// # Panics - /// - /// Will panic if string cannot be converted into a valid `KeyId`. 
- #[must_use] - pub fn from_string(key: &str) -> Option { - if key.len() == AUTH_KEY_LENGTH { - Some(ExpiringKey { - id: key.parse::().unwrap(), - valid_until: None, - }) - } else { - None - } - } - #[must_use] pub fn id(&self) -> KeyId { self.id.clone() } @@ -176,30 +129,7 @@ mod tests { use std::time::Duration; use crate::protocol::clock::{Current, StoppedTime}; - use crate::tracker::auth::{self, KeyId}; - - #[test] - fn auth_key_from_buffer() { - let auth_key = auth::ExpiringKey::from_buffer([ - 89, 90, 83, 108, 52, 108, 77, 90, 117, 112, 82, 117, 79, 112, 83, 82, 67, 51, 107, 114, 73, 75, 82, 53, 66, 80, 66, - 49, 52, 110, 114, 74, - ]); - - assert!(auth_key.is_some()); - assert_eq!( - auth_key.unwrap().id, - "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse::().unwrap() - ); - } - - #[test] - fn auth_key_from_string() { - let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key = auth::ExpiringKey::from_string(key_string); - - assert!(auth_key.is_some()); - assert_eq!(auth_key.unwrap().id, key_string.parse::().unwrap()); - } + use crate::tracker::auth; #[test] fn auth_key_id_from_string() { diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index fbda95354..0d04868a8 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -1147,7 +1147,7 @@ mod tests { let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); - assert_eq!(key.valid_until.unwrap(), Duration::from_secs(100)); + assert_eq!(key.valid_until, Duration::from_secs(100)); } #[tokio::test] From c7015fad532983bb4661d510c3dd5cdb46c45acd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 5 Mar 2023 11:46:26 +0000 Subject: [PATCH 370/435] refactor: rename struct KeyId to Key There is no longer a conflict with the `ExpiringKey` struct that was also called `Key`. 
--- src/apis/handlers.rs | 4 ++-- src/apis/resources/auth_key.rs | 12 ++++++------ src/databases/mysql.rs | 6 +++--- src/databases/sqlite.rs | 6 +++--- src/http/axum_implementation/extractors/key.rs | 6 +++--- src/http/warp_implementation/filters.rs | 8 ++++---- src/http/warp_implementation/handlers.rs | 8 ++++---- src/tracker/auth.rs | 14 +++++++------- src/tracker/error.rs | 2 +- src/tracker/mod.rs | 16 ++++++++-------- tests/http/client.rs | 6 +++--- tests/http/connection_info.rs | 4 ++-- tests/http_tracker.rs | 16 ++++++++-------- tests/tracker_api.rs | 4 ++-- 14 files changed, 56 insertions(+), 56 deletions(-) diff --git a/src/apis/handlers.rs b/src/apis/handlers.rs index f7b5e562c..652f491e5 100644 --- a/src/apis/handlers.rs +++ b/src/apis/handlers.rs @@ -17,7 +17,7 @@ use crate::apis::resources::auth_key::AuthKey; use crate::apis::resources::stats::Stats; use crate::apis::resources::torrent::ListItem; use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth::KeyId; +use crate::tracker::auth::Key; use crate::tracker::services::statistics::get_metrics; use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; use crate::tracker::Tracker; @@ -107,7 +107,7 @@ pub async fn delete_auth_key_handler( State(tracker): State>, Path(seconds_valid_or_key): Path, ) -> Response { - match KeyId::from_str(&seconds_valid_or_key.0) { + match Key::from_str(&seconds_valid_or_key.0) { Err(_) => invalid_auth_key_param_response(&seconds_valid_or_key.0), Ok(key_id) => match tracker.remove_auth_key(&key_id.to_string()).await { Ok(_) => ok_response(), diff --git a/src/apis/resources/auth_key.rs b/src/apis/resources/auth_key.rs index 289e704b6..954e633d0 100644 --- a/src/apis/resources/auth_key.rs +++ b/src/apis/resources/auth_key.rs @@ -3,11 +3,11 @@ use std::convert::From; use serde::{Deserialize, Serialize}; use crate::protocol::clock::DurationSinceUnixEpoch; -use crate::tracker::auth::{self, KeyId}; +use crate::tracker::auth::{self, Key}; 
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct AuthKey { - pub key: String, // todo: rename to `id` (API breaking change!) + pub key: String, // todo: rename to `id` (API breaking change!) pub valid_until: Option, // todo: `auth::ExpiringKey` has now always a value (API breaking change!) } @@ -19,7 +19,7 @@ impl From for auth::ExpiringKey { }; auth::ExpiringKey { - id: auth_key_resource.key.parse::().unwrap(), + id: auth_key_resource.key.parse::().unwrap(), valid_until, } } @@ -40,7 +40,7 @@ mod tests { use super::AuthKey; use crate::protocol::clock::{Current, TimeNow}; - use crate::tracker::auth::{self, KeyId}; + use crate::tracker::auth::{self, Key}; #[test] fn it_should_be_convertible_into_an_auth_key() { @@ -54,7 +54,7 @@ mod tests { assert_eq!( auth::ExpiringKey::from(auth_key_resource), auth::ExpiringKey { - id: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line + id: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line valid_until: Current::add(&Duration::new(duration_in_secs, 0)).unwrap() } ); @@ -65,7 +65,7 @@ mod tests { let duration_in_secs = 60; let auth_key = auth::ExpiringKey { - id: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line + id: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line valid_until: Current::add(&Duration::new(duration_in_secs, 0)).unwrap(), }; diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index cbd5f3df9..00865d026 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -12,7 +12,7 @@ use super::driver::Driver; use crate::databases::{Database, Error}; use crate::protocol::common::AUTH_KEY_LENGTH; use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth::{self, KeyId}; +use crate::tracker::auth::{self, Key}; const DRIVER: Driver = Driver::MySQL; @@ -117,7 +117,7 @@ impl Database for Mysql { let keys = conn.query_map( "SELECT `key`, valid_until FROM `keys`", |(key, 
valid_until): (String, i64)| auth::ExpiringKey { - id: key.parse::().unwrap(), + id: key.parse::().unwrap(), valid_until: Duration::from_secs(valid_until.unsigned_abs()), }, )?; @@ -192,7 +192,7 @@ impl Database for Mysql { let key = query?; Ok(key.map(|(key, expiry)| auth::ExpiringKey { - id: key.parse::().unwrap(), + id: key.parse::().unwrap(), valid_until: Duration::from_secs(expiry.unsigned_abs()), })) } diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 974f172e0..6c5b9f600 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -9,7 +9,7 @@ use super::driver::Driver; use crate::databases::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth::{self, KeyId}; +use crate::tracker::auth::{self, Key}; const DRIVER: Driver = Driver::Sqlite3; @@ -112,7 +112,7 @@ impl Database for Sqlite { let valid_until: i64 = row.get(1)?; Ok(auth::ExpiringKey { - id: key.parse::().unwrap(), + id: key.parse::().unwrap(), valid_until: DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs()), }) })?; @@ -213,7 +213,7 @@ impl Database for Sqlite { let expiry: i64 = f.get(1).unwrap(); let id: String = f.get(0).unwrap(); auth::ExpiringKey { - id: id.parse::().unwrap(), + id: id.parse::().unwrap(), valid_until: DurationSinceUnixEpoch::from_secs(expiry.unsigned_abs()), } })) diff --git a/src/http/axum_implementation/extractors/key.rs b/src/http/axum_implementation/extractors/key.rs index 6cc2f13e8..ecdc9d801 100644 --- a/src/http/axum_implementation/extractors/key.rs +++ b/src/http/axum_implementation/extractors/key.rs @@ -7,9 +7,9 @@ use axum::response::{IntoResponse, Response}; use crate::http::axum_implementation::handlers::auth::{self, KeyIdParam}; use crate::http::axum_implementation::responses; -use crate::tracker::auth::KeyId; +use crate::tracker::auth::Key; -pub struct ExtractKeyId(pub KeyId); +pub struct ExtractKeyId(pub Key); #[async_trait] impl 
FromRequestParts for ExtractKeyId @@ -21,7 +21,7 @@ where async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { match Path::::from_request_parts(parts, state).await { Ok(key_id_param) => { - let Ok(key_id) = key_id_param.0.value().parse::() else { + let Ok(key_id) = key_id_param.0.value().parse::() else { return Err(responses::error::Error::from( auth::Error::InvalidKeyFormat { location: Location::caller() diff --git a/src/http/warp_implementation/filters.rs b/src/http/warp_implementation/filters.rs index eb7abcd4d..a3000bfaa 100644 --- a/src/http/warp_implementation/filters.rs +++ b/src/http/warp_implementation/filters.rs @@ -12,7 +12,7 @@ use super::{request, WebResult}; use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth::KeyId; +use crate::tracker::auth::Key; use crate::tracker::{self, peer}; /// Pass Arc along @@ -37,16 +37,16 @@ pub fn with_peer_id() -> impl Filter + /// Pass Arc along #[must_use] -pub fn with_auth_key_id() -> impl Filter,), Error = Infallible> + Clone { +pub fn with_auth_key_id() -> impl Filter,), Error = Infallible> + Clone { warp::path::param::() .map(|key: String| { - let key_id = KeyId::from_str(&key); + let key_id = Key::from_str(&key); match key_id { Ok(id) => Some(id), Err(_) => None, } }) - .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) + .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) } /// Check for `PeerAddress` diff --git a/src/http/warp_implementation/handlers.rs b/src/http/warp_implementation/handlers.rs index b803a594f..4a64259bb 100644 --- a/src/http/warp_implementation/handlers.rs +++ b/src/http/warp_implementation/handlers.rs @@ -12,7 +12,7 @@ use super::error::Error; use super::{request, response, WebResult}; use crate::http::warp_implementation::peer_builder; use crate::protocol::info_hash::InfoHash; -use 
crate::tracker::auth::KeyId; +use crate::tracker::auth::Key; use crate::tracker::{self, auth, peer, statistics, torrent}; /// Authenticate `InfoHash` using optional `auth::Key` @@ -22,7 +22,7 @@ use crate::tracker::{self, auth, peer, statistics, torrent}; /// Will return `ServerError` that wraps the `tracker::error::Error` if unable to `authenticate_request`. pub async fn authenticate( info_hash: &InfoHash, - auth_key_id: &Option, + auth_key_id: &Option, tracker: Arc, ) -> Result<(), Error> { tracker @@ -38,7 +38,7 @@ pub async fn authenticate( /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_announce_response`. pub async fn handle_announce( announce_request: request::Announce, - auth_key_id: Option, + auth_key_id: Option, tracker: Arc, ) -> WebResult { debug!("http announce request: {:#?}", announce_request); @@ -78,7 +78,7 @@ pub async fn handle_announce( /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. 
pub async fn handle_scrape( scrape_request: request::Scrape, - auth_key_id: Option, + auth_key_id: Option, tracker: Arc, ) -> WebResult { let mut files: HashMap = HashMap::new(); diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index f8e1b3440..2f65b2bcb 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -29,7 +29,7 @@ pub fn generate(lifetime: Duration) -> ExpiringKey { debug!("Generated key: {}, valid for: {:?} seconds", random_id, lifetime); ExpiringKey { - id: random_id.parse::().unwrap(), + id: random_id.parse::().unwrap(), valid_until: Current::add(&lifetime).unwrap(), } } @@ -53,7 +53,7 @@ pub fn verify(auth_key: &ExpiringKey) -> Result<(), Error> { #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub struct ExpiringKey { - pub id: KeyId, + pub id: Key, pub valid_until: DurationSinceUnixEpoch, } @@ -76,18 +76,18 @@ impl std::fmt::Display for ExpiringKey { impl ExpiringKey { #[must_use] - pub fn id(&self) -> KeyId { + pub fn id(&self) -> Key { self.id.clone() } } #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, Display, Hash)] -pub struct KeyId(String); +pub struct Key(String); #[derive(Debug, PartialEq, Eq)] pub struct ParseKeyIdError; -impl FromStr for KeyId { +impl FromStr for Key { type Err = ParseKeyIdError; fn from_str(s: &str) -> Result { @@ -109,7 +109,7 @@ pub enum Error { #[error("Failed to read key: {key_id}, {location}")] UnableToReadKey { location: &'static Location<'static>, - key_id: Box, + key_id: Box, }, #[error("Key has expired, {location}")] KeyExpired { location: &'static Location<'static> }, @@ -134,7 +134,7 @@ mod tests { #[test] fn auth_key_id_from_string() { let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key_id = auth::KeyId::from_str(key_string); + let auth_key_id = auth::Key::from_str(key_string); assert!(auth_key_id.is_ok()); assert_eq!(auth_key_id.unwrap().to_string(), key_string); diff --git a/src/tracker/error.rs b/src/tracker/error.rs index acc85a1c2..f03f4b3e5 100644 
--- a/src/tracker/error.rs +++ b/src/tracker/error.rs @@ -6,7 +6,7 @@ use crate::located_error::LocatedError; pub enum Error { #[error("The supplied key: {key_id:?}, is not valid: {source}")] PeerKeyNotValid { - key_id: super::auth::KeyId, + key_id: super::auth::Key, source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, #[error("The peer is not authenticated, {location}")] diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 0d04868a8..448905ef7 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -16,7 +16,7 @@ use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; -use self::auth::KeyId; +use self::auth::Key; use self::error::Error; use self::peer::Peer; use self::torrent::{SwarmMetadata, SwarmStats}; @@ -28,7 +28,7 @@ use crate::protocol::info_hash::InfoHash; pub struct Tracker { pub config: Arc, mode: mode::Mode, - keys: RwLock>, + keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, stats_event_sender: Option>, @@ -204,14 +204,14 @@ impl Tracker { pub async fn remove_auth_key(&self, key: &str) -> Result<(), databases::error::Error> { // todo: change argument `key: &str` to `key_id: &KeyId` self.database.remove_key_from_keys(key).await?; - self.keys.write().await.remove(&key.parse::().unwrap()); + self.keys.write().await.remove(&key.parse::().unwrap()); Ok(()) } /// # Errors /// /// Will return a `key::Error` if unable to get any `auth_key`. - pub async fn verify_auth_key(&self, key_id: &KeyId) -> Result<(), auth::Error> { + pub async fn verify_auth_key(&self, key_id: &Key) -> Result<(), auth::Error> { // code-review: this function is public only because it's used in a test. // We should change the test and make it private. match self.keys.read().await.get(key_id) { @@ -324,7 +324,7 @@ impl Tracker { /// Will return a `torrent::Error::PeerNotAuthenticated` if the `key` is `None`. 
/// /// Will return a `torrent::Error::TorrentNotWhitelisted` if the the Tracker is in listed mode and the `info_hash` is not whitelisted. - pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), Error> { + pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), Error> { // todo: this is a deprecated method. // We're splitting authentication and authorization responsibilities. // Use `authenticate` and `authorize` instead. @@ -371,7 +371,7 @@ impl Tracker { /// # Errors /// /// Will return an error if the the authentication key cannot be verified. - pub async fn authenticate(&self, key_id: &KeyId) -> Result<(), auth::Error> { + pub async fn authenticate(&self, key_id: &Key) -> Result<(), auth::Error> { if self.is_private() { self.verify_auth_key(key_id).await } else { @@ -1165,7 +1165,7 @@ mod tests { async fn it_should_fail_authenticating_a_peer_when_it_uses_an_unregistered_key() { let tracker = private_tracker(); - let unregistered_key_id = auth::KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let unregistered_key_id = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); let result = tracker.authenticate(&unregistered_key_id).await; @@ -1187,7 +1187,7 @@ mod tests { async fn it_should_fail_verifying_an_unregistered_authentication_key() { let tracker = private_tracker(); - let unregistered_key_id = auth::KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let unregistered_key_id = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); assert!(tracker.verify_auth_key(&unregistered_key_id).await.is_err()); } diff --git a/tests/http/client.rs b/tests/http/client.rs index b59cf2ac6..fa5fd5d16 100644 --- a/tests/http/client.rs +++ b/tests/http/client.rs @@ -1,7 +1,7 @@ use std::net::IpAddr; use reqwest::{Client as ReqwestClient, Response}; -use torrust_tracker::tracker::auth::KeyId; +use torrust_tracker::tracker::auth::Key; use 
super::connection_info::ConnectionInfo; use super::requests::announce::{self, Query}; @@ -11,7 +11,7 @@ use super::requests::scrape; pub struct Client { connection_info: ConnectionInfo, reqwest_client: ReqwestClient, - key_id: Option, + key_id: Option, } /// URL components in this context: @@ -40,7 +40,7 @@ impl Client { } } - pub fn authenticated(connection_info: ConnectionInfo, key_id: KeyId) -> Self { + pub fn authenticated(connection_info: ConnectionInfo, key_id: Key) -> Self { Self { connection_info, reqwest_client: reqwest::Client::builder().build().unwrap(), diff --git a/tests/http/connection_info.rs b/tests/http/connection_info.rs index fb1dbf64e..eedaa73f0 100644 --- a/tests/http/connection_info.rs +++ b/tests/http/connection_info.rs @@ -1,9 +1,9 @@ -use torrust_tracker::tracker::auth::KeyId; +use torrust_tracker::tracker::auth::Key; #[derive(Clone, Debug)] pub struct ConnectionInfo { pub bind_address: String, - pub key_id: Option, + pub key_id: Option, } impl ConnectionInfo { diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 2360df9ab..b1b90f923 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1083,7 +1083,7 @@ mod warp_http_tracker_server { use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth::KeyId; + use torrust_tracker::tracker::auth::Key; use crate::http::asserts::assert_is_announce_response; use crate::http::asserts_warp::{ @@ -1128,7 +1128,7 @@ mod warp_http_tracker_server { let http_tracker_server = start_private_http_tracker(Version::Warp).await; // The tracker does not have this key - let unregistered_key_id = KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let unregistered_key_id = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key_id) .announce(&QueryBuilder::default().query()) @@ -1145,7 +1145,7 @@ mod 
warp_http_tracker_server { use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth::KeyId; + use torrust_tracker::tracker::auth::Key; use torrust_tracker::tracker::peer; use crate::common::fixtures::PeerBuilder; @@ -1242,7 +1242,7 @@ mod warp_http_tracker_server { ) .await; - let false_key_id: KeyId = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); + let false_key_id: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); let response = Client::authenticated(http_tracker.get_connection_info(), false_key_id) .scrape( @@ -2396,7 +2396,7 @@ mod axum_http_tracker_server { use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth::KeyId; + use torrust_tracker::tracker::auth::Key; use crate::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; use crate::http::client::Client; @@ -2453,7 +2453,7 @@ mod axum_http_tracker_server { let http_tracker_server = start_private_http_tracker(Version::Axum).await; // The tracker does not have this key - let unregistered_key_id = KeyId::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let unregistered_key_id = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key_id) .announce(&QueryBuilder::default().query()) @@ -2470,7 +2470,7 @@ mod axum_http_tracker_server { use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth::KeyId; + use torrust_tracker::tracker::auth::Key; use torrust_tracker::tracker::peer; use crate::common::fixtures::PeerBuilder; @@ -2583,7 +2583,7 @@ mod axum_http_tracker_server { ) .await; - let false_key_id: KeyId = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); + let false_key_id: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); let response = 
Client::authenticated(http_tracker.get_connection_info(), false_key_id) .scrape( diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index bec22e2b4..2c59cd8fb 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -638,7 +638,7 @@ mod tracker_apis { mod for_key_resources { use std::time::Duration; - use torrust_tracker::tracker::auth::KeyId; + use torrust_tracker::tracker::auth::Key; use crate::api::asserts::{ assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, @@ -665,7 +665,7 @@ mod tracker_apis { // Verify the key with the tracker assert!(api_server .tracker - .verify_auth_key(&auth_key_resource.key.parse::().unwrap()) + .verify_auth_key(&auth_key_resource.key.parse::().unwrap()) .await .is_ok()); } From 7b690a4c6a63872ce411adbaf198b43381dcb4c4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 5 Mar 2023 11:59:45 +0000 Subject: [PATCH 371/435] refactor: rename ExpiringKey::id to ExpiringKey::key --- src/apis/resources/auth_key.rs | 8 ++++---- src/databases/mysql.rs | 6 +++--- src/databases/sqlite.rs | 6 +++--- src/tracker/auth.rs | 8 ++++---- src/tracker/mod.rs | 4 ++-- tests/tracker_api.rs | 8 ++++---- 6 files changed, 20 insertions(+), 20 deletions(-) diff --git a/src/apis/resources/auth_key.rs b/src/apis/resources/auth_key.rs index 954e633d0..72ef32a95 100644 --- a/src/apis/resources/auth_key.rs +++ b/src/apis/resources/auth_key.rs @@ -19,7 +19,7 @@ impl From for auth::ExpiringKey { }; auth::ExpiringKey { - id: auth_key_resource.key.parse::().unwrap(), + key: auth_key_resource.key.parse::().unwrap(), valid_until, } } @@ -28,7 +28,7 @@ impl From for AuthKey { fn from(auth_key: auth::ExpiringKey) -> Self { AuthKey { - key: auth_key.id.to_string(), + key: auth_key.key.to_string(), valid_until: Some(auth_key.valid_until.as_secs()), } } @@ -54,7 +54,7 @@ mod tests { assert_eq!( auth::ExpiringKey::from(auth_key_resource), auth::ExpiringKey { - id: 
"IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line valid_until: Current::add(&Duration::new(duration_in_secs, 0)).unwrap() } ); @@ -65,7 +65,7 @@ mod tests { let duration_in_secs = 60; let auth_key = auth::ExpiringKey { - id: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line valid_until: Current::add(&Duration::new(duration_in_secs, 0)).unwrap(), }; diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 00865d026..4bb28f050 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -117,7 +117,7 @@ impl Database for Mysql { let keys = conn.query_map( "SELECT `key`, valid_until FROM `keys`", |(key, valid_until): (String, i64)| auth::ExpiringKey { - id: key.parse::().unwrap(), + key: key.parse::().unwrap(), valid_until: Duration::from_secs(valid_until.unsigned_abs()), }, )?; @@ -192,7 +192,7 @@ impl Database for Mysql { let key = query?; Ok(key.map(|(key, expiry)| auth::ExpiringKey { - id: key.parse::().unwrap(), + key: key.parse::().unwrap(), valid_until: Duration::from_secs(expiry.unsigned_abs()), })) } @@ -200,7 +200,7 @@ impl Database for Mysql { async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; - let key = auth_key.id.to_string(); + let key = auth_key.key.to_string(); let valid_until = auth_key.valid_until.as_secs().to_string(); conn.exec_drop( diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 6c5b9f600..8fac09e47 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -112,7 +112,7 @@ impl Database for Sqlite { let valid_until: i64 = row.get(1)?; Ok(auth::ExpiringKey { - id: key.parse::().unwrap(), + key: key.parse::().unwrap(), valid_until: DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs()), }) 
})?; @@ -213,7 +213,7 @@ impl Database for Sqlite { let expiry: i64 = f.get(1).unwrap(); let id: String = f.get(0).unwrap(); auth::ExpiringKey { - id: id.parse::().unwrap(), + key: id.parse::().unwrap(), valid_until: DurationSinceUnixEpoch::from_secs(expiry.unsigned_abs()), } })) @@ -224,7 +224,7 @@ impl Database for Sqlite { let insert = conn.execute( "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", - [auth_key.id.to_string(), auth_key.valid_until.as_secs().to_string()], + [auth_key.key.to_string(), auth_key.valid_until.as_secs().to_string()], )?; if insert == 0 { diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 2f65b2bcb..09f324e2b 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -29,7 +29,7 @@ pub fn generate(lifetime: Duration) -> ExpiringKey { debug!("Generated key: {}, valid for: {:?} seconds", random_id, lifetime); ExpiringKey { - id: random_id.parse::().unwrap(), + key: random_id.parse::().unwrap(), valid_until: Current::add(&lifetime).unwrap(), } } @@ -53,7 +53,7 @@ pub fn verify(auth_key: &ExpiringKey) -> Result<(), Error> { #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub struct ExpiringKey { - pub id: Key, + pub key: Key, pub valid_until: DurationSinceUnixEpoch, } @@ -62,7 +62,7 @@ impl std::fmt::Display for ExpiringKey { write!( f, "key: `{}`, valid until `{}`", - self.id, + self.key, DateTime::::from_utc( NaiveDateTime::from_timestamp( i64::try_from(self.valid_until.as_secs()).expect("Overflow of i64 seconds, very future!"), @@ -77,7 +77,7 @@ impl std::fmt::Display for ExpiringKey { impl ExpiringKey { #[must_use] pub fn id(&self) -> Key { - self.id.clone() + self.key.clone() } } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 448905ef7..34977b4de 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -190,7 +190,7 @@ impl Tracker { pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { let auth_key = auth::generate(lifetime); 
self.database.add_key_to_keys(&auth_key).await?; - self.keys.write().await.insert(auth_key.id.clone(), auth_key.clone()); + self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); Ok(auth_key) } @@ -233,7 +233,7 @@ impl Tracker { keys.clear(); for key in keys_from_database { - keys.insert(key.id.clone(), key); + keys.insert(key.key.clone(), key); } Ok(()) diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 2c59cd8fb..600d26f2f 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -734,7 +734,7 @@ mod tracker_apis { .unwrap(); let response = Client::new(api_server.get_connection_info()) - .delete_auth_key(&auth_key.id.to_string()) + .delete_auth_key(&auth_key.key.to_string()) .await; assert_ok(response).await; @@ -777,7 +777,7 @@ mod tracker_apis { force_database_error(&api_server.tracker); let response = Client::new(api_server.get_connection_info()) - .delete_auth_key(&auth_key.id.to_string()) + .delete_auth_key(&auth_key.key.to_string()) .await; assert_failed_to_delete_key(response).await; @@ -797,7 +797,7 @@ mod tracker_apis { .unwrap(); let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .delete_auth_key(&auth_key.id.to_string()) + .delete_auth_key(&auth_key.key.to_string()) .await; assert_token_not_valid(response).await; @@ -810,7 +810,7 @@ mod tracker_apis { .unwrap(); let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) - .delete_auth_key(&auth_key.id.to_string()) + .delete_auth_key(&auth_key.key.to_string()) .await; assert_unauthorized(response).await; From af038dedc0876bda2809505998502e2067ebdb89 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 5 Mar 2023 12:17:39 +0000 Subject: [PATCH 372/435] refactor: rename structs, attributes and variables with sufix KeyId or key_id to `Key` and `key`. removing the `Id` and `id`, since the `KeyId` struct was renamed to `Key`. 
--- src/apis/handlers.rs | 6 ++--- src/databases/mod.rs | 4 ++-- .../axum_implementation/extractors/key.rs | 14 +++++------ .../axum_implementation/handlers/announce.rs | 6 ++--- src/http/axum_implementation/handlers/auth.rs | 4 ++-- .../axum_implementation/handlers/scrape.rs | 6 ++--- src/http/warp_implementation/filters.rs | 6 ++--- src/http/warp_implementation/handlers.rs | 12 +++++----- src/http/warp_implementation/routes.rs | 6 ++--- src/tracker/auth.rs | 18 +++++++------- src/tracker/error.rs | 4 ++-- src/tracker/mod.rs | 24 +++++++++---------- tests/http/client.rs | 18 +++++++------- tests/http/connection_info.rs | 4 ++-- tests/http_tracker.rs | 24 +++++++++---------- tests/tracker_api.rs | 10 ++++---- 16 files changed, 83 insertions(+), 83 deletions(-) diff --git a/src/apis/handlers.rs b/src/apis/handlers.rs index 652f491e5..410def39b 100644 --- a/src/apis/handlers.rs +++ b/src/apis/handlers.rs @@ -101,15 +101,15 @@ pub async fn generate_auth_key_handler(State(tracker): State>, Path } #[derive(Deserialize)] -pub struct KeyIdParam(String); +pub struct KeyParam(String); pub async fn delete_auth_key_handler( State(tracker): State>, - Path(seconds_valid_or_key): Path, + Path(seconds_valid_or_key): Path, ) -> Response { match Key::from_str(&seconds_valid_or_key.0) { Err(_) => invalid_auth_key_param_response(&seconds_valid_or_key.0), - Ok(key_id) => match tracker.remove_auth_key(&key_id.to_string()).await { + Ok(key) => match tracker.remove_auth_key(&key.to_string()).await { Ok(_) => ok_response(), Err(e) => failed_to_delete_key_response(e), }, diff --git a/src/databases/mod.rs b/src/databases/mod.rs index 038be0ea3..247f571d7 100644 --- a/src/databases/mod.rs +++ b/src/databases/mod.rs @@ -70,12 +70,12 @@ pub trait Database: Sync + Send { async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; - // todo: replace type `&str` with `&KeyId` + // todo: replace type `&str` with `&Key` async fn get_key_from_keys(&self, key: &str) -> Result, 
Error>; async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result; - // todo: replace type `&str` with `&KeyId` + // todo: replace type `&str` with `&Key` async fn remove_key_from_keys(&self, key: &str) -> Result; async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { diff --git a/src/http/axum_implementation/extractors/key.rs b/src/http/axum_implementation/extractors/key.rs index ecdc9d801..50aef4a7c 100644 --- a/src/http/axum_implementation/extractors/key.rs +++ b/src/http/axum_implementation/extractors/key.rs @@ -5,30 +5,30 @@ use axum::extract::{FromRequestParts, Path}; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; -use crate::http::axum_implementation::handlers::auth::{self, KeyIdParam}; +use crate::http::axum_implementation::handlers::auth::{self, KeyParam}; use crate::http::axum_implementation::responses; use crate::tracker::auth::Key; -pub struct ExtractKeyId(pub Key); +pub struct Extract(pub Key); #[async_trait] -impl FromRequestParts for ExtractKeyId +impl FromRequestParts for Extract where S: Send + Sync, { type Rejection = Response; async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { - match Path::::from_request_parts(parts, state).await { - Ok(key_id_param) => { - let Ok(key_id) = key_id_param.0.value().parse::() else { + match Path::::from_request_parts(parts, state).await { + Ok(key_param) => { + let Ok(key) = key_param.0.value().parse::() else { return Err(responses::error::Error::from( auth::Error::InvalidKeyFormat { location: Location::caller() }) .into_response()) }; - Ok(ExtractKeyId(key_id)) + Ok(Extract(key)) } Err(rejection) => match rejection { axum::extract::rejection::PathRejection::FailedToDeserializePathParams(_) => { diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 93dbc8115..4bb06da73 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ 
b/src/http/axum_implementation/handlers/announce.rs @@ -8,7 +8,7 @@ use axum::response::{IntoResponse, Response}; use log::debug; use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; -use crate::http::axum_implementation::extractors::key::ExtractKeyId; +use crate::http::axum_implementation::extractors::key::Extract; use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::handlers::auth; @@ -41,12 +41,12 @@ pub async fn handle_without_key( pub async fn handle_with_key( State(tracker): State>, ExtractRequest(announce_request): ExtractRequest, - ExtractKeyId(key_id): ExtractKeyId, + Extract(key): Extract, remote_client_ip: RemoteClientIp, ) -> Response { debug!("http announce request: {:#?}", announce_request); - match tracker.authenticate(&key_id).await { + match tracker.authenticate(&key).await { Ok(_) => (), Err(error) => return responses::error::Error::from(error).into_response(), } diff --git a/src/http/axum_implementation/handlers/auth.rs b/src/http/axum_implementation/handlers/auth.rs index 5673ea851..b1b73e60e 100644 --- a/src/http/axum_implementation/handlers/auth.rs +++ b/src/http/axum_implementation/handlers/auth.rs @@ -7,9 +7,9 @@ use crate::http::axum_implementation::responses; use crate::tracker::auth; #[derive(Deserialize)] -pub struct KeyIdParam(String); +pub struct KeyParam(String); -impl KeyIdParam { +impl KeyParam { #[must_use] pub fn value(&self) -> String { self.0.clone() diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 19d902f8e..41d6bf3dc 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -4,7 +4,7 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; -use crate::http::axum_implementation::extractors::key::ExtractKeyId; 
+use crate::http::axum_implementation::extractors::key::Extract; use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; @@ -31,12 +31,12 @@ pub async fn handle_without_key( pub async fn handle_with_key( State(tracker): State>, ExtractRequest(scrape_request): ExtractRequest, - ExtractKeyId(key_id): ExtractKeyId, + Extract(key): Extract, remote_client_ip: RemoteClientIp, ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); - match tracker.authenticate(&key_id).await { + match tracker.authenticate(&key).await { Ok(_) => (), Err(_) => return handle_fake_scrape(&tracker, &scrape_request, &remote_client_ip).await, } diff --git a/src/http/warp_implementation/filters.rs b/src/http/warp_implementation/filters.rs index a3000bfaa..06168d82a 100644 --- a/src/http/warp_implementation/filters.rs +++ b/src/http/warp_implementation/filters.rs @@ -37,11 +37,11 @@ pub fn with_peer_id() -> impl Filter + /// Pass Arc along #[must_use] -pub fn with_auth_key_id() -> impl Filter,), Error = Infallible> + Clone { +pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { warp::path::param::() .map(|key: String| { - let key_id = Key::from_str(&key); - match key_id { + let key = Key::from_str(&key); + match key { Ok(id) => Some(id), Err(_) => None, } diff --git a/src/http/warp_implementation/handlers.rs b/src/http/warp_implementation/handlers.rs index 4a64259bb..f9aedeb8f 100644 --- a/src/http/warp_implementation/handlers.rs +++ b/src/http/warp_implementation/handlers.rs @@ -22,11 +22,11 @@ use crate::tracker::{self, auth, peer, statistics, torrent}; /// Will return `ServerError` that wraps the `tracker::error::Error` if unable to `authenticate_request`. 
pub async fn authenticate( info_hash: &InfoHash, - auth_key_id: &Option, + auth_key: &Option, tracker: Arc, ) -> Result<(), Error> { tracker - .authenticate_request(info_hash, auth_key_id) + .authenticate_request(info_hash, auth_key) .await .map_err(|e| Error::TrackerError { source: (Arc::new(e) as Arc).into(), @@ -38,7 +38,7 @@ pub async fn authenticate( /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_announce_response`. pub async fn handle_announce( announce_request: request::Announce, - auth_key_id: Option, + auth_key: Option, tracker: Arc, ) -> WebResult { debug!("http announce request: {:#?}", announce_request); @@ -46,7 +46,7 @@ pub async fn handle_announce( let info_hash = announce_request.info_hash; let remote_client_ip = announce_request.peer_addr; - authenticate(&info_hash, &auth_key_id, tracker.clone()).await?; + authenticate(&info_hash, &auth_key, tracker.clone()).await?; let mut peer = peer_builder::from_request(&announce_request, &remote_client_ip); @@ -78,7 +78,7 @@ pub async fn handle_announce( /// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. 
pub async fn handle_scrape( scrape_request: request::Scrape, - auth_key_id: Option, + auth_key: Option, tracker: Arc, ) -> WebResult { let mut files: HashMap = HashMap::new(); @@ -87,7 +87,7 @@ pub async fn handle_scrape( for info_hash in &scrape_request.info_hashes { let scrape_entry = match db.get(info_hash) { Some(torrent_info) => { - if authenticate(info_hash, &auth_key_id, tracker.clone()).await.is_ok() { + if authenticate(info_hash, &auth_key, tracker.clone()).await.is_ok() { let (seeders, completed, leechers) = torrent_info.get_stats(); response::ScrapeEntry { complete: seeders, diff --git a/src/http/warp_implementation/routes.rs b/src/http/warp_implementation/routes.rs index 2ee60e8c9..c46c502e4 100644 --- a/src/http/warp_implementation/routes.rs +++ b/src/http/warp_implementation/routes.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use warp::{Filter, Rejection}; -use super::filters::{with_announce_request, with_auth_key_id, with_scrape_request, with_tracker}; +use super::filters::{with_announce_request, with_auth_key, with_scrape_request, with_tracker}; use super::handlers::{handle_announce, handle_scrape, send_error}; use crate::tracker; @@ -20,7 +20,7 @@ fn announce(tracker: Arc) -> impl Filter) -> impl Filter Result { if s.len() != AUTH_KEY_LENGTH { - return Err(ParseKeyIdError); + return Err(ParseKeyError); } Ok(Self(s.to_string())) @@ -106,10 +106,10 @@ pub enum Error { KeyVerificationError { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, - #[error("Failed to read key: {key_id}, {location}")] + #[error("Failed to read key: {key}, {location}")] UnableToReadKey { location: &'static Location<'static>, - key_id: Box, + key: Box, }, #[error("Key has expired, {location}")] KeyExpired { location: &'static Location<'static> }, @@ -132,12 +132,12 @@ mod tests { use crate::tracker::auth; #[test] - fn auth_key_id_from_string() { + fn auth_key_from_string() { let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key_id = 
auth::Key::from_str(key_string); + let auth_key = auth::Key::from_str(key_string); - assert!(auth_key_id.is_ok()); - assert_eq!(auth_key_id.unwrap().to_string(), key_string); + assert!(auth_key.is_ok()); + assert_eq!(auth_key.unwrap().to_string(), key_string); } #[test] diff --git a/src/tracker/error.rs b/src/tracker/error.rs index f03f4b3e5..51bcbf3bb 100644 --- a/src/tracker/error.rs +++ b/src/tracker/error.rs @@ -4,9 +4,9 @@ use crate::located_error::LocatedError; #[derive(thiserror::Error, Debug, Clone)] pub enum Error { - #[error("The supplied key: {key_id:?}, is not valid: {source}")] + #[error("The supplied key: {key:?}, is not valid: {source}")] PeerKeyNotValid { - key_id: super::auth::Key, + key: super::auth::Key, source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, #[error("The peer is not authenticated, {location}")] diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 34977b4de..2ebc4bfc3 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -200,9 +200,9 @@ impl Tracker { /// /// # Panics /// - /// Will panic if key cannot be converted into a valid `KeyId`. + /// Will panic if key cannot be converted into a valid `Key`. pub async fn remove_auth_key(&self, key: &str) -> Result<(), databases::error::Error> { - // todo: change argument `key: &str` to `key_id: &KeyId` + // todo: change argument `key: &str` to `key: &Key` self.database.remove_key_from_keys(key).await?; self.keys.write().await.remove(&key.parse::().unwrap()); Ok(()) @@ -211,13 +211,13 @@ impl Tracker { /// # Errors /// /// Will return a `key::Error` if unable to get any `auth_key`. - pub async fn verify_auth_key(&self, key_id: &Key) -> Result<(), auth::Error> { + pub async fn verify_auth_key(&self, key: &Key) -> Result<(), auth::Error> { // code-review: this function is public only because it's used in a test. // We should change the test and make it private. 
- match self.keys.read().await.get(key_id) { + match self.keys.read().await.get(key) { None => Err(auth::Error::UnableToReadKey { location: Location::caller(), - key_id: Box::new(key_id.clone()), + key: Box::new(key.clone()), }), Some(key) => auth::verify(key), } @@ -342,7 +342,7 @@ impl Tracker { Some(key) => { if let Err(e) = self.verify_auth_key(key).await { return Err(Error::PeerKeyNotValid { - key_id: key.clone(), + key: key.clone(), source: (Arc::new(e) as Arc).into(), }); } @@ -371,9 +371,9 @@ impl Tracker { /// # Errors /// /// Will return an error if the the authentication key cannot be verified. - pub async fn authenticate(&self, key_id: &Key) -> Result<(), auth::Error> { + pub async fn authenticate(&self, key: &Key) -> Result<(), auth::Error> { if self.is_private() { - self.verify_auth_key(key_id).await + self.verify_auth_key(key).await } else { Ok(()) } @@ -1165,9 +1165,9 @@ mod tests { async fn it_should_fail_authenticating_a_peer_when_it_uses_an_unregistered_key() { let tracker = private_tracker(); - let unregistered_key_id = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let result = tracker.authenticate(&unregistered_key_id).await; + let result = tracker.authenticate(&unregistered_key).await; assert!(result.is_err()); } @@ -1187,9 +1187,9 @@ mod tests { async fn it_should_fail_verifying_an_unregistered_authentication_key() { let tracker = private_tracker(); - let unregistered_key_id = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - assert!(tracker.verify_auth_key(&unregistered_key_id).await.is_err()); + assert!(tracker.verify_auth_key(&unregistered_key).await.is_err()); } #[tokio::test] diff --git a/tests/http/client.rs b/tests/http/client.rs index fa5fd5d16..762401078 100644 --- a/tests/http/client.rs +++ b/tests/http/client.rs 
@@ -11,7 +11,7 @@ use super::requests::scrape; pub struct Client { connection_info: ConnectionInfo, reqwest_client: ReqwestClient, - key_id: Option, + key: Option, } /// URL components in this context: @@ -27,7 +27,7 @@ impl Client { Self { connection_info, reqwest_client: reqwest::Client::builder().build().unwrap(), - key_id: None, + key: None, } } @@ -36,15 +36,15 @@ impl Client { Self { connection_info, reqwest_client: reqwest::Client::builder().local_address(local_address).build().unwrap(), - key_id: None, + key: None, } } - pub fn authenticated(connection_info: ConnectionInfo, key_id: Key) -> Self { + pub fn authenticated(connection_info: ConnectionInfo, key: Key) -> Self { Self { connection_info, reqwest_client: reqwest::Client::builder().build().unwrap(), - key_id: Some(key_id), + key: Some(key), } } @@ -56,8 +56,8 @@ impl Client { self.get(&self.build_scrape_path_and_query(query)).await } - pub async fn announce_with_header(&self, query: &Query, key_id: &str, value: &str) -> Response { - self.get_with_header(&self.build_announce_path_and_query(query), key_id, value) + pub async fn announce_with_header(&self, query: &Query, key: &str, value: &str) -> Response { + self.get_with_header(&self.build_announce_path_and_query(query), key, value) .await } @@ -83,8 +83,8 @@ impl Client { } fn build_path(&self, path: &str) -> String { - match &self.key_id { - Some(key_id) => format!("{path}/{key_id}"), + match &self.key { + Some(key) => format!("{path}/{key}"), None => path.to_string(), } } diff --git a/tests/http/connection_info.rs b/tests/http/connection_info.rs index eedaa73f0..5736271fd 100644 --- a/tests/http/connection_info.rs +++ b/tests/http/connection_info.rs @@ -3,14 +3,14 @@ use torrust_tracker::tracker::auth::Key; #[derive(Clone, Debug)] pub struct ConnectionInfo { pub bind_address: String, - pub key_id: Option, + pub key: Option, } impl ConnectionInfo { pub fn anonymous(bind_address: &str) -> Self { Self { bind_address: bind_address.to_string(), - key_id: 
None, + key: None, } } } diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index b1b90f923..4219be30a 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1128,9 +1128,9 @@ mod warp_http_tracker_server { let http_tracker_server = start_private_http_tracker(Version::Warp).await; // The tracker does not have this key - let unregistered_key_id = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key_id) + let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key) .announce(&QueryBuilder::default().query()) .await; @@ -1242,9 +1242,9 @@ mod warp_http_tracker_server { ) .await; - let false_key_id: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); + let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); - let response = Client::authenticated(http_tracker.get_connection_info(), false_key_id) + let response = Client::authenticated(http_tracker.get_connection_info(), false_key) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2437,11 +2437,11 @@ mod axum_http_tracker_server { async fn should_fail_if_the_key_query_param_cannot_be_parsed() { let http_tracker_server = start_private_http_tracker(Version::Axum).await; - let invalid_key_id = "INVALID_KEY_ID"; + let invalid_key = "INVALID_KEY"; let response = Client::new(http_tracker_server.get_connection_info()) .get(&format!( - "announce/{invalid_key_id}?info_hash=%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0" + 
"announce/{invalid_key}?info_hash=%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0" )) .await; @@ -2453,9 +2453,9 @@ mod axum_http_tracker_server { let http_tracker_server = start_private_http_tracker(Version::Axum).await; // The tracker does not have this key - let unregistered_key_id = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key_id) + let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key) .announce(&QueryBuilder::default().query()) .await; @@ -2484,11 +2484,11 @@ mod axum_http_tracker_server { async fn should_fail_if_the_key_query_param_cannot_be_parsed() { let http_tracker_server = start_private_http_tracker(Version::Axum).await; - let invalid_key_id = "INVALID_KEY_ID"; + let invalid_key = "INVALID_KEY"; let response = Client::new(http_tracker_server.get_connection_info()) .get(&format!( - "scrape/{invalid_key_id}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" + "scrape/{invalid_key}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" )) .await; @@ -2583,9 +2583,9 @@ mod axum_http_tracker_server { ) .await; - let false_key_id: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); + let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); - let response = Client::authenticated(http_tracker.get_connection_info(), false_key_id) + let response = Client::authenticated(http_tracker.get_connection_info(), false_key) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 600d26f2f..35d9af248 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -741,10 
+741,10 @@ mod tracker_apis { } #[tokio::test] - async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { + async fn should_fail_deleting_an_auth_key_when_the_key_is_invalid() { let api_server = start_default_api().await; - let invalid_auth_key_ids = [ + let invalid_auth_keys = [ // "", it returns a 404 // " ", it returns a 404 "0", @@ -754,12 +754,12 @@ mod tracker_apis { "IrweYtVuQPGbG9Jzx1DihcPmJGGpVy8zs", // 34 char key cspell:disable-line ]; - for invalid_auth_key_id in &invalid_auth_key_ids { + for invalid_auth_key in &invalid_auth_keys { let response = Client::new(api_server.get_connection_info()) - .delete_auth_key(invalid_auth_key_id) + .delete_auth_key(invalid_auth_key) .await; - assert_invalid_auth_key_param(response, invalid_auth_key_id).await; + assert_invalid_auth_key_param(response, invalid_auth_key).await; } } From 6fc6c14ba613aed0c102845b71315779926b2131 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 6 Mar 2023 11:09:29 +0000 Subject: [PATCH 373/435] test(http): add tests to axum extractor for announce request --- .../extractors/announce_request.rs | 100 +++++++++++++++--- .../axum_implementation/responses/error.rs | 2 +- 2 files changed, 85 insertions(+), 17 deletions(-) diff --git a/src/http/axum_implementation/extractors/announce_request.rs b/src/http/axum_implementation/extractors/announce_request.rs index 0371be9a4..1680cd15c 100644 --- a/src/http/axum_implementation/extractors/announce_request.rs +++ b/src/http/axum_implementation/extractors/announce_request.rs @@ -19,27 +19,95 @@ where type Rejection = Response; async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { - let raw_query = parts.uri.query(); - - if raw_query.is_none() { - return Err(responses::error::Error::from(ParseAnnounceQueryError::MissingParams { - location: Location::caller(), - }) - .into_response()); + match extract_announce_from(parts.uri.query()) { + Ok(announce_request) => Ok(ExtractRequest(announce_request)), + Err(error) => 
Err(error.into_response()), } + } +} - let query = raw_query.unwrap().parse::(); +fn extract_announce_from(maybe_raw_query: Option<&str>) -> Result { + if maybe_raw_query.is_none() { + return Err(responses::error::Error::from(ParseAnnounceQueryError::MissingParams { + location: Location::caller(), + })); + } - if let Err(error) = query { - return Err(responses::error::Error::from(error).into_response()); - } + let query = maybe_raw_query.unwrap().parse::(); - let announce_request = Announce::try_from(query.unwrap()); + if let Err(error) = query { + return Err(responses::error::Error::from(error)); + } - if let Err(error) = announce_request { - return Err(responses::error::Error::from(error).into_response()); - } + let announce_request = Announce::try_from(query.unwrap()); + + if let Err(error) = announce_request { + return Err(responses::error::Error::from(error)); + } + + Ok(announce_request.unwrap()) +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use super::extract_announce_from; + use crate::http::axum_implementation::requests::announce::{Announce, Compact, Event}; + use crate::http::axum_implementation::responses::error::Error; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::peer; + + fn assert_error_response(error: &Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. 
Error: {error:?}" + ); + } + + #[test] + fn it_should_extract_the_announce_request_from_the_url_query_params() { + let raw_query = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0"; + + let announce = extract_announce_from(Some(raw_query)).unwrap(); + + assert_eq!( + announce, + Announce { + info_hash: InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(), + peer_id: peer::Id(*b"-qB00000000000000001"), + port: 17548, + downloaded: Some(0), + uploaded: Some(0), + left: Some(0), + event: Some(Event::Completed), + compact: Some(Compact::NotAccepted), + } + ); + } + + #[test] + fn it_should_reject_a_request_without_query_params() { + let response = extract_announce_from(None).unwrap_err(); + + assert_error_response( + &response, + "Cannot parse query params for announce request: missing query params for announce request", + ); + } + + #[test] + fn it_should_reject_a_request_with_a_query_that_cannot_be_parsed() { + let invalid_query = "param1=value1=value2"; + let response = extract_announce_from(Some(invalid_query)).unwrap_err(); + + assert_error_response(&response, "Cannot parse query params"); + } + + #[test] + fn it_should_reject_a_request_with_a_query_that_cannot_be_parsed_into_an_announce_request() { + let response = extract_announce_from(Some("param1=value1")).unwrap_err(); - Ok(ExtractRequest(announce_request.unwrap())) + assert_error_response(&response, "Cannot parse query params for announce request"); } } diff --git a/src/http/axum_implementation/responses/error.rs b/src/http/axum_implementation/responses/error.rs index bcf2aaa57..0bcdbd9fb 100644 --- a/src/http/axum_implementation/responses/error.rs +++ b/src/http/axum_implementation/responses/error.rs @@ -2,7 +2,7 @@ use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; use serde::{self, Serialize}; -#[derive(Serialize)] 
+#[derive(Serialize, Debug, PartialEq)] pub struct Error { #[serde(rename = "failure reason")] pub failure_reason: String, From 7b3162267bc493a40b24f46f153857c93649b6ed Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 6 Mar 2023 16:02:19 +0000 Subject: [PATCH 374/435] test(http): add tests to axum extractor for scrape request --- .../extractors/scrape_request.rs | 120 +++++++++++++++--- 1 file changed, 105 insertions(+), 15 deletions(-) diff --git a/src/http/axum_implementation/extractors/scrape_request.rs b/src/http/axum_implementation/extractors/scrape_request.rs index 4212abfcb..998728f59 100644 --- a/src/http/axum_implementation/extractors/scrape_request.rs +++ b/src/http/axum_implementation/extractors/scrape_request.rs @@ -19,27 +19,117 @@ where type Rejection = Response; async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { - let raw_query = parts.uri.query(); - - if raw_query.is_none() { - return Err(responses::error::Error::from(ParseScrapeQueryError::MissingParams { - location: Location::caller(), - }) - .into_response()); + match extract_scrape_from(parts.uri.query()) { + Ok(scrape_request) => Ok(ExtractRequest(scrape_request)), + Err(error) => Err(error.into_response()), } + } +} - let query = raw_query.unwrap().parse::(); +fn extract_scrape_from(maybe_raw_query: Option<&str>) -> Result { + if maybe_raw_query.is_none() { + return Err(responses::error::Error::from(ParseScrapeQueryError::MissingParams { + location: Location::caller(), + })); + } - if let Err(error) = query { - return Err(responses::error::Error::from(error).into_response()); - } + let query = maybe_raw_query.unwrap().parse::(); + + if let Err(error) = query { + return Err(responses::error::Error::from(error)); + } + + let scrape_request = Scrape::try_from(query.unwrap()); + + if let Err(error) = scrape_request { + return Err(responses::error::Error::from(error)); + } + + Ok(scrape_request.unwrap()) +} - let scrape_request = Scrape::try_from(query.unwrap()); 
+#[cfg(test)] +mod tests { + use std::str::FromStr; - if let Err(error) = scrape_request { - return Err(responses::error::Error::from(error).into_response()); + use super::extract_scrape_from; + use crate::http::axum_implementation::requests::scrape::Scrape; + use crate::http::axum_implementation::responses::error::Error; + use crate::protocol::info_hash::InfoHash; + + struct TestInfoHash { + pub bencoded: String, + pub value: InfoHash, + } + + fn test_info_hash() -> TestInfoHash { + TestInfoHash { + bencoded: "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0".to_owned(), + value: InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(), } + } + + fn assert_error_response(error: &Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. Error: {error:?}" + ); + } + + #[test] + fn it_should_extract_the_scrape_request_from_the_url_query_params() { + let info_hash = test_info_hash(); + + let raw_query = format!("info_hash={}", info_hash.bencoded); + + let scrape = extract_scrape_from(Some(&raw_query)).unwrap(); + + assert_eq!( + scrape, + Scrape { + info_hashes: vec![info_hash.value], + } + ); + } + + #[test] + fn it_should_extract_the_scrape_request_from_the_url_query_params_with_more_than_one_info_hash() { + let info_hash = test_info_hash(); + + let raw_query = format!("info_hash={}&info_hash={}", info_hash.bencoded, info_hash.bencoded); + + let scrape = extract_scrape_from(Some(&raw_query)).unwrap(); + + assert_eq!( + scrape, + Scrape { + info_hashes: vec![info_hash.value, info_hash.value], + } + ); + } + + #[test] + fn it_should_reject_a_request_without_query_params() { + let response = extract_scrape_from(None).unwrap_err(); + + assert_error_response( + &response, + "Cannot parse query params for scrape request: missing query params for scrape request", + ); + } + + #[test] + fn it_should_reject_a_request_with_a_query_that_cannot_be_parsed() { + let 
invalid_query = "param1=value1=value2"; + let response = extract_scrape_from(Some(invalid_query)).unwrap_err(); + + assert_error_response(&response, "Cannot parse query params"); + } + + #[test] + fn it_should_reject_a_request_with_a_query_that_cannot_be_parsed_into_a_scrape_request() { + let response = extract_scrape_from(Some("param1=value1")).unwrap_err(); - Ok(ExtractRequest(scrape_request.unwrap())) + assert_error_response(&response, "Cannot parse query params for scrape request"); } } From 828065b38dab9164778eea575b3b9b241ad1ec80 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 6 Mar 2023 17:07:01 +0000 Subject: [PATCH 375/435] test(http): add tests to Axum extractor for auth key --- .../axum_implementation/extractors/key.rs | 101 ++++++++++++------ 1 file changed, 70 insertions(+), 31 deletions(-) diff --git a/src/http/axum_implementation/extractors/key.rs b/src/http/axum_implementation/extractors/key.rs index 50aef4a7c..2a3f2a991 100644 --- a/src/http/axum_implementation/extractors/key.rs +++ b/src/http/axum_implementation/extractors/key.rs @@ -1,6 +1,8 @@ +//! Wrapper for Axum `Path` extractor to return custom errors. 
use std::panic::Location; use axum::async_trait; +use axum::extract::rejection::PathRejection; use axum::extract::{FromRequestParts, Path}; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; @@ -19,37 +21,74 @@ where type Rejection = Response; async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { - match Path::::from_request_parts(parts, state).await { - Ok(key_param) => { - let Ok(key) = key_param.0.value().parse::() else { - return Err(responses::error::Error::from( - auth::Error::InvalidKeyFormat { - location: Location::caller() - }) - .into_response()) - }; - Ok(Extract(key)) - } - Err(rejection) => match rejection { - axum::extract::rejection::PathRejection::FailedToDeserializePathParams(_) => { - return Err(responses::error::Error::from(auth::Error::InvalidKeyFormat { - location: Location::caller(), - }) - .into_response()) - } - axum::extract::rejection::PathRejection::MissingPathParams(_) => { - return Err(responses::error::Error::from(auth::Error::MissingAuthKey { - location: Location::caller(), - }) - .into_response()) - } - _ => { - return Err(responses::error::Error::from(auth::Error::CannotExtractKeyParam { - location: Location::caller(), - }) - .into_response()) - } - }, + // Extract `key` from URL path with Axum `Path` extractor + let maybe_path_with_key = Path::::from_request_parts(parts, state).await; + + match extract_key(maybe_path_with_key) { + Ok(key) => Ok(Extract(key)), + Err(error) => Err(error.into_response()), + } + } +} + +fn extract_key(path_extractor_result: Result, PathRejection>) -> Result { + match path_extractor_result { + Ok(key_param) => match parse_key(&key_param.0.value()) { + Ok(key) => Ok(key), + Err(error) => Err(error), + }, + Err(path_rejection) => Err(custom_error(&path_rejection)), + } +} + +fn parse_key(key: &str) -> Result { + let key = key.parse::(); + + match key { + Ok(key) => Ok(key), + Err(_parse_key_error) => Err(responses::error::Error::from(auth::Error::InvalidKeyFormat 
{ + location: Location::caller(), + })), + } +} + +fn custom_error(rejection: &PathRejection) -> responses::error::Error { + match rejection { + axum::extract::rejection::PathRejection::FailedToDeserializePathParams(_) => { + responses::error::Error::from(auth::Error::InvalidKeyFormat { + location: Location::caller(), + }) + } + axum::extract::rejection::PathRejection::MissingPathParams(_) => { + responses::error::Error::from(auth::Error::MissingAuthKey { + location: Location::caller(), + }) } + _ => responses::error::Error::from(auth::Error::CannotExtractKeyParam { + location: Location::caller(), + }), + } +} + +#[cfg(test)] +mod tests { + + use super::parse_key; + use crate::http::axum_implementation::responses::error::Error; + + fn assert_error_response(error: &Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. Error: {error:?}" + ); + } + + #[test] + fn it_should_return_an_authentication_error_if_the_key_cannot_be_parsed() { + let invalid_key = "invalid_key"; + + let response = parse_key(invalid_key).unwrap_err(); + + assert_error_response(&response, "Authentication error: Invalid format for authentication key param"); } } From 3420576edbffaae54f74bdc118de3ee31544847e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 6 Mar 2023 18:41:37 +0000 Subject: [PATCH 376/435] test(http): add tests for peer IP resolution --- .../axum_implementation/extractors/peer_ip.rs | 143 ++++++++++++++++-- 1 file changed, 129 insertions(+), 14 deletions(-) diff --git a/src/http/axum_implementation/extractors/peer_ip.rs b/src/http/axum_implementation/extractors/peer_ip.rs index aae348d99..10f590e70 100644 --- a/src/http/axum_implementation/extractors/peer_ip.rs +++ b/src/http/axum_implementation/extractors/peer_ip.rs @@ -20,7 +20,7 @@ pub enum ResolutionError { impl From for responses::error::Error { fn from(err: ResolutionError) -> Self { responses::error::Error { - 
failure_reason: format!("{err}"), + failure_reason: format!("Error resolving peer IP: {err}"), } } } @@ -32,23 +32,138 @@ impl From for responses::error::Error { /// Will return an error if the peer IP cannot be obtained according to the configuration. /// For example, if the IP is extracted from an HTTP header which is missing in the request. pub fn resolve(on_reverse_proxy: bool, remote_client_ip: &RemoteClientIp) -> Result { + match resolve_peer_ip(on_reverse_proxy, remote_client_ip) { + Ok(ip) => Ok(ip), + Err(error) => Err(error.into_response()), + } +} + +fn resolve_peer_ip(on_reverse_proxy: bool, remote_client_ip: &RemoteClientIp) -> Result { if on_reverse_proxy { - if let Some(ip) = remote_client_ip.right_most_x_forwarded_for { - Ok(ip) - } else { - Err( - responses::error::Error::from(ResolutionError::MissingRightMostXForwardedForIp { - location: Location::caller(), - }) - .into_response(), - ) - } - } else if let Some(ip) = remote_client_ip.connection_info_ip { + resolve_peer_ip_on_reverse_proxy(remote_client_ip) + } else { + resolve_peer_ip_without_reverse_proxy(remote_client_ip) + } +} + +fn resolve_peer_ip_without_reverse_proxy(remote_client_ip: &RemoteClientIp) -> Result { + if let Some(ip) = remote_client_ip.connection_info_ip { Ok(ip) } else { Err(responses::error::Error::from(ResolutionError::MissingClientIp { location: Location::caller(), - }) - .into_response()) + })) + } +} + +fn resolve_peer_ip_on_reverse_proxy(remote_client_ip: &RemoteClientIp) -> Result { + if let Some(ip) = remote_client_ip.right_most_x_forwarded_for { + Ok(ip) + } else { + Err(responses::error::Error::from( + ResolutionError::MissingRightMostXForwardedForIp { + location: Location::caller(), + }, + )) + } +} + +#[cfg(test)] +mod tests { + use super::resolve_peer_ip; + use crate::http::axum_implementation::responses::error::Error; + + fn assert_error_response(error: &Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response 
does not contain message: '{error_message}'. Error: {error:?}" + ); + } + + mod working_without_reverse_proxy { + use std::net::IpAddr; + use std::str::FromStr; + + use super::{assert_error_response, resolve_peer_ip}; + use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; + + #[test] + fn it_should_get_the_peer_ip_from_the_connection_info() { + let on_reverse_proxy = false; + + let ip = resolve_peer_ip( + on_reverse_proxy, + &RemoteClientIp { + right_most_x_forwarded_for: None, + connection_info_ip: Some(IpAddr::from_str("203.0.113.195").unwrap()), + }, + ) + .unwrap(); + + assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); + } + + #[test] + fn it_should_return_an_error_if_it_cannot_get_the_peer_ip_from_the_connection_info() { + let on_reverse_proxy = false; + + let response = resolve_peer_ip( + on_reverse_proxy, + &RemoteClientIp { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }, + ) + .unwrap_err(); + + assert_error_response( + &response, + "Error resolving peer IP: cannot get the client IP from the connection info", + ); + } + } + + mod working_on_reverse_proxy { + use std::net::IpAddr; + use std::str::FromStr; + + use super::assert_error_response; + use crate::http::axum_implementation::extractors::peer_ip::resolve_peer_ip; + use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; + + #[test] + fn it_should_get_the_peer_ip_from_the_right_most_ip_in_the_x_forwarded_for_header() { + let on_reverse_proxy = true; + + let ip = resolve_peer_ip( + on_reverse_proxy, + &RemoteClientIp { + right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), + connection_info_ip: None, + }, + ) + .unwrap(); + + assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); + } + + #[test] + fn it_should_return_an_error_if_it_cannot_get_the_right_most_ip_from_the_x_forwarded_for_header() { + let on_reverse_proxy = true; + + let response = resolve_peer_ip( + on_reverse_proxy, + 
&RemoteClientIp { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }, + ) + .unwrap_err(); + + assert_error_response( + &response, + "Error resolving peer IP: missing or invalid the right most X-Forwarded-For IP", + ); + } } } From 743f86908cd7c1e936a86cc2f1b04ce940face24 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 6 Mar 2023 18:56:07 +0000 Subject: [PATCH 377/435] refactor(http): move peer IP resolver to handlers mod It's not an Axum extractor. It's a wrapper of another custom Axum extractor which is used in handlers. Since it does not implements the trait `FromRequestParts` the `handlers` dir seems to be a better location. --- src/http/axum_implementation/extractors/mod.rs | 1 - .../axum_implementation/extractors/remote_client_ip.rs | 2 +- src/http/axum_implementation/handlers/announce.rs | 2 +- src/http/axum_implementation/handlers/common/mod.rs | 1 + .../{extractors => handlers/common}/peer_ip.rs | 7 ++++--- src/http/axum_implementation/handlers/mod.rs | 1 + src/http/axum_implementation/handlers/scrape.rs | 2 +- 7 files changed, 9 insertions(+), 7 deletions(-) create mode 100644 src/http/axum_implementation/handlers/common/mod.rs rename src/http/axum_implementation/{extractors => handlers/common}/peer_ip.rs (93%) diff --git a/src/http/axum_implementation/extractors/mod.rs b/src/http/axum_implementation/extractors/mod.rs index e6d9e8c67..04e9e306b 100644 --- a/src/http/axum_implementation/extractors/mod.rs +++ b/src/http/axum_implementation/extractors/mod.rs @@ -1,5 +1,4 @@ pub mod announce_request; pub mod key; -pub mod peer_ip; pub mod remote_client_ip; pub mod scrape_request; diff --git a/src/http/axum_implementation/extractors/remote_client_ip.rs b/src/http/axum_implementation/extractors/remote_client_ip.rs index e852a1b6f..cfc3532de 100644 --- a/src/http/axum_implementation/extractors/remote_client_ip.rs +++ b/src/http/axum_implementation/extractors/remote_client_ip.rs @@ -18,7 +18,7 @@ use serde::{Deserialize, Serialize}; /// 
`right_most_x_forwarded_for` = 126.0.0.2 /// `connection_info_ip` = 126.0.0.3 /// -/// More info about inner extractors : +/// More info about inner extractors: #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] pub struct RemoteClientIp { pub right_most_x_forwarded_for: Option, diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 4bb06da73..e4b5ece80 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -7,9 +7,9 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; +use super::common::peer_ip; use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; use crate::http::axum_implementation::extractors::key::Extract; -use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::handlers::auth; use crate::http::axum_implementation::requests::announce::{Announce, Compact, Event}; diff --git a/src/http/axum_implementation/handlers/common/mod.rs b/src/http/axum_implementation/handlers/common/mod.rs new file mode 100644 index 000000000..ed159a32b --- /dev/null +++ b/src/http/axum_implementation/handlers/common/mod.rs @@ -0,0 +1 @@ +pub mod peer_ip; diff --git a/src/http/axum_implementation/extractors/peer_ip.rs b/src/http/axum_implementation/handlers/common/peer_ip.rs similarity index 93% rename from src/http/axum_implementation/extractors/peer_ip.rs rename to src/http/axum_implementation/handlers/common/peer_ip.rs index 10f590e70..1c3b6c815 100644 --- a/src/http/axum_implementation/extractors/peer_ip.rs +++ b/src/http/axum_implementation/handlers/common/peer_ip.rs @@ -1,10 +1,11 @@ +//! Helper handler function to resolve the peer IP from the `RemoteClientIp` extractor. 
use std::net::IpAddr; use std::panic::Location; use axum::response::{IntoResponse, Response}; use thiserror::Error; -use super::remote_client_ip::RemoteClientIp; +use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::responses; #[derive(Error, Debug)] @@ -29,7 +30,7 @@ impl From for responses::error::Error { /// /// # Errors /// -/// Will return an error if the peer IP cannot be obtained according to the configuration. +/// Will return an error response if the peer IP cannot be obtained according to the configuration. /// For example, if the IP is extracted from an HTTP header which is missing in the request. pub fn resolve(on_reverse_proxy: bool, remote_client_ip: &RemoteClientIp) -> Result { match resolve_peer_ip(on_reverse_proxy, remote_client_ip) { @@ -128,8 +129,8 @@ mod tests { use std::str::FromStr; use super::assert_error_response; - use crate::http::axum_implementation::extractors::peer_ip::resolve_peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; + use crate::http::axum_implementation::handlers::common::peer_ip::resolve_peer_ip; #[test] fn it_should_get_the_peer_ip_from_the_right_most_ip_in_the_x_forwarded_for_header() { diff --git a/src/http/axum_implementation/handlers/mod.rs b/src/http/axum_implementation/handlers/mod.rs index e6b13ae91..36a810d95 100644 --- a/src/http/axum_implementation/handlers/mod.rs +++ b/src/http/axum_implementation/handlers/mod.rs @@ -3,6 +3,7 @@ use crate::tracker::error::Error; pub mod announce; pub mod auth; +pub mod common; pub mod scrape; impl From for responses::error::Error { diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 41d6bf3dc..d8d68a4c3 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -4,8 +4,8 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; 
use log::debug; +use super::common::peer_ip; use crate::http::axum_implementation::extractors::key::Extract; -use crate::http::axum_implementation::extractors::peer_ip; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; use crate::http::axum_implementation::requests::scrape::Scrape; From b4ae67d2cc4114c009268e53fd5c0192a3c52194 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 6 Mar 2023 19:07:07 +0000 Subject: [PATCH 378/435] docs(http): add mod description --- src/http/axum_implementation/extractors/remote_client_ip.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/http/axum_implementation/extractors/remote_client_ip.rs b/src/http/axum_implementation/extractors/remote_client_ip.rs index cfc3532de..0f6789261 100644 --- a/src/http/axum_implementation/extractors/remote_client_ip.rs +++ b/src/http/axum_implementation/extractors/remote_client_ip.rs @@ -1,3 +1,5 @@ +//! Wrapper for two Axum extractors to get the relevant information +//! to resolve the remote client IP. use std::net::{IpAddr, SocketAddr}; use axum::async_trait; From 637f25f25b8449fd7b5bc7a968da7d92bcef1484 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Mar 2023 14:02:51 +0000 Subject: [PATCH 379/435] refactor(http): move auth mod to handlers::common The `auth` mod does not contain a handler. It only contains auth error and a function to map the error into responses. 
--- src/http/axum_implementation/extractors/key.rs | 2 +- src/http/axum_implementation/handlers/announce.rs | 2 +- src/http/axum_implementation/handlers/{ => common}/auth.rs | 0 src/http/axum_implementation/handlers/common/mod.rs | 1 + src/http/axum_implementation/handlers/mod.rs | 1 - 5 files changed, 3 insertions(+), 3 deletions(-) rename src/http/axum_implementation/handlers/{ => common}/auth.rs (100%) diff --git a/src/http/axum_implementation/extractors/key.rs b/src/http/axum_implementation/extractors/key.rs index 2a3f2a991..e32c4c76a 100644 --- a/src/http/axum_implementation/extractors/key.rs +++ b/src/http/axum_implementation/extractors/key.rs @@ -7,7 +7,7 @@ use axum::extract::{FromRequestParts, Path}; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; -use crate::http::axum_implementation::handlers::auth::{self, KeyParam}; +use crate::http::axum_implementation::handlers::common::auth::{self, KeyParam}; use crate::http::axum_implementation::responses; use crate::tracker::auth::Key; diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index e4b5ece80..33f78814f 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -11,7 +11,7 @@ use super::common::peer_ip; use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; use crate::http::axum_implementation::extractors::key::Extract; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; -use crate::http::axum_implementation::handlers::auth; +use crate::http::axum_implementation::handlers::common::auth; use crate::http::axum_implementation::requests::announce::{Announce, Compact, Event}; use crate::http::axum_implementation::responses::{self, announce}; use crate::http::axum_implementation::services; diff --git a/src/http/axum_implementation/handlers/auth.rs 
b/src/http/axum_implementation/handlers/common/auth.rs similarity index 100% rename from src/http/axum_implementation/handlers/auth.rs rename to src/http/axum_implementation/handlers/common/auth.rs diff --git a/src/http/axum_implementation/handlers/common/mod.rs b/src/http/axum_implementation/handlers/common/mod.rs index ed159a32b..41bf1369f 100644 --- a/src/http/axum_implementation/handlers/common/mod.rs +++ b/src/http/axum_implementation/handlers/common/mod.rs @@ -1 +1,2 @@ pub mod peer_ip; +pub mod auth; diff --git a/src/http/axum_implementation/handlers/mod.rs b/src/http/axum_implementation/handlers/mod.rs index 36a810d95..69b69127e 100644 --- a/src/http/axum_implementation/handlers/mod.rs +++ b/src/http/axum_implementation/handlers/mod.rs @@ -2,7 +2,6 @@ use super::responses; use crate::tracker::error::Error; pub mod announce; -pub mod auth; pub mod common; pub mod scrape; From 49bb0db5b2958bcf19cc4b3eab40f0ee4e317d0b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Mar 2023 14:33:33 +0000 Subject: [PATCH 380/435] refactor(http): rename mod and move struct --- .../extractors/{key.rs => authentication_key.rs} | 13 ++++++++++++- src/http/axum_implementation/extractors/mod.rs | 2 +- src/http/axum_implementation/handlers/announce.rs | 2 +- .../axum_implementation/handlers/common/auth.rs | 11 ----------- src/http/axum_implementation/handlers/scrape.rs | 2 +- 5 files changed, 15 insertions(+), 15 deletions(-) rename src/http/axum_implementation/extractors/{key.rs => authentication_key.rs} (92%) diff --git a/src/http/axum_implementation/extractors/key.rs b/src/http/axum_implementation/extractors/authentication_key.rs similarity index 92% rename from src/http/axum_implementation/extractors/key.rs rename to src/http/axum_implementation/extractors/authentication_key.rs index e32c4c76a..8ffc4ff12 100644 --- a/src/http/axum_implementation/extractors/key.rs +++ b/src/http/axum_implementation/extractors/authentication_key.rs @@ -6,13 +6,24 @@ use 
axum::extract::rejection::PathRejection; use axum::extract::{FromRequestParts, Path}; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; +use serde::Deserialize; -use crate::http::axum_implementation::handlers::common::auth::{self, KeyParam}; +use crate::http::axum_implementation::handlers::common::auth; use crate::http::axum_implementation::responses; use crate::tracker::auth::Key; pub struct Extract(pub Key); +#[derive(Deserialize)] +pub struct KeyParam(String); + +impl KeyParam { + #[must_use] + pub fn value(&self) -> String { + self.0.clone() + } +} + #[async_trait] impl FromRequestParts for Extract where diff --git a/src/http/axum_implementation/extractors/mod.rs b/src/http/axum_implementation/extractors/mod.rs index 04e9e306b..97aae63a5 100644 --- a/src/http/axum_implementation/extractors/mod.rs +++ b/src/http/axum_implementation/extractors/mod.rs @@ -1,4 +1,4 @@ pub mod announce_request; -pub mod key; +pub mod authentication_key; pub mod remote_client_ip; pub mod scrape_request; diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 33f78814f..18787737f 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -9,7 +9,7 @@ use log::debug; use super::common::peer_ip; use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; -use crate::http::axum_implementation::extractors::key::Extract; +use crate::http::axum_implementation::extractors::authentication_key::Extract; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::handlers::common::auth; use crate::http::axum_implementation::requests::announce::{Announce, Compact, Event}; diff --git a/src/http/axum_implementation/handlers/common/auth.rs b/src/http/axum_implementation/handlers/common/auth.rs index b1b73e60e..30971725a 100644 --- 
a/src/http/axum_implementation/handlers/common/auth.rs +++ b/src/http/axum_implementation/handlers/common/auth.rs @@ -1,21 +1,10 @@ use std::panic::Location; -use serde::Deserialize; use thiserror::Error; use crate::http::axum_implementation::responses; use crate::tracker::auth; -#[derive(Deserialize)] -pub struct KeyParam(String); - -impl KeyParam { - #[must_use] - pub fn value(&self) -> String { - self.0.clone() - } -} - #[derive(Debug, Error)] pub enum Error { #[error("Missing authentication key param for private tracker. Error in {location}")] diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index d8d68a4c3..b65fa5592 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -5,7 +5,7 @@ use axum::response::{IntoResponse, Response}; use log::debug; use super::common::peer_ip; -use crate::http::axum_implementation::extractors::key::Extract; +use crate::http::axum_implementation::extractors::authentication_key::Extract; use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; use crate::http::axum_implementation::requests::scrape::Scrape; From 6ebcfcdcd78b4011a823e613362748420eead674 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Mar 2023 15:36:31 +0000 Subject: [PATCH 381/435] refactor(http): push logic from Axum to App layer Some app logic was coupled to Axum and it could be potentially used with any other web library. Besides, it's easier to test. 
--- ...mote_client_ip.rs => client_ip_sources.rs} | 29 +--- .../axum_implementation/extractors/mod.rs | 2 +- .../axum_implementation/handlers/announce.rs | 29 ++-- .../handlers/common/peer_ip.rs | 164 ++---------------- .../axum_implementation/handlers/scrape.rs | 37 ++-- src/http/axum_implementation/services/mod.rs | 1 + .../services/peer_ip_resolver.rs | 149 ++++++++++++++++ src/tracker/error.rs | 2 + 8 files changed, 212 insertions(+), 201 deletions(-) rename src/http/axum_implementation/extractors/{remote_client_ip.rs => client_ip_sources.rs} (51%) create mode 100644 src/http/axum_implementation/services/peer_ip_resolver.rs diff --git a/src/http/axum_implementation/extractors/remote_client_ip.rs b/src/http/axum_implementation/extractors/client_ip_sources.rs similarity index 51% rename from src/http/axum_implementation/extractors/remote_client_ip.rs rename to src/http/axum_implementation/extractors/client_ip_sources.rs index 0f6789261..b41478c22 100644 --- a/src/http/axum_implementation/extractors/remote_client_ip.rs +++ b/src/http/axum_implementation/extractors/client_ip_sources.rs @@ -1,34 +1,19 @@ //! Wrapper for two Axum extractors to get the relevant information //! to resolve the remote client IP. -use std::net::{IpAddr, SocketAddr}; +use std::net::SocketAddr; use axum::async_trait; use axum::extract::{ConnectInfo, FromRequestParts}; use axum::http::request::Parts; use axum::response::Response; use axum_client_ip::RightmostXForwardedFor; -use serde::{Deserialize, Serialize}; -/// Given this request chain: -/// -/// client <-> http proxy 1 <-> http proxy 2 <-> server -/// ip: 126.0.0.1 ip: 126.0.0.2 ip: 126.0.0.3 ip: 126.0.0.4 -/// X-Forwarded-For: 126.0.0.1 X-Forwarded-For: 126.0.0.1,126.0.0.2 -/// -/// This extractor extracts these values from the HTTP headers and connection info. 
-/// -/// `right_most_x_forwarded_for` = 126.0.0.2 -/// `connection_info_ip` = 126.0.0.3 -/// -/// More info about inner extractors: -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] -pub struct RemoteClientIp { - pub right_most_x_forwarded_for: Option, - pub connection_info_ip: Option, -} +use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + +pub struct Extract(pub ClientIpSources); #[async_trait] -impl FromRequestParts for RemoteClientIp +impl FromRequestParts for Extract where S: Send + Sync, { @@ -45,9 +30,9 @@ where Err(_) => None, }; - Ok(RemoteClientIp { + Ok(Extract(ClientIpSources { right_most_x_forwarded_for, connection_info_ip, - }) + })) } } diff --git a/src/http/axum_implementation/extractors/mod.rs b/src/http/axum_implementation/extractors/mod.rs index 97aae63a5..557330257 100644 --- a/src/http/axum_implementation/extractors/mod.rs +++ b/src/http/axum_implementation/extractors/mod.rs @@ -1,4 +1,4 @@ pub mod announce_request; pub mod authentication_key; -pub mod remote_client_ip; +pub mod client_ip_sources; pub mod scrape_request; diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 18787737f..05216ce28 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -7,23 +7,28 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; -use super::common::peer_ip; use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; -use crate::http::axum_implementation::extractors::authentication_key::Extract; -use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; +use crate::http::axum_implementation::extractors::authentication_key::Extract as ExtractKey; +use crate::http::axum_implementation::extractors::client_ip_sources::Extract as ExtractClientIpSources; use 
crate::http::axum_implementation::handlers::common::auth; use crate::http::axum_implementation::requests::announce::{Announce, Compact, Event}; use crate::http::axum_implementation::responses::{self, announce}; -use crate::http::axum_implementation::services; +use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; +use crate::http::axum_implementation::services::{self, peer_ip_resolver}; use crate::protocol::clock::{Current, Time}; use crate::tracker::peer::Peer; use crate::tracker::Tracker; +/* code-review: authentication, authorization and peer IP resolution could be moved + from the handler (Axum) layer into the app layer `services::announce::invoke`. + That would make the handler even simpler and the code more reusable and decoupled from Axum. +*/ + #[allow(clippy::unused_async)] pub async fn handle_without_key( State(tracker): State>, ExtractRequest(announce_request): ExtractRequest, - remote_client_ip: RemoteClientIp, + ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ) -> Response { debug!("http announce request: {:#?}", announce_request); @@ -34,15 +39,15 @@ pub async fn handle_without_key( .into_response(); } - handle(&tracker, &announce_request, &remote_client_ip).await + handle(&tracker, &announce_request, &client_ip_sources).await } #[allow(clippy::unused_async)] pub async fn handle_with_key( State(tracker): State>, ExtractRequest(announce_request): ExtractRequest, - Extract(key): Extract, - remote_client_ip: RemoteClientIp, + ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, + ExtractKey(key): ExtractKey, ) -> Response { debug!("http announce request: {:#?}", announce_request); @@ -51,18 +56,18 @@ pub async fn handle_with_key( Err(error) => return responses::error::Error::from(error).into_response(), } - handle(&tracker, &announce_request, &remote_client_ip).await + handle(&tracker, &announce_request, &client_ip_sources).await } -async fn handle(tracker: &Arc, announce_request: &Announce, 
remote_client_ip: &RemoteClientIp) -> Response { +async fn handle(tracker: &Arc, announce_request: &Announce, client_ip_sources: &ClientIpSources) -> Response { match tracker.authorize(&announce_request.info_hash).await { Ok(_) => (), Err(error) => return responses::error::Error::from(error).into_response(), } - let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, remote_client_ip) { + let peer_ip = match peer_ip_resolver::invoke(tracker.config.on_reverse_proxy, client_ip_sources) { Ok(peer_ip) => peer_ip, - Err(err) => return err, + Err(error) => return responses::error::Error::from(error).into_response(), }; let mut peer = peer_from_request(announce_request, &peer_ip); diff --git a/src/http/axum_implementation/handlers/common/peer_ip.rs b/src/http/axum_implementation/handlers/common/peer_ip.rs index 1c3b6c815..df10e5eb1 100644 --- a/src/http/axum_implementation/handlers/common/peer_ip.rs +++ b/src/http/axum_implementation/handlers/common/peer_ip.rs @@ -1,170 +1,34 @@ -//! Helper handler function to resolve the peer IP from the `RemoteClientIp` extractor. 
-use std::net::IpAddr; -use std::panic::Location; - -use axum::response::{IntoResponse, Response}; -use thiserror::Error; - -use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; use crate::http::axum_implementation::responses; +use crate::http::axum_implementation::services::peer_ip_resolver::PeerIpResolutionError; -#[derive(Error, Debug)] -pub enum ResolutionError { - #[error( - "missing or invalid the right most X-Forwarded-For IP (mandatory on reverse proxy tracker configuration) in {location}" - )] - MissingRightMostXForwardedForIp { location: &'static Location<'static> }, - #[error("cannot get the client IP from the connection info in {location}")] - MissingClientIp { location: &'static Location<'static> }, -} - -impl From for responses::error::Error { - fn from(err: ResolutionError) -> Self { +impl From for responses::error::Error { + fn from(err: PeerIpResolutionError) -> Self { responses::error::Error { failure_reason: format!("Error resolving peer IP: {err}"), } } } -/// It resolves the peer IP. -/// -/// # Errors -/// -/// Will return an error response if the peer IP cannot be obtained according to the configuration. -/// For example, if the IP is extracted from an HTTP header which is missing in the request. 
-pub fn resolve(on_reverse_proxy: bool, remote_client_ip: &RemoteClientIp) -> Result { - match resolve_peer_ip(on_reverse_proxy, remote_client_ip) { - Ok(ip) => Ok(ip), - Err(error) => Err(error.into_response()), - } -} - -fn resolve_peer_ip(on_reverse_proxy: bool, remote_client_ip: &RemoteClientIp) -> Result { - if on_reverse_proxy { - resolve_peer_ip_on_reverse_proxy(remote_client_ip) - } else { - resolve_peer_ip_without_reverse_proxy(remote_client_ip) - } -} - -fn resolve_peer_ip_without_reverse_proxy(remote_client_ip: &RemoteClientIp) -> Result { - if let Some(ip) = remote_client_ip.connection_info_ip { - Ok(ip) - } else { - Err(responses::error::Error::from(ResolutionError::MissingClientIp { - location: Location::caller(), - })) - } -} - -fn resolve_peer_ip_on_reverse_proxy(remote_client_ip: &RemoteClientIp) -> Result { - if let Some(ip) = remote_client_ip.right_most_x_forwarded_for { - Ok(ip) - } else { - Err(responses::error::Error::from( - ResolutionError::MissingRightMostXForwardedForIp { - location: Location::caller(), - }, - )) - } -} - #[cfg(test)] mod tests { - use super::resolve_peer_ip; - use crate::http::axum_implementation::responses::error::Error; + use std::panic::Location; - fn assert_error_response(error: &Error, error_message: &str) { + use crate::http::axum_implementation::responses; + use crate::http::axum_implementation::services::peer_ip_resolver::PeerIpResolutionError; + + fn assert_error_response(error: &responses::error::Error, error_message: &str) { assert!( error.failure_reason.contains(error_message), "Error response does not contain message: '{error_message}'. 
Error: {error:?}" ); } - mod working_without_reverse_proxy { - use std::net::IpAddr; - use std::str::FromStr; - - use super::{assert_error_response, resolve_peer_ip}; - use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; - - #[test] - fn it_should_get_the_peer_ip_from_the_connection_info() { - let on_reverse_proxy = false; - - let ip = resolve_peer_ip( - on_reverse_proxy, - &RemoteClientIp { - right_most_x_forwarded_for: None, - connection_info_ip: Some(IpAddr::from_str("203.0.113.195").unwrap()), - }, - ) - .unwrap(); - - assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); - } - - #[test] - fn it_should_return_an_error_if_it_cannot_get_the_peer_ip_from_the_connection_info() { - let on_reverse_proxy = false; - - let response = resolve_peer_ip( - on_reverse_proxy, - &RemoteClientIp { - right_most_x_forwarded_for: None, - connection_info_ip: None, - }, - ) - .unwrap_err(); - - assert_error_response( - &response, - "Error resolving peer IP: cannot get the client IP from the connection info", - ); - } - } - - mod working_on_reverse_proxy { - use std::net::IpAddr; - use std::str::FromStr; - - use super::assert_error_response; - use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; - use crate::http::axum_implementation::handlers::common::peer_ip::resolve_peer_ip; - - #[test] - fn it_should_get_the_peer_ip_from_the_right_most_ip_in_the_x_forwarded_for_header() { - let on_reverse_proxy = true; - - let ip = resolve_peer_ip( - on_reverse_proxy, - &RemoteClientIp { - right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), - connection_info_ip: None, - }, - ) - .unwrap(); - - assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); - } - - #[test] - fn it_should_return_an_error_if_it_cannot_get_the_right_most_ip_from_the_x_forwarded_for_header() { - let on_reverse_proxy = true; - - let response = resolve_peer_ip( - on_reverse_proxy, - &RemoteClientIp { - right_most_x_forwarded_for: 
None, - connection_info_ip: None, - }, - ) - .unwrap_err(); + #[test] + fn it_should_map_a_peer_ip_resolution_error_into_an_error_response() { + let response = responses::error::Error::from(PeerIpResolutionError::MissingRightMostXForwardedForIp { + location: Location::caller(), + }); - assert_error_response( - &response, - "Error resolving peer IP: missing or invalid the right most X-Forwarded-For IP", - ); - } + assert_error_response(&response, "Error resolving peer IP"); } } diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index b65fa5592..2027b8604 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -4,50 +4,55 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; -use super::common::peer_ip; -use crate::http::axum_implementation::extractors::authentication_key::Extract; -use crate::http::axum_implementation::extractors::remote_client_ip::RemoteClientIp; +use crate::http::axum_implementation::extractors::authentication_key::Extract as ExtractKey; +use crate::http::axum_implementation::extractors::client_ip_sources::Extract as ExtractClientIpSources; use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; use crate::http::axum_implementation::requests::scrape::Scrape; +use crate::http::axum_implementation::services::peer_ip_resolver::{self, ClientIpSources}; use crate::http::axum_implementation::{responses, services}; use crate::tracker::Tracker; +/* code-review: authentication, authorization and peer IP resolution could be moved + from the handler (Axum) layer into the app layer `services::announce::invoke`. + That would make the handler even simpler and the code more reusable and decoupled from Axum. 
+*/ + #[allow(clippy::unused_async)] pub async fn handle_without_key( State(tracker): State>, ExtractRequest(scrape_request): ExtractRequest, - remote_client_ip: RemoteClientIp, + ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); if tracker.requires_authentication() { - return handle_fake_scrape(&tracker, &scrape_request, &remote_client_ip).await; + return handle_fake_scrape(&tracker, &scrape_request, &client_ip_sources).await; } - handle_real_scrape(&tracker, &scrape_request, &remote_client_ip).await + handle_real_scrape(&tracker, &scrape_request, &client_ip_sources).await } #[allow(clippy::unused_async)] pub async fn handle_with_key( State(tracker): State>, ExtractRequest(scrape_request): ExtractRequest, - Extract(key): Extract, - remote_client_ip: RemoteClientIp, + ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, + ExtractKey(key): ExtractKey, ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); match tracker.authenticate(&key).await { Ok(_) => (), - Err(_) => return handle_fake_scrape(&tracker, &scrape_request, &remote_client_ip).await, + Err(_) => return handle_fake_scrape(&tracker, &scrape_request, &client_ip_sources).await, } - handle_real_scrape(&tracker, &scrape_request, &remote_client_ip).await + handle_real_scrape(&tracker, &scrape_request, &client_ip_sources).await } -async fn handle_real_scrape(tracker: &Arc, scrape_request: &Scrape, remote_client_ip: &RemoteClientIp) -> Response { - let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, remote_client_ip) { +async fn handle_real_scrape(tracker: &Arc, scrape_request: &Scrape, client_ip_sources: &ClientIpSources) -> Response { + let peer_ip = match peer_ip_resolver::invoke(tracker.config.on_reverse_proxy, client_ip_sources) { Ok(peer_ip) => peer_ip, - Err(err) => return err, + Err(error) => return responses::error::Error::from(error).into_response(), }; let 
scrape_data = services::scrape::invoke(tracker, &scrape_request.info_hashes, &peer_ip).await; @@ -56,10 +61,10 @@ async fn handle_real_scrape(tracker: &Arc, scrape_request: &Scrape, rem } /// When authentication fails in `private` mode the tracker returns empty swarm metadata for all the requested infohashes. -async fn handle_fake_scrape(tracker: &Arc, scrape_request: &Scrape, remote_client_ip: &RemoteClientIp) -> Response { - let peer_ip = match peer_ip::resolve(tracker.config.on_reverse_proxy, remote_client_ip) { +async fn handle_fake_scrape(tracker: &Arc, scrape_request: &Scrape, remote_client_ip: &ClientIpSources) -> Response { + let peer_ip = match peer_ip_resolver::invoke(tracker.config.on_reverse_proxy, remote_client_ip) { Ok(peer_ip) => peer_ip, - Err(err) => return err, + Err(error) => return responses::error::Error::from(error).into_response(), }; let scrape_data = services::scrape::fake_invoke(tracker, &scrape_request.info_hashes, &peer_ip).await; diff --git a/src/http/axum_implementation/services/mod.rs b/src/http/axum_implementation/services/mod.rs index 776d2dfbf..5d1acd67d 100644 --- a/src/http/axum_implementation/services/mod.rs +++ b/src/http/axum_implementation/services/mod.rs @@ -1,2 +1,3 @@ pub mod announce; +pub mod peer_ip_resolver; pub mod scrape; diff --git a/src/http/axum_implementation/services/peer_ip_resolver.rs b/src/http/axum_implementation/services/peer_ip_resolver.rs new file mode 100644 index 000000000..fae1e4ec0 --- /dev/null +++ b/src/http/axum_implementation/services/peer_ip_resolver.rs @@ -0,0 +1,149 @@ +//! Given this request chain: +//! +//! client <-> http proxy 1 <-> http proxy 2 <-> server +//! ip: 126.0.0.1 ip: 126.0.0.2 ip: 126.0.0.3 ip: 126.0.0.4 +//! X-Forwarded-For: 126.0.0.1 X-Forwarded-For: 126.0.0.1,126.0.0.2 +//! +//! This service resolves the peer IP from these values: +//! +//! `right_most_x_forwarded_for` = 126.0.0.2 +//! `connection_info_ip` = 126.0.0.3 +//! +//! Depending on the tracker configuration. 
+use std::net::IpAddr; +use std::panic::Location; + +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] +pub struct ClientIpSources { + pub right_most_x_forwarded_for: Option, + pub connection_info_ip: Option, +} + +#[derive(Error, Debug)] +pub enum PeerIpResolutionError { + #[error( + "missing or invalid the right most X-Forwarded-For IP (mandatory on reverse proxy tracker configuration) in {location}" + )] + MissingRightMostXForwardedForIp { location: &'static Location<'static> }, + #[error("cannot get the client IP from the connection info in {location}")] + MissingClientIp { location: &'static Location<'static> }, +} + +/// # Errors +/// +/// Will return an error if the peer IP cannot be obtained according to the configuration. +/// For example, if the IP is extracted from an HTTP header which is missing in the request. +pub fn invoke(on_reverse_proxy: bool, client_ip_sources: &ClientIpSources) -> Result { + if on_reverse_proxy { + resolve_peer_ip_on_reverse_proxy(client_ip_sources) + } else { + resolve_peer_ip_without_reverse_proxy(client_ip_sources) + } +} + +fn resolve_peer_ip_without_reverse_proxy(remote_client_ip: &ClientIpSources) -> Result { + if let Some(ip) = remote_client_ip.connection_info_ip { + Ok(ip) + } else { + Err(PeerIpResolutionError::MissingClientIp { + location: Location::caller(), + }) + } +} + +fn resolve_peer_ip_on_reverse_proxy(remote_client_ip: &ClientIpSources) -> Result { + if let Some(ip) = remote_client_ip.right_most_x_forwarded_for { + Ok(ip) + } else { + Err(PeerIpResolutionError::MissingRightMostXForwardedForIp { + location: Location::caller(), + }) + } +} + +#[cfg(test)] +mod tests { + use super::invoke; + + mod working_without_reverse_proxy { + use std::net::IpAddr; + use std::str::FromStr; + + use super::invoke; + use crate::http::axum_implementation::services::peer_ip_resolver::{ClientIpSources, PeerIpResolutionError}; + + #[test] + fn 
it_should_get_the_peer_ip_from_the_connection_info() { + let on_reverse_proxy = false; + + let ip = invoke( + on_reverse_proxy, + &ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: Some(IpAddr::from_str("203.0.113.195").unwrap()), + }, + ) + .unwrap(); + + assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); + } + + #[test] + fn it_should_return_an_error_if_it_cannot_get_the_peer_ip_from_the_connection_info() { + let on_reverse_proxy = false; + + let error = invoke( + on_reverse_proxy, + &ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }, + ) + .unwrap_err(); + + assert!(matches!(error, PeerIpResolutionError::MissingClientIp { .. })); + } + } + + mod working_on_reverse_proxy { + use std::net::IpAddr; + use std::str::FromStr; + + use crate::http::axum_implementation::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; + + #[test] + fn it_should_get_the_peer_ip_from_the_right_most_ip_in_the_x_forwarded_for_header() { + let on_reverse_proxy = true; + + let ip = invoke( + on_reverse_proxy, + &ClientIpSources { + right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), + connection_info_ip: None, + }, + ) + .unwrap(); + + assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); + } + + #[test] + fn it_should_return_an_error_if_it_cannot_get_the_right_most_ip_from_the_x_forwarded_for_header() { + let on_reverse_proxy = true; + + let error = invoke( + on_reverse_proxy, + &ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }, + ) + .unwrap_err(); + + assert!(matches!(error, PeerIpResolutionError::MissingRightMostXForwardedForIp { .. 
})); + } + } +} diff --git a/src/tracker/error.rs b/src/tracker/error.rs index 51bcbf3bb..080903da6 100644 --- a/src/tracker/error.rs +++ b/src/tracker/error.rs @@ -4,6 +4,7 @@ use crate::located_error::LocatedError; #[derive(thiserror::Error, Debug, Clone)] pub enum Error { + // Authentication errors #[error("The supplied key: {key:?}, is not valid: {source}")] PeerKeyNotValid { key: super::auth::Key, @@ -12,6 +13,7 @@ pub enum Error { #[error("The peer is not authenticated, {location}")] PeerNotAuthenticated { location: &'static Location<'static> }, + // Authorization errors #[error("The torrent: {info_hash}, is not whitelisted, {location}")] TorrentNotWhitelisted { info_hash: crate::protocol::info_hash::InfoHash, From fa609949d407f3ab36f114f65741ce0beaace137 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Mar 2023 17:58:21 +0000 Subject: [PATCH 382/435] test(http): [#128] unit test for announce handler --- .../axum_implementation/handlers/announce.rs | 279 ++++++++++++++++-- 1 file changed, 257 insertions(+), 22 deletions(-) diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 05216ce28..9a92b243d 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -16,13 +16,9 @@ use crate::http::axum_implementation::responses::{self, announce}; use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; use crate::http::axum_implementation::services::{self, peer_ip_resolver}; use crate::protocol::clock::{Current, Time}; +use crate::tracker::auth::Key; use crate::tracker::peer::Peer; -use crate::tracker::Tracker; - -/* code-review: authentication, authorization and peer IP resolution could be moved - from the handler (Axum) layer into the app layer `services::announce::invoke`. - That would make the handler even simpler and the code more reusable and decoupled from Axum. 
-*/ +use crate::tracker::{AnnounceData, Tracker}; #[allow(clippy::unused_async)] pub async fn handle_without_key( @@ -32,14 +28,7 @@ pub async fn handle_without_key( ) -> Response { debug!("http announce request: {:#?}", announce_request); - if tracker.requires_authentication() { - return responses::error::Error::from(auth::Error::MissingAuthKey { - location: Location::caller(), - }) - .into_response(); - } - - handle(&tracker, &announce_request, &client_ip_sources).await + handle(&tracker, &announce_request, &client_ip_sources, None).await } #[allow(clippy::unused_async)] @@ -51,29 +40,67 @@ pub async fn handle_with_key( ) -> Response { debug!("http announce request: {:#?}", announce_request); - match tracker.authenticate(&key).await { - Ok(_) => (), - Err(error) => return responses::error::Error::from(error).into_response(), - } + handle(&tracker, &announce_request, &client_ip_sources, Some(key)).await +} - handle(&tracker, &announce_request, &client_ip_sources).await +async fn handle( + tracker: &Arc, + announce_request: &Announce, + client_ip_sources: &ClientIpSources, + maybe_key: Option, +) -> Response { + let announce_data = match handle_announce(tracker, announce_request, client_ip_sources, maybe_key).await { + Ok(announce_data) => announce_data, + Err(error) => return error.into_response(), + }; + build_response(announce_request, announce_data) } -async fn handle(tracker: &Arc, announce_request: &Announce, client_ip_sources: &ClientIpSources) -> Response { +/* code-review: authentication, authorization and peer IP resolution could be moved + from the handler (Axum) layer into the app layer `services::announce::invoke`. + That would make the handler even simpler and the code more reusable and decoupled from Axum. 
+*/ + +async fn handle_announce( + tracker: &Arc, + announce_request: &Announce, + client_ip_sources: &ClientIpSources, + maybe_key: Option, +) -> Result { + // Authentication + if tracker.requires_authentication() { + match maybe_key { + Some(key) => match tracker.authenticate(&key).await { + Ok(_) => (), + Err(error) => return Err(responses::error::Error::from(error)), + }, + None => { + return Err(responses::error::Error::from(auth::Error::MissingAuthKey { + location: Location::caller(), + })) + } + } + } + + // Authorization match tracker.authorize(&announce_request.info_hash).await { Ok(_) => (), - Err(error) => return responses::error::Error::from(error).into_response(), + Err(error) => return Err(responses::error::Error::from(error)), } let peer_ip = match peer_ip_resolver::invoke(tracker.config.on_reverse_proxy, client_ip_sources) { Ok(peer_ip) => peer_ip, - Err(error) => return responses::error::Error::from(error).into_response(), + Err(error) => return Err(responses::error::Error::from(error)), }; let mut peer = peer_from_request(announce_request, &peer_ip); let announce_data = services::announce::invoke(tracker.clone(), announce_request.info_hash, &mut peer).await; + Ok(announce_data) +} + +fn build_response(announce_request: &Announce, announce_data: AnnounceData) -> Response { match &announce_request.compact { Some(compact) => match compact { Compact::Accepted => announce::Compact::from(announce_data).into_response(), @@ -108,3 +135,211 @@ fn map_to_aquatic_event(event: &Option) -> AnnounceEvent { None => aquatic_udp_protocol::AnnounceEvent::None, } } + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use crate::config::{ephemeral_configuration, Configuration}; + use crate::http::axum_implementation::requests::announce::Announce; + use crate::http::axum_implementation::responses; + use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::mode::Mode; + use 
crate::tracker::statistics::Keeper; + use crate::tracker::{peer, Tracker}; + + fn private_tracker() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Private; + tracker_factory(configuration) + } + + fn listed_tracker() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Listed; + tracker_factory(configuration) + } + + fn tracker_on_reverse_proxy() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.on_reverse_proxy = true; + tracker_factory(configuration) + } + + fn tracker_not_on_reverse_proxy() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.on_reverse_proxy = false; + tracker_factory(configuration) + } + + fn tracker_factory(configuration: Configuration) -> Tracker { + // code-review: the tracker initialization is duplicated in many places. Consider make this function public. + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + match Tracker::new(&Arc::new(configuration), Some(stats_event_sender), stats_repository) { + Ok(tracker) => tracker, + Err(error) => { + panic!("{}", error) + } + } + } + + fn sample_announce_request() -> Announce { + Announce { + info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + peer_id: "-qB00000000000000001".parse::().unwrap(), + port: 17548, + downloaded: None, + uploaded: None, + left: None, + event: None, + compact: None, + } + } + + fn sample_client_ip_sources() -> ClientIpSources { + ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + } + } + + fn assert_error_response(error: &responses::error::Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. 
Error: {error:?}" + ); + } + + mod with_tracker_in_private_mode { + + use std::str::FromStr; + use std::sync::Arc; + + use super::{private_tracker, sample_announce_request, sample_client_ip_sources}; + use crate::http::axum_implementation::handlers::announce::handle_announce; + use crate::http::axum_implementation::handlers::announce::tests::assert_error_response; + use crate::tracker::auth; + + #[tokio::test] + async fn it_should_fail_when_the_authentication_key_is_missing() { + let tracker = Arc::new(private_tracker()); + + let maybe_key = None; + + let response = handle_announce(&tracker, &sample_announce_request(), &sample_client_ip_sources(), maybe_key) + .await + .unwrap_err(); + + assert_error_response( + &response, + "Authentication error: Missing authentication key param for private tracker", + ); + } + + #[tokio::test] + async fn it_should_fail_when_the_authentication_key_is_invalid() { + let tracker = Arc::new(private_tracker()); + + let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let maybe_key = Some(unregistered_key); + + let response = handle_announce(&tracker, &sample_announce_request(), &sample_client_ip_sources(), maybe_key) + .await + .unwrap_err(); + + assert_error_response(&response, "Authentication error: Failed to read key"); + } + } + + mod with_tracker_in_listed_mode { + + use std::sync::Arc; + + use super::{listed_tracker, sample_announce_request, sample_client_ip_sources}; + use crate::http::axum_implementation::handlers::announce::handle_announce; + use crate::http::axum_implementation::handlers::announce::tests::assert_error_response; + + #[tokio::test] + async fn it_should_fail_when_the_announced_torrent_is_not_whitelisted() { + let tracker = Arc::new(listed_tracker()); + + let announce_request = sample_announce_request(); + + let response = handle_announce(&tracker, &announce_request, &sample_client_ip_sources(), None) + .await + .unwrap_err(); + + assert_error_response( + &response, + 
&format!( + "Tracker error: The torrent: {}, is not whitelisted", + announce_request.info_hash + ), + ); + } + } + + mod with_tracker_on_reverse_proxy { + + use std::sync::Arc; + + use super::{sample_announce_request, tracker_on_reverse_proxy}; + use crate::http::axum_implementation::handlers::announce::handle_announce; + use crate::http::axum_implementation::handlers::announce::tests::assert_error_response; + use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + + #[tokio::test] + async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { + let tracker = Arc::new(tracker_on_reverse_proxy()); + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }; + + let response = handle_announce(&tracker, &sample_announce_request(), &client_ip_sources, None) + .await + .unwrap_err(); + + assert_error_response( + &response, + "Error resolving peer IP: missing or invalid the right most X-Forwarded-For IP", + ); + } + } + + mod with_tracker_not_on_reverse_proxy { + + use std::sync::Arc; + + use super::{sample_announce_request, tracker_not_on_reverse_proxy}; + use crate::http::axum_implementation::handlers::announce::handle_announce; + use crate::http::axum_implementation::handlers::announce::tests::assert_error_response; + use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + + #[tokio::test] + async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { + let tracker = Arc::new(tracker_not_on_reverse_proxy()); + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }; + + let response = handle_announce(&tracker, &sample_announce_request(), &client_ip_sources, None) + .await + .unwrap_err(); + + assert_error_response( + &response, + "Error resolving peer IP: cannot get the client IP from the connection info", + ); + } + } +} From 
3860fc867b664e47186260996f3b434cfa57e6c8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Mar 2023 18:08:48 +0000 Subject: [PATCH 383/435] fix: format --- src/http/axum_implementation/handlers/common/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/http/axum_implementation/handlers/common/mod.rs b/src/http/axum_implementation/handlers/common/mod.rs index 41bf1369f..dc028cabf 100644 --- a/src/http/axum_implementation/handlers/common/mod.rs +++ b/src/http/axum_implementation/handlers/common/mod.rs @@ -1,2 +1,2 @@ -pub mod peer_ip; pub mod auth; +pub mod peer_ip; From 930a424d372ca1d80e0f85d118d121ac34e82a2f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Mar 2023 16:35:42 +0000 Subject: [PATCH 384/435] test(http): [#220] unit tests for scrape handler --- .../axum_implementation/handlers/scrape.rs | 271 ++++++++++++++++-- .../axum_implementation/services/scrape.rs | 2 +- 2 files changed, 245 insertions(+), 28 deletions(-) diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 2027b8604..ee59b80a3 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -10,12 +10,8 @@ use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest use crate::http::axum_implementation::requests::scrape::Scrape; use crate::http::axum_implementation::services::peer_ip_resolver::{self, ClientIpSources}; use crate::http::axum_implementation::{responses, services}; -use crate::tracker::Tracker; - -/* code-review: authentication, authorization and peer IP resolution could be moved - from the handler (Axum) layer into the app layer `services::announce::invoke`. - That would make the handler even simpler and the code more reusable and decoupled from Axum. 
-*/ +use crate::tracker::auth::Key; +use crate::tracker::{ScrapeData, Tracker}; #[allow(clippy::unused_async)] pub async fn handle_without_key( @@ -25,11 +21,7 @@ pub async fn handle_without_key( ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); - if tracker.requires_authentication() { - return handle_fake_scrape(&tracker, &scrape_request, &client_ip_sources).await; - } - - handle_real_scrape(&tracker, &scrape_request, &client_ip_sources).await + handle(&tracker, &scrape_request, &client_ip_sources, None).await } #[allow(clippy::unused_async)] @@ -41,33 +33,258 @@ pub async fn handle_with_key( ) -> Response { debug!("http scrape request: {:#?}", &scrape_request); - match tracker.authenticate(&key).await { - Ok(_) => (), - Err(_) => return handle_fake_scrape(&tracker, &scrape_request, &client_ip_sources).await, - } + handle(&tracker, &scrape_request, &client_ip_sources, Some(key)).await +} - handle_real_scrape(&tracker, &scrape_request, &client_ip_sources).await +async fn handle( + tracker: &Arc, + scrape_request: &Scrape, + client_ip_sources: &ClientIpSources, + maybe_key: Option, +) -> Response { + let scrape_data = match handle_scrape(tracker, scrape_request, client_ip_sources, maybe_key).await { + Ok(scrape_data) => scrape_data, + Err(error) => return error.into_response(), + }; + build_response(scrape_data) } -async fn handle_real_scrape(tracker: &Arc, scrape_request: &Scrape, client_ip_sources: &ClientIpSources) -> Response { +/* code-review: authentication, authorization and peer IP resolution could be moved + from the handler (Axum) layer into the app layer `services::announce::invoke`. + That would make the handler even simpler and the code more reusable and decoupled from Axum. 
+*/ + +async fn handle_scrape( + tracker: &Arc, + scrape_request: &Scrape, + client_ip_sources: &ClientIpSources, + maybe_key: Option, +) -> Result { + // Authentication + let return_real_scrape_data = if tracker.requires_authentication() { + match maybe_key { + Some(key) => match tracker.authenticate(&key).await { + Ok(_) => true, + Err(_error) => false, + }, + None => false, + } + } else { + true + }; + + // Authorization for scrape requests is handled at the `Tracker` level + // for each torrent. + let peer_ip = match peer_ip_resolver::invoke(tracker.config.on_reverse_proxy, client_ip_sources) { Ok(peer_ip) => peer_ip, - Err(error) => return responses::error::Error::from(error).into_response(), + Err(error) => return Err(responses::error::Error::from(error)), }; - let scrape_data = services::scrape::invoke(tracker, &scrape_request.info_hashes, &peer_ip).await; + if return_real_scrape_data { + Ok(services::scrape::invoke(tracker, &scrape_request.info_hashes, &peer_ip).await) + } else { + Ok(services::scrape::fake(tracker, &scrape_request.info_hashes, &peer_ip).await) + } +} +fn build_response(scrape_data: ScrapeData) -> Response { responses::scrape::Bencoded::from(scrape_data).into_response() } -/// When authentication fails in `private` mode the tracker returns empty swarm metadata for all the requested infohashes. 
-async fn handle_fake_scrape(tracker: &Arc, scrape_request: &Scrape, remote_client_ip: &ClientIpSources) -> Response { - let peer_ip = match peer_ip_resolver::invoke(tracker.config.on_reverse_proxy, remote_client_ip) { - Ok(peer_ip) => peer_ip, - Err(error) => return responses::error::Error::from(error).into_response(), - }; +#[cfg(test)] +mod tests { + use std::net::IpAddr; + use std::str::FromStr; + use std::sync::Arc; - let scrape_data = services::scrape::fake_invoke(tracker, &scrape_request.info_hashes, &peer_ip).await; + use crate::config::{ephemeral_configuration, Configuration}; + use crate::http::axum_implementation::requests::scrape::Scrape; + use crate::http::axum_implementation::responses; + use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::mode::Mode; + use crate::tracker::statistics::Keeper; + use crate::tracker::Tracker; - responses::scrape::Bencoded::from(scrape_data).into_response() + fn private_tracker() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Private; + tracker_factory(configuration) + } + + fn listed_tracker() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Listed; + tracker_factory(configuration) + } + + fn tracker_on_reverse_proxy() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.on_reverse_proxy = true; + tracker_factory(configuration) + } + + fn tracker_not_on_reverse_proxy() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.on_reverse_proxy = false; + tracker_factory(configuration) + } + + fn tracker_factory(configuration: Configuration) -> Tracker { + // code-review: the tracker initialization is duplicated in many places. Consider make this function public. 
+ + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + match Tracker::new(&Arc::new(configuration), Some(stats_event_sender), stats_repository) { + Ok(tracker) => tracker, + Err(error) => { + panic!("{}", error) + } + } + } + + fn sample_scrape_request() -> Scrape { + Scrape { + info_hashes: vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()], + } + } + + fn sample_client_ip_sources() -> ClientIpSources { + ClientIpSources { + right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), + connection_info_ip: Some(IpAddr::from_str("203.0.113.196").unwrap()), + } + } + + fn assert_error_response(error: &responses::error::Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. Error: {error:?}" + ); + } + + mod with_tracker_in_private_mode { + use std::str::FromStr; + use std::sync::Arc; + + use super::{private_tracker, sample_client_ip_sources, sample_scrape_request}; + use crate::http::axum_implementation::handlers::scrape::handle_scrape; + use crate::tracker::{auth, ScrapeData}; + + #[tokio::test] + async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_missing() { + let tracker = Arc::new(private_tracker()); + + let scrape_request = sample_scrape_request(); + let maybe_key = None; + + let scrape_data = handle_scrape(&tracker, &scrape_request, &sample_client_ip_sources(), maybe_key) + .await + .unwrap(); + + let expected_scrape_data = ScrapeData::zeroed(&scrape_request.info_hashes); + + assert_eq!(scrape_data, expected_scrape_data); + } + + #[tokio::test] + async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_invalid() { + let tracker = Arc::new(private_tracker()); + + let scrape_request = sample_scrape_request(); + let unregistered_key = 
auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let maybe_key = Some(unregistered_key); + + let scrape_data = handle_scrape(&tracker, &scrape_request, &sample_client_ip_sources(), maybe_key) + .await + .unwrap(); + + let expected_scrape_data = ScrapeData::zeroed(&scrape_request.info_hashes); + + assert_eq!(scrape_data, expected_scrape_data); + } + } + + mod with_tracker_in_listed_mode { + + use std::sync::Arc; + + use super::{listed_tracker, sample_client_ip_sources, sample_scrape_request}; + use crate::http::axum_implementation::handlers::scrape::handle_scrape; + use crate::tracker::ScrapeData; + + #[tokio::test] + async fn it_should_return_zeroed_swarm_metadata_when_the_torrent_is_not_whitelisted() { + let tracker = Arc::new(listed_tracker()); + + let scrape_request = sample_scrape_request(); + + let scrape_data = handle_scrape(&tracker, &scrape_request, &sample_client_ip_sources(), None) + .await + .unwrap(); + + let expected_scrape_data = ScrapeData::zeroed(&scrape_request.info_hashes); + + assert_eq!(scrape_data, expected_scrape_data); + } + } + + mod with_tracker_on_reverse_proxy { + use std::sync::Arc; + + use super::{sample_scrape_request, tracker_on_reverse_proxy}; + use crate::http::axum_implementation::handlers::scrape::handle_scrape; + use crate::http::axum_implementation::handlers::scrape::tests::assert_error_response; + use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + + #[tokio::test] + async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { + let tracker = Arc::new(tracker_on_reverse_proxy()); + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }; + + let response = handle_scrape(&tracker, &sample_scrape_request(), &client_ip_sources, None) + .await + .unwrap_err(); + + assert_error_response( + &response, + "Error resolving peer IP: missing or invalid the right most X-Forwarded-For IP", + ); + } + } + 
+ mod with_tracker_not_on_reverse_proxy { + use std::sync::Arc; + + use super::{sample_scrape_request, tracker_not_on_reverse_proxy}; + use crate::http::axum_implementation::handlers::scrape::handle_scrape; + use crate::http::axum_implementation::handlers::scrape::tests::assert_error_response; + use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + + #[tokio::test] + async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { + let tracker = Arc::new(tracker_not_on_reverse_proxy()); + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }; + + let response = handle_scrape(&tracker, &sample_scrape_request(), &client_ip_sources, None) + .await + .unwrap_err(); + + assert_error_response( + &response, + "Error resolving peer IP: cannot get the client IP from the connection info", + ); + } + } } diff --git a/src/http/axum_implementation/services/scrape.rs b/src/http/axum_implementation/services/scrape.rs index 923acf3c4..cfcba09f9 100644 --- a/src/http/axum_implementation/services/scrape.rs +++ b/src/http/axum_implementation/services/scrape.rs @@ -14,7 +14,7 @@ pub async fn invoke(tracker: &Arc, info_hashes: &Vec, origina /// When the peer is not authenticated and the tracker is running in `private` mode, /// the tracker returns empty stats for all the torrents. 
-pub async fn fake_invoke(tracker: &Arc, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { +pub async fn fake(tracker: &Arc, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { send_scrape_event(original_peer_ip, tracker).await; ScrapeData::zeroed(info_hashes) From 65bb1c3d28a78ec435c554512db27b04d4345b10 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Mar 2023 18:16:20 +0000 Subject: [PATCH 385/435] test(http): [#222] unit tests for announce service --- .../axum_implementation/services/announce.rs | 191 ++++++++++++++++++ 1 file changed, 191 insertions(+) diff --git a/src/http/axum_implementation/services/announce.rs b/src/http/axum_implementation/services/announce.rs index 356dbaeb9..5ce0fb1d5 100644 --- a/src/http/axum_implementation/services/announce.rs +++ b/src/http/axum_implementation/services/announce.rs @@ -22,3 +22,194 @@ pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) announce_data } + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + + use crate::config::{ephemeral_configuration, Configuration}; + use crate::protocol::clock::DurationSinceUnixEpoch; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::mode::Mode; + use crate::tracker::statistics::Keeper; + use crate::tracker::{peer, Tracker}; + + fn public_tracker() -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.mode = Mode::Public; + tracker_factory(configuration) + } + + fn tracker_factory(configuration: Configuration) -> Tracker { + // code-review: the tracker initialization is duplicated in many places. Consider make this function public. 
+ + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + match Tracker::new(&Arc::new(configuration), Some(stats_event_sender), stats_repository) { + Ok(tracker) => tracker, + Err(error) => { + panic!("{}", error) + } + } + } + + fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() + } + + fn sample_peer_using_ipv4() -> peer::Peer { + sample_peer() + } + + fn sample_peer_using_ipv6() -> peer::Peer { + let mut peer = sample_peer(); + peer.peer_addr = SocketAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + 8080, + ); + peer + } + + fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + } + } + + mod with_tracker_in_any_mode { + use std::future; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + + use mockall::predicate::eq; + + use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; + use crate::config::ephemeral_configuration; + use crate::http::axum_implementation::services::announce::invoke; + use crate::http::axum_implementation::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; + use crate::tracker::peer::Peer; + use crate::tracker::torrent::SwarmStats; + use crate::tracker::{statistics, AnnounceData, Tracker}; + + #[tokio::test] + async fn it_should_return_the_announce_data() { + let tracker = Arc::new(public_tracker()); + + let mut peer = sample_peer(); + + let announce_data = invoke(tracker.clone(), sample_info_hash(), &mut peer).await; + + let expected_announce_data = AnnounceData { + peers: vec![], + 
swarm_stats: SwarmStats { + completed: 0, + seeders: 1, + leechers: 0, + }, + interval: tracker.config.announce_interval, + interval_min: tracker.config.min_announce_interval, + }; + + assert_eq!(announce_data, expected_announce_data); + } + + #[tokio::test] + async fn it_should_send_the_tcp_4_announce_event_when_the_peer_uses_ipv4() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp4Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + &Arc::new(ephemeral_configuration()), + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let mut peer = sample_peer_using_ipv4(); + + let _announce_data = invoke(tracker, sample_info_hash(), &mut peer).await; + } + + fn tracker_with_an_ipv6_external_ip(stats_event_sender: Box) -> Tracker { + let mut configuration = ephemeral_configuration(); + configuration.external_ip = + Some(IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)).to_string()); + + Tracker::new(&Arc::new(configuration), Some(stats_event_sender), statistics::Repo::new()).unwrap() + } + + fn peer_with_the_ipv4_loopback_ip() -> Peer { + let loopback_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + let mut peer = sample_peer(); + peer.peer_addr = SocketAddr::new(loopback_ip, 8080); + peer + } + + #[tokio::test] + async fn it_should_send_the_tcp_4_announce_event_when_the_peer_uses_ipv4_even_if_the_tracker_changes_the_peer_ip_to_ipv6() + { + // Tracker changes the peer IP to the tracker external IP when the peer is using the loopback IP. 
+ + // Assert that the event sent is a TCP4 event + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp4Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let mut peer = peer_with_the_ipv4_loopback_ip(); + + let _announce_data = invoke( + tracker_with_an_ipv6_external_ip(stats_event_sender).into(), + sample_info_hash(), + &mut peer, + ) + .await; + } + + #[tokio::test] + async fn it_should_send_the_tcp_6_announce_event_when_the_peer_uses_ipv6_even_if_the_tracker_changes_the_peer_ip_to_ipv4() + { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp6Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + &Arc::new(ephemeral_configuration()), + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let mut peer = sample_peer_using_ipv6(); + + let _announce_data = invoke(tracker, sample_info_hash(), &mut peer).await; + } + } +} From 164f29aacdd6d47926d41516d4b306f360f92ac9 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 23 Feb 2023 16:03:59 +0100 Subject: [PATCH 386/435] isolated configuration, test-helpers and primitives crates --- Cargo.lock | 45 ++ Cargo.toml | 62 +-- packages/configuration/Cargo.toml | 16 + .../configuration/src/lib.rs | 39 +- packages/located-error/Cargo.toml | 9 + .../located-error/src/lib.rs | 0 packages/primitives/Cargo.toml | 9 + .../mode.rs => packages/primitives/src/lib.rs | 10 +- packages/test-helpers/Cargo.toml | 11 + packages/test-helpers/src/configuration.rs | 34 ++ packages/test-helpers/src/lib.rs | 2 + packages/test-helpers/src/random.rs | 7 + src/apis/middlewares/auth.rs | 2 +- 
src/apis/server.rs | 176 +++++++- src/databases/driver.rs | 34 +- src/databases/error.rs | 29 +- src/databases/mysql.rs | 4 +- src/databases/sqlite.rs | 6 +- .../axum_implementation/handlers/announce.rs | 18 +- .../axum_implementation/handlers/scrape.rs | 18 +- .../axum_implementation/requests/announce.rs | 2 +- .../axum_implementation/requests/scrape.rs | 2 +- .../axum_implementation/services/announce.rs | 17 +- src/http/warp_implementation/error.rs | 3 +- .../warp_implementation/filter_helpers.rs | 3 +- src/jobs/http_tracker.rs | 2 +- src/jobs/torrent_cleanup.rs | 2 +- src/jobs/tracker_apis.rs | 2 +- src/jobs/udp_tracker.rs | 2 +- src/lib.rs | 46 +- src/logging.rs | 3 +- src/main.rs | 2 +- src/setup.rs | 2 +- src/tracker/auth.rs | 2 +- src/tracker/error.rs | 2 +- src/tracker/mod.rs | 34 +- src/tracker/services/common.rs | 3 +- src/tracker/services/statistics.rs | 6 +- src/tracker/services/torrent.rs | 12 +- src/udp/error.rs | 3 +- src/udp/handlers.rs | 16 +- src/udp/server.rs | 129 +++++- tests/api/mod.rs | 2 +- tests/api/server.rs | 78 ---- tests/api/test_environment.rs | 134 ++++++ tests/http/server.rs | 10 +- tests/tracker_api.rs | 404 +++++++++++------- tests/udp/client.rs | 65 ++- tests/udp/mod.rs | 7 +- tests/udp/server.rs | 67 --- tests/udp/test_environment.rs | 131 ++++++ tests/udp_tracker.rs | 36 +- 52 files changed, 1226 insertions(+), 534 deletions(-) create mode 100644 packages/configuration/Cargo.toml rename src/config.rs => packages/configuration/src/lib.rs (93%) create mode 100644 packages/located-error/Cargo.toml rename src/located_error.rs => packages/located-error/src/lib.rs (100%) create mode 100644 packages/primitives/Cargo.toml rename src/tracker/mode.rs => packages/primitives/src/lib.rs (70%) create mode 100644 packages/test-helpers/Cargo.toml create mode 100644 packages/test-helpers/src/configuration.rs create mode 100644 packages/test-helpers/src/lib.rs create mode 100644 packages/test-helpers/src/random.rs delete mode 100644 
tests/api/server.rs create mode 100644 tests/api/test_environment.rs delete mode 100644 tests/udp/server.rs create mode 100644 tests/udp/test_environment.rs diff --git a/Cargo.lock b/Cargo.lock index cfd8aaba8..9045b7c47 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2972,10 +2972,55 @@ dependencies = [ "thiserror", "tokio", "toml", + "torrust-tracker-configuration", + "torrust-tracker-located-error", + "torrust-tracker-primitives", + "torrust-tracker-test-helpers", "uuid 1.2.1", "warp", ] +[[package]] +name = "torrust-tracker-configuration" +version = "2.3.0" +dependencies = [ + "config", + "log", + "serde", + "serde_with", + "thiserror", + "toml", + "torrust-tracker-located-error", + "torrust-tracker-primitives", + "uuid 1.2.1", +] + +[[package]] +name = "torrust-tracker-located-error" +version = "2.3.0" +dependencies = [ + "log", + "thiserror", +] + +[[package]] +name = "torrust-tracker-primitives" +version = "2.3.0" +dependencies = [ + "derive_more", + "serde", +] + +[[package]] +name = "torrust-tracker-test-helpers" +version = "2.3.0" +dependencies = [ + "lazy_static", + "rand", + "tokio", + "torrust-tracker-configuration", +] + [[package]] name = "tower" version = "0.4.13" diff --git a/Cargo.toml b/Cargo.toml index fa126a152..740a5805e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,31 +1,19 @@ [package] -edition = "2021" name = "torrust-tracker" -version = "2.3.0" -license = "AGPL-3.0" -authors = ["Mick van Dijke "] description = "A feature rich BitTorrent tracker." 
-repository = "https://github.com/torrust/torrust-tracker" - -[profile.dev] -debug = 1 -opt-level = 1 -lto = "thin" +license = "AGPL-3.0" +authors.workspace = true +edition.workspace = true +version.workspace = true -[profile.release] -debug = 1 -opt-level = 3 -lto = "fat" +[workspace.package] +authors = ["Nautilus Cyberneering , Mick van Dijke "] +edition = "2021" +repository = "https://github.com/torrust/torrust-tracker" +version = "2.3.0" [dependencies] -tokio = { version = "1", features = [ - "rt-multi-thread", - "net", - "sync", - "macros", - "signal", -] } - +tokio = { version = "1", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } serde = { version = "1.0", features = ["derive"] } serde_bencode = "^0.2.3" serde_json = "1.0" @@ -34,37 +22,32 @@ hex = "0.4.3" percent-encoding = "2" binascii = "0.1" lazy_static = "1.4" - openssl = { version = "0.10", features = ["vendored"] } - warp = { version = "0.3", features = ["tls"] } - config = "0.13" toml = "0.5" - log = { version = "0.4", features = ["release_max_level_info"] } fern = "0.6" chrono = "0.4" - r2d2 = "0.8" r2d2_mysql = "21" r2d2_sqlite = { version = "0.21", features = ["bundled"] } - rand = "0.8" derive_more = "0.99" thiserror = "1.0" futures = "0.3" async-trait = "0.1" - aquatic_udp_protocol = "0.2" uuid = { version = "1", features = ["v4"] } axum = "0.6.1" axum-server = { version = "0.4.4", features = ["tls-rustls"] } axum-client-ip = "0.4.0" bip_bencode = "0.4.4" +torrust-tracker-primitives = { path = "packages/primitives" } +torrust-tracker-configuration = { path = "packages/configuration" } +torrust-tracker-located-error = { path = "packages/located-error" } multimap = "0.8.3" - [dev-dependencies] mockall = "0.11" reqwest = { version = "0.11.13", features = ["json"] } @@ -72,3 +55,22 @@ serde_urlencoded = "0.7.1" serde_repr = "0.1.10" serde_bytes = "0.11.8" local-ip-address = "0.5.1" +torrust-tracker-test-helpers = { path = "packages/test-helpers" } + +[workspace] +members = [ + 
"packages/configuration", + "packages/primitives", + "packages/test-helpers", + "packages/located-error", +] + +[profile.dev] +debug = 1 +opt-level = 1 +lto = "thin" + +[profile.release] +debug = 1 +opt-level = 3 +lto = "fat" diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml new file mode 100644 index 000000000..a6f1740a0 --- /dev/null +++ b/packages/configuration/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "torrust-tracker-configuration" +version.workspace = true +authors.workspace = true +edition.workspace = true + +[dependencies] +serde = { version = "1.0", features = ["derive"] } +serde_with = "2.0" +config = "0.13" +toml = "0.5" +log = { version = "0.4", features = ["release_max_level_info"] } +thiserror = "1.0" +torrust-tracker-primitives = { path = "../primitives" } +torrust-tracker-located-error = { path = "../located-error" } +uuid = { version = "1", features = ["v4"] } diff --git a/src/config.rs b/packages/configuration/src/lib.rs similarity index 93% rename from src/config.rs rename to packages/configuration/src/lib.rs index 7ed0f9fa7..2121752c5 100644 --- a/src/config.rs +++ b/packages/configuration/src/lib.rs @@ -8,17 +8,13 @@ use std::{env, fs}; use config::{Config, ConfigError, File, FileFormat}; use log::warn; -use rand::{thread_rng, Rng}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, NoneAsEmptyString}; use thiserror::Error; -use {std, toml}; +use torrust_tracker_located_error::{Located, LocatedError}; +use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; -use crate::databases::driver::Driver; -use crate::located_error::{Located, LocatedError}; -use crate::tracker::mode; - -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct UdpTracker { pub enabled: bool, pub bind_address: String, @@ -62,8 +58,8 @@ impl HttpApi { #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct Configuration { pub 
log_level: Option, - pub mode: mode::Mode, - pub db_driver: Driver, + pub mode: TrackerMode, + pub db_driver: DatabaseDriver, pub db_path: String, pub announce_interval: u32, pub min_announce_interval: u32, @@ -122,41 +118,34 @@ pub fn ephemeral_configuration() -> Configuration { }; // Ephemeral socket address for API - let api_port = random_port(); + let api_port = 0u16; config.http_api.enabled = true; config.http_api.bind_address = format!("127.0.0.1:{}", &api_port); // Ephemeral socket address for UDP tracker - let upd_port = random_port(); + let udp_port = 0u16; config.udp_trackers[0].enabled = true; - config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", &upd_port); + config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", &udp_port); // Ephemeral socket address for HTTP tracker - let http_port = random_port(); + let http_port = 0u16; config.http_trackers[0].enabled = true; config.http_trackers[0].bind_address = format!("127.0.0.1:{}", &http_port); // Ephemeral sqlite database let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join(format!("data_{}_{}_{}.db", &api_port, &upd_port, &http_port)); + let temp_file = temp_directory.join(format!("data_{}_{}_{}.db", &api_port, &udp_port, &http_port)); config.db_path = temp_file.to_str().unwrap().to_owned(); config } -fn random_port() -> u16 { - // todo: this may produce random test failures because two tests can try to bind the same port. 
- // We could create a pool of available ports (with read/write lock) - let mut rng = thread_rng(); - rng.gen_range(49152..65535) -} - impl Default for Configuration { fn default() -> Self { let mut configuration = Configuration { log_level: Option::from(String::from("info")), - mode: mode::Mode::Public, - db_driver: Driver::Sqlite3, + mode: TrackerMode::Public, + db_driver: DatabaseDriver::Sqlite3, db_path: String::from("./storage/database/data.db"), announce_interval: 120, min_announce_interval: 120, @@ -266,7 +255,7 @@ impl Configuration { #[cfg(test)] mod tests { - use crate::config::Configuration; + use crate::Configuration; #[cfg(test)] fn default_config_toml() -> String { @@ -325,7 +314,7 @@ mod tests { fn configuration_should_contain_the_external_ip() { let configuration = Configuration::default(); - assert_eq!(configuration.external_ip, Option::Some(String::from("0.0.0.0"))); + assert_eq!(configuration.external_ip, Some(String::from("0.0.0.0"))); } #[test] diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml new file mode 100644 index 000000000..c4b2ef726 --- /dev/null +++ b/packages/located-error/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "torrust-tracker-located-error" +version.workspace = true +authors.workspace = true +edition.workspace = true + +[dependencies] +log = { version = "0.4", features = ["release_max_level_info"] } +thiserror = "1.0" diff --git a/src/located_error.rs b/packages/located-error/src/lib.rs similarity index 100% rename from src/located_error.rs rename to packages/located-error/src/lib.rs diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml new file mode 100644 index 000000000..9aec28384 --- /dev/null +++ b/packages/primitives/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "torrust-tracker-primitives" +version.workspace = true +authors.workspace = true +edition.workspace = true + +[dependencies] +serde = { version = "1.0", features = ["derive"] } +derive_more = "0.99" diff 
--git a/src/tracker/mode.rs b/packages/primitives/src/lib.rs similarity index 70% rename from src/tracker/mode.rs rename to packages/primitives/src/lib.rs index a0dba6e67..bcd48145f 100644 --- a/src/tracker/mode.rs +++ b/packages/primitives/src/lib.rs @@ -1,8 +1,14 @@ -use serde; use serde::{Deserialize, Serialize}; +// TODO: Move to the database crate once that gets its own crate. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, derive_more::Display, Clone)] +pub enum DatabaseDriver { + Sqlite3, + MySQL, +} + #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] -pub enum Mode { +pub enum TrackerMode { // Will track every new info hash and serve every peer. #[serde(rename = "public")] Public, diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml new file mode 100644 index 000000000..5be0e8aba --- /dev/null +++ b/packages/test-helpers/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "torrust-tracker-test-helpers" +version.workspace = true +authors.workspace = true +edition.workspace = true + +[dependencies] +tokio = { version = "1", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } +lazy_static = "1.4" +rand = "0.8.5" +torrust-tracker-configuration = { path = "../configuration"} diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs new file mode 100644 index 000000000..f7c584d55 --- /dev/null +++ b/packages/test-helpers/src/configuration.rs @@ -0,0 +1,34 @@ +use std::env; + +use torrust_tracker_configuration::Configuration; + +use crate::random; + +/// This configuration is used for testing. It generates random config values so they do not collide +/// if you run more than one tracker at the same time. 
+/// +/// # Panics +/// +/// Will panic if it can't convert the temp file path to string +#[must_use] +pub fn ephemeral() -> Configuration { + let mut config = Configuration { + log_level: Some("off".to_owned()), + ..Default::default() + }; + + // Ephemeral socket addresses + let bind_addr = "127.0.0.1:0".to_string(); + + config.http_api.bind_address = bind_addr.to_string(); + config.udp_trackers[0].bind_address = bind_addr; + + // Ephemeral sqlite database + let temp_directory = env::temp_dir(); + let random_db_id = random::string(16); + let temp_file = temp_directory.join(format!("data_{random_db_id}.db")); + + config.db_path = temp_file.to_str().unwrap().to_owned(); + + config +} diff --git a/packages/test-helpers/src/lib.rs b/packages/test-helpers/src/lib.rs new file mode 100644 index 000000000..e0f350131 --- /dev/null +++ b/packages/test-helpers/src/lib.rs @@ -0,0 +1,2 @@ +pub mod configuration; +pub mod random; diff --git a/packages/test-helpers/src/random.rs b/packages/test-helpers/src/random.rs new file mode 100644 index 000000000..ffb2ccd6f --- /dev/null +++ b/packages/test-helpers/src/random.rs @@ -0,0 +1,7 @@ +use rand::distributions::Alphanumeric; +use rand::{thread_rng, Rng}; + +/// Returns a random alphanumeric string of a certain size. 
+pub fn string(size: usize) -> String { + thread_rng().sample_iter(&Alphanumeric).take(size).map(char::from).collect() +} diff --git a/src/apis/middlewares/auth.rs b/src/apis/middlewares/auth.rs index 758ba1cda..f2745d42e 100644 --- a/src/apis/middlewares/auth.rs +++ b/src/apis/middlewares/auth.rs @@ -5,9 +5,9 @@ use axum::http::Request; use axum::middleware::Next; use axum::response::{IntoResponse, Response}; use serde::Deserialize; +use torrust_tracker_configuration::{Configuration, HttpApi}; use crate::apis::responses::unhandled_rejection_response; -use crate::config::{Configuration, HttpApi}; #[derive(Deserialize, Debug)] pub struct QueryParams { diff --git a/src/apis/server.rs b/src/apis/server.rs index bbb3e5852..5ec22f253 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -1,15 +1,156 @@ -use std::net::SocketAddr; +use std::net::{SocketAddr, TcpListener}; use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; use futures::Future; use log::info; +use tokio::task::JoinHandle; use warp::hyper; use super::routes::router; +use crate::signals::shutdown_signal_with_message; use crate::tracker::Tracker; +#[derive(Debug)] +pub enum Error { + Error(String), +} + +#[allow(clippy::module_name_repetitions)] +pub type StoppedApiServer = ApiServer; +#[allow(clippy::module_name_repetitions)] +pub type RunningApiServer = ApiServer; + +#[allow(clippy::module_name_repetitions)] +pub struct ApiServer { + pub cfg: torrust_tracker_configuration::HttpApi, + pub tracker: Arc, + pub state: S, +} + +pub struct Stopped; + +pub struct Running { + pub bind_address: SocketAddr, + stop_job_sender: tokio::sync::oneshot::Sender, + job: JoinHandle<()>, +} + +impl ApiServer { + pub fn new(cfg: torrust_tracker_configuration::HttpApi, tracker: Arc) -> Self { + Self { + cfg, + tracker, + state: Stopped {}, + } + } + + /// # Errors + /// + /// Will return `Err` if `TcpListener` can not bind to `bind_address`. 
+ pub fn start(self) -> Result, Error> { + let listener = TcpListener::bind(&self.cfg.bind_address).map_err(|e| Error::Error(e.to_string()))?; + + let bind_address = listener.local_addr().map_err(|e| Error::Error(e.to_string()))?; + + let cfg = self.cfg.clone(); + let tracker = self.tracker.clone(); + + let (sender, receiver) = tokio::sync::oneshot::channel::(); + + let job = tokio::spawn(async move { + if let (true, Some(ssl_cert_path), Some(ssl_key_path)) = (cfg.ssl_enabled, cfg.ssl_cert_path, cfg.ssl_key_path) { + let tls_config = RustlsConfig::from_pem_file(ssl_cert_path, ssl_key_path) + .await + .expect("Could not read ssl cert and/or key."); + + start_tls_from_tcp_listener_with_graceful_shutdown(listener, tls_config, &tracker, receiver) + .await + .expect("Could not start from tcp listener with tls."); + } else { + start_from_tcp_listener_with_graceful_shutdown(listener, &tracker, receiver) + .await + .expect("Could not start from tcp listener."); + } + }); + + let running_api_server: ApiServer = ApiServer { + cfg: self.cfg, + tracker: self.tracker, + state: Running { + bind_address, + stop_job_sender: sender, + job, + }, + }; + + Ok(running_api_server) + } +} + +impl ApiServer { + /// # Errors + /// + /// Will return `Err` if the oneshot channel to send the stop signal + /// has already been called once. 
+ pub async fn stop(self) -> Result, Error> { + self.state.stop_job_sender.send(1).map_err(|e| Error::Error(e.to_string()))?; + + let _ = self.state.job.await; + + let stopped_api_server: ApiServer = ApiServer { + cfg: self.cfg, + tracker: self.tracker, + state: Stopped {}, + }; + + Ok(stopped_api_server) + } +} + +pub fn start_from_tcp_listener_with_graceful_shutdown( + tcp_listener: TcpListener, + tracker: &Arc, + shutdown_signal: tokio::sync::oneshot::Receiver, +) -> impl Future> { + let app = router(tracker); + + let context = tcp_listener.local_addr().expect("Could not get context."); + + axum::Server::from_tcp(tcp_listener) + .expect("Could not bind to tcp listener.") + .serve(app.into_make_service()) + .with_graceful_shutdown(shutdown_signal_with_message( + shutdown_signal, + format!("Shutting down {context}.."), + )) +} + +pub fn start_tls_from_tcp_listener_with_graceful_shutdown( + tcp_listener: TcpListener, + tls_config: RustlsConfig, + tracker: &Arc, + shutdown_signal: tokio::sync::oneshot::Receiver, +) -> impl Future> { + let app = router(tracker); + + let context = tcp_listener.local_addr().expect("Could not get context."); + + let handle = Handle::new(); + + let cloned_handle = handle.clone(); + + tokio::spawn(async move { + shutdown_signal_with_message(shutdown_signal, format!("Shutting down {context}..")).await; + cloned_handle.shutdown(); + }); + + axum_server::from_tcp_rustls(tcp_listener, tls_config) + .handle(handle) + .serve(app.into_make_service()) +} + pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { let app = router(tracker); @@ -41,3 +182,36 @@ pub fn start_tls( .handle(handle) .serve(app.into_make_service()) } + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use torrust_tracker_configuration::Configuration; + use torrust_tracker_test_helpers::configuration::ephemeral; + + use crate::apis::server::ApiServer; + use crate::tracker; + use crate::tracker::statistics; + + fn tracker_configuration() -> Arc { + 
Arc::new(ephemeral()) + } + + #[tokio::test] + async fn it_should_be_able_to_start_from_stopped_state_and_then_stop_again() { + let cfg = tracker_configuration(); + + let tracker = Arc::new(tracker::Tracker::new(&cfg, None, statistics::Repo::new()).unwrap()); + + let stopped_api_server = ApiServer::new(cfg.http_api.clone(), tracker); + + let running_api_server_result = stopped_api_server.start(); + + assert!(running_api_server_result.is_ok()); + + let running_api_server = running_api_server_result.unwrap(); + + assert!(running_api_server.stop().await.is_ok()); + } +} diff --git a/src/databases/driver.rs b/src/databases/driver.rs index c601f1866..4ce6ea515 100644 --- a/src/databases/driver.rs +++ b/src/databases/driver.rs @@ -1,30 +1,22 @@ -use serde::{Deserialize, Serialize}; +use torrust_tracker_primitives::DatabaseDriver; use super::error::Error; use super::mysql::Mysql; use super::sqlite::Sqlite; use super::{Builder, Database}; -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, derive_more::Display, Clone)] -pub enum Driver { - Sqlite3, - MySQL, -} - -impl Driver { - /// . - /// - /// # Errors - /// - /// This function will return an error if unable to connect to the database. - pub fn build(&self, db_path: &str) -> Result, Error> { - let database = match self { - Driver::Sqlite3 => Builder::::build(db_path), - Driver::MySQL => Builder::::build(db_path), - }?; +/// . +/// +/// # Errors +/// +/// This function will return an error if unable to connect to the database. 
+pub fn build(driver: &DatabaseDriver, db_path: &str) -> Result, Error> { + let database = match driver { + DatabaseDriver::Sqlite3 => Builder::::build(db_path), + DatabaseDriver::MySQL => Builder::::build(db_path), + }?; - database.create_database_tables().expect("Could not create database tables."); + database.create_database_tables().expect("Could not create database tables."); - Ok(database) - } + Ok(database) } diff --git a/src/databases/error.rs b/src/databases/error.rs index 4bee82f19..68b732190 100644 --- a/src/databases/error.rs +++ b/src/databases/error.rs @@ -2,47 +2,46 @@ use std::panic::Location; use std::sync::Arc; use r2d2_mysql::mysql::UrlError; - -use super::driver::Driver; -use crate::located_error::{Located, LocatedError}; +use torrust_tracker_located_error::{Located, LocatedError}; +use torrust_tracker_primitives::DatabaseDriver; #[derive(thiserror::Error, Debug, Clone)] pub enum Error { #[error("The {driver} query unexpectedly returned nothing: {source}")] QueryReturnedNoRows { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, - driver: Driver, + driver: DatabaseDriver, }, #[error("The {driver} query was malformed: {source}")] InvalidQuery { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, - driver: Driver, + driver: DatabaseDriver, }, #[error("Unable to insert record into {driver} database, {location}")] InsertFailed { location: &'static Location<'static>, - driver: Driver, + driver: DatabaseDriver, }, #[error("Failed to remove record from {driver} database, error-code: {error_code}, {location}")] DeleteFailed { location: &'static Location<'static>, error_code: usize, - driver: Driver, + driver: DatabaseDriver, }, #[error("Failed to connect to {driver} database: {source}")] ConnectionError { source: LocatedError<'static, UrlError>, - driver: Driver, + driver: DatabaseDriver, }, #[error("Failed to create r2d2 {driver} connection pool: {source}")] ConnectionPool { source: LocatedError<'static, r2d2::Error>, - 
driver: Driver, + driver: DatabaseDriver, }, } @@ -52,11 +51,11 @@ impl From for Error { match err { r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => Error::QueryReturnedNoRows { source: (Arc::new(err) as Arc).into(), - driver: Driver::Sqlite3, + driver: DatabaseDriver::Sqlite3, }, _ => Error::InvalidQuery { source: (Arc::new(err) as Arc).into(), - driver: Driver::Sqlite3, + driver: DatabaseDriver::Sqlite3, }, } } @@ -68,7 +67,7 @@ impl From for Error { let e: Arc = Arc::new(err); Error::InvalidQuery { source: e.into(), - driver: Driver::MySQL, + driver: DatabaseDriver::MySQL, } } } @@ -78,14 +77,14 @@ impl From for Error { fn from(err: UrlError) -> Self { Self::ConnectionError { source: Located(err).into(), - driver: Driver::MySQL, + driver: DatabaseDriver::MySQL, } } } -impl From<(r2d2::Error, Driver)> for Error { +impl From<(r2d2::Error, DatabaseDriver)> for Error { #[track_caller] - fn from(e: (r2d2::Error, Driver)) -> Self { + fn from(e: (r2d2::Error, DatabaseDriver)) -> Self { let (err, driver) = e; Self::ConnectionPool { source: Located(err).into(), diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 4bb28f050..c8117a45c 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -7,14 +7,14 @@ use r2d2::Pool; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MysqlConnectionManager; +use torrust_tracker_primitives::DatabaseDriver; -use super::driver::Driver; use crate::databases::{Database, Error}; use crate::protocol::common::AUTH_KEY_LENGTH; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth::{self, Key}; -const DRIVER: Driver = Driver::MySQL; +const DRIVER: DatabaseDriver = DatabaseDriver::MySQL; pub struct Mysql { pool: Pool, diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 8fac09e47..4bf2931de 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -4,14 +4,14 @@ use std::str::FromStr; use async_trait::async_trait; 
use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; +use torrust_tracker_primitives::DatabaseDriver; -use super::driver::Driver; use crate::databases::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth::{self, Key}; -const DRIVER: Driver = Driver::Sqlite3; +const DRIVER: DatabaseDriver = DatabaseDriver::Sqlite3; pub struct Sqlite { pool: Pool, @@ -24,7 +24,7 @@ impl Database for Sqlite { /// Will return `r2d2::Error` if `db_path` is not able to create `SqLite` database. fn new(db_path: &str) -> Result { let cm = SqliteConnectionManager::file(db_path); - Pool::new(cm).map_or_else(|err| Err((err, Driver::Sqlite3).into()), |pool| Ok(Sqlite { pool })) + Pool::new(cm).map_or_else(|err| Err((err, DatabaseDriver::Sqlite3).into()), |pool| Ok(Sqlite { pool })) } fn create_database_tables(&self) -> Result<(), Error> { diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 9a92b243d..6458e2c2f 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -140,35 +140,37 @@ fn map_to_aquatic_event(event: &Option) -> AnnounceEvent { mod tests { use std::sync::Arc; - use crate::config::{ephemeral_configuration, Configuration}; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::TrackerMode; + use torrust_tracker_test_helpers::configuration; + use crate::http::axum_implementation::requests::announce::Announce; use crate::http::axum_implementation::responses; use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; use crate::protocol::info_hash::InfoHash; - use crate::tracker::mode::Mode; use crate::tracker::statistics::Keeper; use crate::tracker::{peer, Tracker}; fn private_tracker() -> Tracker { - let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Private; + let mut 
configuration = configuration::ephemeral(); + configuration.mode = TrackerMode::Private; tracker_factory(configuration) } fn listed_tracker() -> Tracker { - let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Listed; + let mut configuration = configuration::ephemeral(); + configuration.mode = TrackerMode::Listed; tracker_factory(configuration) } fn tracker_on_reverse_proxy() -> Tracker { - let mut configuration = ephemeral_configuration(); + let mut configuration = configuration::ephemeral(); configuration.on_reverse_proxy = true; tracker_factory(configuration) } fn tracker_not_on_reverse_proxy() -> Tracker { - let mut configuration = ephemeral_configuration(); + let mut configuration = configuration::ephemeral(); configuration.on_reverse_proxy = false; tracker_factory(configuration) } diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index ee59b80a3..43bf6c99f 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -98,35 +98,37 @@ mod tests { use std::str::FromStr; use std::sync::Arc; - use crate::config::{ephemeral_configuration, Configuration}; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::TrackerMode; + use torrust_tracker_test_helpers::configuration; + use crate::http::axum_implementation::requests::scrape::Scrape; use crate::http::axum_implementation::responses; use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; use crate::protocol::info_hash::InfoHash; - use crate::tracker::mode::Mode; use crate::tracker::statistics::Keeper; use crate::tracker::Tracker; fn private_tracker() -> Tracker { - let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Private; + let mut configuration = configuration::ephemeral(); + configuration.mode = TrackerMode::Private; tracker_factory(configuration) } fn listed_tracker() -> Tracker { 
- let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Listed; + let mut configuration = configuration::ephemeral(); + configuration.mode = TrackerMode::Listed; tracker_factory(configuration) } fn tracker_on_reverse_proxy() -> Tracker { - let mut configuration = ephemeral_configuration(); + let mut configuration = configuration::ephemeral(); configuration.on_reverse_proxy = true; tracker_factory(configuration) } fn tracker_not_on_reverse_proxy() -> Tracker { - let mut configuration = ephemeral_configuration(); + let mut configuration = configuration::ephemeral(); configuration.on_reverse_proxy = false; tracker_factory(configuration) } diff --git a/src/http/axum_implementation/requests/announce.rs b/src/http/axum_implementation/requests/announce.rs index 0f9a6fbfe..6e357ea6d 100644 --- a/src/http/axum_implementation/requests/announce.rs +++ b/src/http/axum_implementation/requests/announce.rs @@ -3,11 +3,11 @@ use std::panic::Location; use std::str::FromStr; use thiserror::Error; +use torrust_tracker_located_error::{Located, LocatedError}; use crate::http::axum_implementation::query::{ParseQueryError, Query}; use crate::http::axum_implementation::responses; use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; -use crate::located_error::{Located, LocatedError}; use crate::protocol::info_hash::{ConversionError, InfoHash}; use crate::tracker::peer::{self, IdConversionError}; diff --git a/src/http/axum_implementation/requests/scrape.rs b/src/http/axum_implementation/requests/scrape.rs index da50d4be5..505be566e 100644 --- a/src/http/axum_implementation/requests/scrape.rs +++ b/src/http/axum_implementation/requests/scrape.rs @@ -1,11 +1,11 @@ use std::panic::Location; use thiserror::Error; +use torrust_tracker_located_error::{Located, LocatedError}; use crate::http::axum_implementation::query::Query; use crate::http::axum_implementation::responses; use crate::http::percent_encoding::percent_decode_info_hash; 
-use crate::located_error::{Located, LocatedError}; use crate::protocol::info_hash::{ConversionError, InfoHash}; pub type NumberOfBytes = i64; diff --git a/src/http/axum_implementation/services/announce.rs b/src/http/axum_implementation/services/announce.rs index 5ce0fb1d5..255a73c8f 100644 --- a/src/http/axum_implementation/services/announce.rs +++ b/src/http/axum_implementation/services/announce.rs @@ -29,17 +29,18 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::TrackerMode; + use torrust_tracker_test_helpers::configuration; - use crate::config::{ephemeral_configuration, Configuration}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; - use crate::tracker::mode::Mode; use crate::tracker::statistics::Keeper; use crate::tracker::{peer, Tracker}; fn public_tracker() -> Tracker { - let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Public; + let mut configuration = configuration::ephemeral(); + configuration.mode = TrackerMode::Public; tracker_factory(configuration) } @@ -93,9 +94,9 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; - use crate::config::ephemeral_configuration; use crate::http::axum_implementation::services::announce::invoke; use crate::http::axum_implementation::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; use crate::tracker::peer::Peer; @@ -136,7 +137,7 @@ mod tests { let tracker = Arc::new( Tracker::new( - &Arc::new(ephemeral_configuration()), + &Arc::new(configuration::ephemeral()), Some(stats_event_sender), statistics::Repo::new(), ) @@ -149,7 +150,7 @@ mod tests { } fn tracker_with_an_ipv6_external_ip(stats_event_sender: Box) -> Tracker { - let mut configuration = ephemeral_configuration(); 
+ let mut configuration = configuration::ephemeral(); configuration.external_ip = Some(IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)).to_string()); @@ -200,7 +201,7 @@ mod tests { let tracker = Arc::new( Tracker::new( - &Arc::new(ephemeral_configuration()), + &Arc::new(configuration::ephemeral()), Some(stats_event_sender), statistics::Repo::new(), ) diff --git a/src/http/warp_implementation/error.rs b/src/http/warp_implementation/error.rs index f07c32f6d..55b22c27a 100644 --- a/src/http/warp_implementation/error.rs +++ b/src/http/warp_implementation/error.rs @@ -1,10 +1,9 @@ use std::panic::Location; use thiserror::Error; +use torrust_tracker_located_error::LocatedError; use warp::reject::Reject; -use crate::located_error::LocatedError; - #[derive(Error, Debug)] pub enum Error { #[error("tracker server error: {source}")] diff --git a/src/http/warp_implementation/filter_helpers.rs b/src/http/warp_implementation/filter_helpers.rs index 89188d868..583d38352 100644 --- a/src/http/warp_implementation/filter_helpers.rs +++ b/src/http/warp_implementation/filter_helpers.rs @@ -3,8 +3,7 @@ use std::panic::Location; use std::str::FromStr; use thiserror::Error; - -use crate::located_error::{Located, LocatedError}; +use torrust_tracker_located_error::{Located, LocatedError}; #[derive(Error, Debug)] pub enum XForwardedForParseError { diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index aa96af884..ce546f608 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -5,8 +5,8 @@ use axum_server::tls_rustls::RustlsConfig; use log::{info, warn}; use tokio::sync::oneshot; use tokio::task::JoinHandle; +use torrust_tracker_configuration::HttpTracker; -use crate::config::HttpTracker; use crate::http::axum_implementation::server; use crate::http::warp_implementation::server::Http; use crate::http::Version; diff --git a/src/jobs/torrent_cleanup.rs b/src/jobs/torrent_cleanup.rs index 073ceda61..4c4ed1f53 100644 --- 
a/src/jobs/torrent_cleanup.rs +++ b/src/jobs/torrent_cleanup.rs @@ -3,8 +3,8 @@ use std::sync::Arc; use chrono::Utc; use log::info; use tokio::task::JoinHandle; +use torrust_tracker_configuration::Configuration; -use crate::config::Configuration; use crate::tracker; #[must_use] diff --git a/src/jobs/tracker_apis.rs b/src/jobs/tracker_apis.rs index 00e39eeba..85bb1b59f 100644 --- a/src/jobs/tracker_apis.rs +++ b/src/jobs/tracker_apis.rs @@ -4,9 +4,9 @@ use axum_server::tls_rustls::RustlsConfig; use log::info; use tokio::sync::oneshot; use tokio::task::JoinHandle; +use torrust_tracker_configuration::HttpApi; use crate::apis::server; -use crate::config::HttpApi; use crate::tracker; #[derive(Debug)] diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index d0907c976..468f6dbbd 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -2,8 +2,8 @@ use std::sync::Arc; use log::{error, info, warn}; use tokio::task::JoinHandle; +use torrust_tracker_configuration::UdpTracker; -use crate::config::UdpTracker; use crate::tracker; use crate::udp::server::Udp; diff --git a/src/lib.rs b/src/lib.rs index cbda2854c..f80bcfb6c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,9 +1,7 @@ pub mod apis; -pub mod config; pub mod databases; pub mod http; pub mod jobs; -pub mod located_error; pub mod logging; pub mod protocol; pub mod setup; @@ -32,3 +30,47 @@ pub mod ephemeral_instance_keys { pub static ref RANDOM_SEED: Seed = Rng::gen(&mut ThreadRng::default()); } } + +pub mod signals { + use log::info; + + /// Resolves on `ctrl_c` or the `terminate` signal. 
+ pub async fn global_shutdown_signal() { + let ctrl_c = async { + tokio::signal::ctrl_c().await.expect("failed to install Ctrl+C handler"); + }; + + #[cfg(unix)] + let terminate = async { + tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate()) + .expect("failed to install signal handler") + .recv() + .await; + }; + + #[cfg(not(unix))] + let terminate = std::future::pending::<()>(); + + tokio::select! { + _ = ctrl_c => {}, + _ = terminate => {} + } + } + + /// Resolves when the `stop_receiver` or the `global_shutdown_signal()` resolves. + pub async fn shutdown_signal(stop_receiver: tokio::sync::oneshot::Receiver) { + let stop = async { stop_receiver.await.expect("Failed to install stop signal.") }; + + tokio::select! { + _ = stop => {}, + _ = global_shutdown_signal() => {} + } + } + + /// Same as `shutdown_signal()`, but shows a message when it resolves. + pub async fn shutdown_signal_with_message(stop_receiver: tokio::sync::oneshot::Receiver, message: String) { + shutdown_signal(stop_receiver).await; + + info!("{message}"); + } +} diff --git a/src/logging.rs b/src/logging.rs index 4d16f7670..83e2c9360 100644 --- a/src/logging.rs +++ b/src/logging.rs @@ -2,8 +2,7 @@ use std::str::FromStr; use std::sync::Once; use log::{info, LevelFilter}; - -use crate::config::Configuration; +use torrust_tracker_configuration::Configuration; static INIT: Once = Once::new(); diff --git a/src/main.rs b/src/main.rs index 199e8f5c5..fcb8331a4 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,9 +2,9 @@ use std::env; use std::sync::Arc; use log::info; -use torrust_tracker::config::Configuration; use torrust_tracker::stats::setup_statistics; use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time, tracker}; +use torrust_tracker_configuration::Configuration; #[tokio::main] async fn main() { diff --git a/src/setup.rs b/src/setup.rs index 98d311178..5b51632a7 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -2,8 +2,8 @@ use std::sync::Arc; use log::warn; 
use tokio::task::JoinHandle; +use torrust_tracker_configuration::Configuration; -use crate::config::Configuration; use crate::http::Version; use crate::jobs::{http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; use crate::tracker; diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 84252f667..00663c383 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -10,8 +10,8 @@ use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use serde::{Deserialize, Serialize}; use thiserror::Error; +use torrust_tracker_located_error::LocatedError; -use crate::located_error::LocatedError; use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time, TimeNow}; use crate::protocol::common::AUTH_KEY_LENGTH; diff --git a/src/tracker/error.rs b/src/tracker/error.rs index 080903da6..10ca5ec19 100644 --- a/src/tracker/error.rs +++ b/src/tracker/error.rs @@ -1,6 +1,6 @@ use std::panic::Location; -use crate::located_error::LocatedError; +use torrust_tracker_located_error::LocatedError; #[derive(thiserror::Error, Debug, Clone)] pub enum Error { diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 2ebc4bfc3..18ada69e0 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -1,6 +1,5 @@ pub mod auth; pub mod error; -pub mod mode; pub mod peer; pub mod services; pub mod statistics; @@ -15,19 +14,19 @@ use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; +use torrust_tracker_configuration::Configuration; +use torrust_tracker_primitives::TrackerMode; use self::auth::Key; use self::error::Error; use self::peer::Peer; use self::torrent::{SwarmMetadata, SwarmStats}; -use crate::config::Configuration; -use crate::databases::driver::Driver; use crate::databases::{self, Database}; use crate::protocol::info_hash::InfoHash; pub struct Tracker { pub config: Arc, - mode: mode::Mode, + mode: TrackerMode, keys: RwLock>, whitelist: RwLock>, torrents: RwLock>, @@ -96,7 +95,7 @@ impl Tracker { 
stats_event_sender: Option>, stats_repository: statistics::Repo, ) -> Result { - let database = Driver::build(&config.db_driver, &config.db_path)?; + let database = databases::driver::build(&config.db_driver, &config.db_path)?; Ok(Tracker { config: config.clone(), @@ -111,15 +110,15 @@ impl Tracker { } pub fn is_public(&self) -> bool { - self.mode == mode::Mode::Public + self.mode == TrackerMode::Public } pub fn is_private(&self) -> bool { - self.mode == mode::Mode::Private || self.mode == mode::Mode::PrivateListed + self.mode == TrackerMode::Private || self.mode == TrackerMode::PrivateListed } pub fn is_whitelisted(&self) -> bool { - self.mode == mode::Mode::Listed || self.mode == mode::Mode::PrivateListed + self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed } pub fn requires_authentication(&self) -> bool { @@ -554,35 +553,36 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::TrackerMode; + use torrust_tracker_test_helpers::configuration::{self}; - use crate::config::{ephemeral_configuration, Configuration}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; - use crate::tracker::mode::Mode; use crate::tracker::peer::{self, Peer}; use crate::tracker::statistics::Keeper; use crate::tracker::{TorrentsMetrics, Tracker}; pub fn public_tracker() -> Tracker { - let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Public; + let mut configuration = configuration::ephemeral(); + configuration.mode = TrackerMode::Public; tracker_factory(configuration) } pub fn private_tracker() -> Tracker { - let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Private; + let mut configuration = configuration::ephemeral(); + configuration.mode = TrackerMode::Private; tracker_factory(configuration) } pub fn whitelisted_tracker() -> Tracker { - 
let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Listed; + let mut configuration = configuration::ephemeral(); + configuration.mode = TrackerMode::Listed; tracker_factory(configuration) } pub fn tracker_persisting_torrents_in_database() -> Tracker { - let mut configuration = ephemeral_configuration(); + let mut configuration = configuration::ephemeral(); configuration.persistent_torrent_completed_stat = true; tracker_factory(configuration) } diff --git a/src/tracker/services/common.rs b/src/tracker/services/common.rs index 8757e6a21..39aa3cc0b 100644 --- a/src/tracker/services/common.rs +++ b/src/tracker/services/common.rs @@ -1,6 +1,7 @@ use std::sync::Arc; -use crate::config::Configuration; +use torrust_tracker_configuration::Configuration; + use crate::tracker::statistics::Keeper; use crate::tracker::Tracker; diff --git a/src/tracker/services/statistics.rs b/src/tracker/services/statistics.rs index 745f5563c..c0aaf9c64 100644 --- a/src/tracker/services/statistics.rs +++ b/src/tracker/services/statistics.rs @@ -36,13 +36,15 @@ pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { mod tests { use std::sync::Arc; - use crate::config::{ephemeral_configuration, Configuration}; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_test_helpers::configuration::ephemeral; + use crate::tracker; use crate::tracker::services::common::tracker_factory; use crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral_configuration()) + Arc::new(ephemeral()) } #[tokio::test] diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index e2353876e..ce652a091 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -137,14 +137,16 @@ mod tests { use std::str::FromStr; use std::sync::Arc; - use crate::config::{ephemeral_configuration, Configuration}; + use torrust_tracker_configuration::Configuration; + use 
torrust_tracker_test_helpers::configuration::ephemeral; + use crate::protocol::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; use crate::tracker::services::torrent::tests::sample_peer; use crate::tracker::services::torrent::{get_torrent_info, Info}; pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral_configuration()) + Arc::new(ephemeral()) } #[tokio::test] @@ -190,14 +192,16 @@ mod tests { use std::str::FromStr; use std::sync::Arc; - use crate::config::{ephemeral_configuration, Configuration}; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_test_helpers::configuration::ephemeral; + use crate::protocol::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; use crate::tracker::services::torrent::tests::sample_peer; use crate::tracker::services::torrent::{get_torrents, BasicInfo, Pagination}; pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral_configuration()) + Arc::new(ephemeral()) } #[tokio::test] diff --git a/src/udp/error.rs b/src/udp/error.rs index de66eb2bf..a6381cc78 100644 --- a/src/udp/error.rs +++ b/src/udp/error.rs @@ -1,8 +1,7 @@ use std::panic::Location; use thiserror::Error; - -use crate::located_error::LocatedError; +use torrust_tracker_located_error::LocatedError; #[derive(Error, Debug)] pub enum Error { diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 8fda77fb4..4a0874c72 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -250,31 +250,33 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::TrackerMode; + use torrust_tracker_test_helpers::configuration::ephemeral; - use crate::config::{ephemeral_configuration, Configuration}; use crate::protocol::clock::{Current, Time}; - use crate::tracker::{self, mode, peer, statistics}; + use crate::tracker::{self, peer, statistics}; fn tracker_configuration() -> Arc { 
Arc::new(default_testing_tracker_configuration()) } fn default_testing_tracker_configuration() -> Configuration { - ephemeral_configuration() + ephemeral() } fn initialized_public_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Mode::Public).into()); + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Public).into()); initialized_tracker(&configuration) } fn initialized_private_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Mode::Private).into()); + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Private).into()); initialized_tracker(&configuration) } fn initialized_whitelisted_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Mode::Listed).into()); + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Listed).into()); initialized_tracker(&configuration) } @@ -354,7 +356,7 @@ mod tests { self } - pub fn with_mode(mut self, mode: mode::Mode) -> Self { + pub fn with_mode(mut self, mode: TrackerMode) -> Self { self.configuration.mode = mode; self } diff --git a/src/udp/server.rs b/src/udp/server.rs index e85c81e9d..f74468189 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -1,25 +1,113 @@ +use std::future::Future; use std::io::Cursor; use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::Response; +use futures::pin_mut; use log::{debug, error, info}; use tokio::net::UdpSocket; +use tokio::task::JoinHandle; -use crate::tracker; +use crate::signals::shutdown_signal; +use crate::tracker::Tracker; use crate::udp::handlers::handle_packet; use crate::udp::MAX_PACKET_SIZE; +#[derive(Debug)] +pub enum Error { + Error(String), +} + +#[allow(clippy::module_name_repetitions)] +pub type StoppedUdpServer = UdpServer; +#[allow(clippy::module_name_repetitions)] +pub 
type RunningUdpServer = UdpServer; + +#[allow(clippy::module_name_repetitions)] +pub struct UdpServer { + pub cfg: torrust_tracker_configuration::UdpTracker, + pub tracker: Arc, + pub state: S, +} + +pub struct Stopped; + +pub struct Running { + pub bind_address: SocketAddr, + stop_job_sender: tokio::sync::oneshot::Sender, + job: JoinHandle<()>, +} + +impl UdpServer { + pub fn new(cfg: torrust_tracker_configuration::UdpTracker, tracker: Arc) -> Self { + Self { + cfg, + tracker, + state: Stopped {}, + } + } + + /// # Errors + /// + /// Will return `Err` if UDP can't bind to given bind address. + pub async fn start(self) -> Result, Error> { + let udp = Udp::new(self.tracker.clone(), &self.cfg.bind_address) + .await + .map_err(|e| Error::Error(e.to_string()))?; + + let bind_address = udp.socket.local_addr().map_err(|e| Error::Error(e.to_string()))?; + + let (sender, receiver) = tokio::sync::oneshot::channel::(); + + let job = tokio::spawn(async move { + udp.start_with_graceful_shutdown(shutdown_signal(receiver)).await; + }); + + let running_udp_server: UdpServer = UdpServer { + cfg: self.cfg, + tracker: self.tracker, + state: Running { + bind_address, + stop_job_sender: sender, + job, + }, + }; + + Ok(running_udp_server) + } +} + +impl UdpServer { + /// # Errors + /// + /// Will return `Err` if the oneshot channel to send the stop signal + /// has already been called once. + pub async fn stop(self) -> Result, Error> { + self.state.stop_job_sender.send(1).map_err(|e| Error::Error(e.to_string()))?; + + let _ = self.state.job.await; + + let stopped_api_server: UdpServer = UdpServer { + cfg: self.cfg, + tracker: self.tracker, + state: Stopped {}, + }; + + Ok(stopped_api_server) + } +} + pub struct Udp { socket: Arc, - tracker: Arc, + tracker: Arc, } impl Udp { /// # Errors /// /// Will return `Err` unable to bind to the supplied `bind_address`. 
- pub async fn new(tracker: Arc, bind_address: &str) -> tokio::io::Result { + pub async fn new(tracker: Arc, bind_address: &str) -> tokio::io::Result { let socket = UdpSocket::bind(bind_address).await?; Ok(Udp { @@ -57,6 +145,41 @@ impl Udp { } } + /// # Panics + /// + /// It would panic if unable to resolve the `local_addr` from the supplied “socket“. + async fn start_with_graceful_shutdown(&self, shutdown_signal: F) + where + F: Future, + { + // Pin the future so that it doesn't move to the first loop iteration. + pin_mut!(shutdown_signal); + + loop { + let mut data = [0; MAX_PACKET_SIZE]; + let socket = self.socket.clone(); + let tracker = self.tracker.clone(); + + tokio::select! { + _ = &mut shutdown_signal => { + info!("Stopping UDP server: {}..", self.socket.local_addr().unwrap()); + break; + } + Ok((valid_bytes, remote_addr)) = socket.recv_from(&mut data) => { + let payload = data[..valid_bytes].to_vec(); + + info!("Received {} bytes", payload.len()); + debug!("From: {}", &remote_addr); + debug!("Payload: {:?}", payload); + + let response = handle_packet(remote_addr, payload, tracker).await; + + Udp::send_response(socket, remote_addr, response).await; + } + } + } + } + async fn send_response(socket: Arc, remote_addr: SocketAddr, response: Response) { let buffer = vec![0u8; MAX_PACKET_SIZE]; let mut cursor = Cursor::new(buffer); diff --git a/tests/api/mod.rs b/tests/api/mod.rs index 8dd6f4c53..fcb24e491 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -5,7 +5,7 @@ use torrust_tracker::tracker::Tracker; pub mod asserts; pub mod client; pub mod connection_info; -pub mod server; +pub mod test_environment; /// It forces a database error by dropping all tables. /// That makes any query fail. 
diff --git a/tests/api/server.rs b/tests/api/server.rs deleted file mode 100644 index 0e23a4320..000000000 --- a/tests/api/server.rs +++ /dev/null @@ -1,78 +0,0 @@ -use core::panic; -use std::sync::Arc; - -use torrust_tracker::config::{ephemeral_configuration, Configuration}; -use torrust_tracker::jobs::tracker_apis; -use torrust_tracker::protocol::info_hash::InfoHash; -use torrust_tracker::tracker::peer::Peer; -use torrust_tracker::tracker::statistics::Keeper; -use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; - -use super::connection_info::ConnectionInfo; - -pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral_configuration()) -} - -pub async fn start_default_api() -> Server { - let configuration = tracker_configuration(); - start_custom_api(configuration.clone()).await -} - -pub async fn start_custom_api(configuration: Arc) -> Server { - let server = start(&configuration); - tracker_apis::start_job(&configuration.http_api, server.tracker.clone()).await; - server -} - -fn start(configuration: &Arc) -> Server { - let connection_info = ConnectionInfo::authenticated( - &configuration.http_api.bind_address.clone(), - &configuration.http_api.access_tokens.get_key_value("admin").unwrap().1.clone(), - ); - - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; - - // Initialize logging - logging::setup(configuration); - - Server { - tracker, - connection_info, - } -} - -pub struct Server { - pub tracker: Arc, - pub connection_info: 
ConnectionInfo, -} - -impl Server { - pub fn get_connection_info(&self) -> ConnectionInfo { - self.connection_info.clone() - } - - pub fn get_bind_address(&self) -> String { - self.connection_info.bind_address.clone() - } - - /// Add a torrent to the tracker - pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { - self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - } -} diff --git a/tests/api/test_environment.rs b/tests/api/test_environment.rs new file mode 100644 index 000000000..ff143ec7a --- /dev/null +++ b/tests/api/test_environment.rs @@ -0,0 +1,134 @@ +use core::panic; +use std::sync::Arc; + +use torrust_tracker::apis::server::{ApiServer, RunningApiServer, StoppedApiServer}; +use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::tracker::peer::Peer; +use torrust_tracker::tracker::statistics::Keeper; +use torrust_tracker::tracker::Tracker; +use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; +use torrust_tracker_configuration::Configuration; +use torrust_tracker_test_helpers::configuration::ephemeral; + +use super::connection_info::ConnectionInfo; + +#[allow(clippy::module_name_repetitions, dead_code)] +pub type StoppedTestEnvironment = TestEnvironment; +#[allow(clippy::module_name_repetitions)] +pub type RunningTestEnvironment = TestEnvironment; + +pub struct TestEnvironment { + pub tracker: Arc, + pub state: S, +} + +#[allow(dead_code)] +pub struct Stopped { + api_server: StoppedApiServer, +} + +pub struct Running { + api_server: RunningApiServer, +} + +impl TestEnvironment { + /// Add a torrent to the tracker + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { + self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + } +} + +impl TestEnvironment { + #[allow(dead_code)] + pub fn new_stopped() -> Self { + let api_server = api_server(); + + Self { + tracker: api_server.tracker.clone(), + state: Stopped { api_server }, + } + } + + 
#[allow(dead_code)] + pub fn start(self) -> TestEnvironment { + TestEnvironment { + tracker: self.tracker, + state: Running { + api_server: self.state.api_server.start().unwrap(), + }, + } + } +} + +impl TestEnvironment { + pub fn new_running() -> Self { + let api_server = running_api_server(); + + Self { + tracker: api_server.tracker.clone(), + state: Running { api_server }, + } + } + + pub async fn stop(self) -> TestEnvironment { + TestEnvironment { + tracker: self.tracker, + state: Stopped { + api_server: self.state.api_server.stop().await.unwrap(), + }, + } + } + + pub fn get_connection_info(&self) -> ConnectionInfo { + ConnectionInfo { + bind_address: self.state.api_server.state.bind_address.to_string(), + api_token: self.state.api_server.cfg.access_tokens.get("admin").cloned(), + } + } +} + +#[allow(clippy::module_name_repetitions)] +pub fn running_test_environment() -> RunningTestEnvironment { + TestEnvironment::new_running() +} + +pub fn tracker_configuration() -> Arc { + Arc::new(ephemeral()) +} + +// TODO: Move to test-helpers crate once `Tracker` is isolated. 
+pub fn tracker_instance(configuration: &Arc) -> Arc { + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + let tracker = match Tracker::new(configuration, Some(stats_event_sender), stats_repository) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; + + // Initialize logging + logging::setup(configuration); + + tracker +} + +pub fn api_server() -> StoppedApiServer { + let config = tracker_configuration(); + + let tracker = tracker_instance(&config); + + ApiServer::new(config.http_api.clone(), tracker) +} + +pub fn running_api_server() -> RunningApiServer { + api_server().start().unwrap() +} diff --git a/tests/http/server.rs b/tests/http/server.rs index 1c8d1cb77..147ad93c1 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -2,35 +2,35 @@ use core::panic; use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; -use torrust_tracker::config::{ephemeral_configuration, Configuration}; use torrust_tracker::http::Version; use torrust_tracker::jobs::http_tracker; use torrust_tracker::protocol::info_hash::InfoHash; -use torrust_tracker::tracker::mode::Mode; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; +use torrust_tracker_configuration::{ephemeral_configuration, Configuration}; +use torrust_tracker_primitives::TrackerMode; use super::connection_info::ConnectionInfo; /// Starts a HTTP tracker with mode "public" in settings pub async fn start_public_http_tracker(version: Version) -> Server { let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Public; + 
configuration.mode = TrackerMode::Public; start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with mode "listed" in settings pub async fn start_whitelisted_http_tracker(version: Version) -> Server { let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Listed; + configuration.mode = TrackerMode::Listed; start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with mode "private" in settings pub async fn start_private_http_tracker(version: Version) -> Server { let mut configuration = ephemeral_configuration(); - configuration.mode = Mode::Private; + configuration.mode = TrackerMode::Private; start_custom_http_tracker(Arc::new(configuration), version).await } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index 35d9af248..ccdcded5e 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -27,74 +27,84 @@ mod tracker_apis { mod authentication { use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; use crate::api::client::Client; - use crate::api::server::start_default_api; + use crate::api::test_environment::running_test_environment; use crate::common::http::{Query, QueryParam}; #[tokio::test] async fn should_authenticate_requests_by_using_a_token_query_param() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); - let token = api_server.get_connection_info().api_token.unwrap(); + let token = test_env.get_connection_info().api_token.unwrap(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_request_with_query("stats", Query::params([QueryParam::new("token", &token)].to_vec())) .await; assert_eq!(response.status(), 200); + + test_env.stop().await; } #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_missing() { - let api_server = start_default_api().await; + let test_env = 
running_test_environment(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_request_with_query("stats", Query::default()) .await; assert_unauthorized(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_empty() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_request_with_query("stats", Query::params([QueryParam::new("token", "")].to_vec())) .await; assert_token_not_valid(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_invalid() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_request_with_query("stats", Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec())) .await; assert_token_not_valid(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_query() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); - let token = api_server.get_connection_info().api_token.unwrap(); + let token = test_env.get_connection_info().api_token.unwrap(); // At the beginning of the query component - let response = Client::new(api_server.get_connection_info()) - .get_request(&format!("torrents?token={}&limit=1", &token)) + let response = Client::new(test_env.get_connection_info()) + .get_request(&format!("torrents?token={token}&limit=1")) .await; assert_eq!(response.status(), 200); // At the end of the query component - let response = Client::new(api_server.get_connection_info()) - 
.get_request(&format!("torrents?limit=1&token={}", &token)) + let response = Client::new(test_env.get_connection_info()) + .get_request(&format!("torrents?limit=1&token={token}")) .await; assert_eq!(response.status(), 200); + + test_env.stop().await; } } @@ -107,21 +117,21 @@ mod tracker_apis { use crate::api::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::server::start_default_api; + use crate::api::test_environment::running_test_environment; use crate::common::fixtures::PeerBuilder; #[tokio::test] async fn should_allow_getting_tracker_statistics() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); - api_server + test_env .add_torrent_peer( &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), &PeerBuilder::default().into(), ) .await; - let response = Client::new(api_server.get_connection_info()).get_tracker_statistics().await; + let response = Client::new(test_env.get_connection_info()).get_tracker_statistics().await; assert_stats( response, @@ -145,23 +155,29 @@ mod tracker_apis { }, ) .await; + + test_env.stop().await; } #[tokio::test] async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .get_tracker_statistics() - .await; + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .get_tracker_statistics() + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) 
.get_tracker_statistics() .await; assert_unauthorized(response).await; + + test_env.stop().await; } } @@ -179,21 +195,19 @@ mod tracker_apis { }; use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::server::start_default_api; + use crate::api::test_environment::running_test_environment; use crate::common::fixtures::PeerBuilder; use crate::common::http::{Query, QueryParam}; #[tokio::test] async fn should_allow_getting_torrents() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - api_server.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; + test_env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; - let response = Client::new(api_server.get_connection_info()) - .get_torrents(Query::empty()) - .await; + let response = Client::new(test_env.get_connection_info()).get_torrents(Query::empty()).await; assert_torrent_list( response, @@ -206,24 +220,22 @@ mod tracker_apis { }], ) .await; + + test_env.stop().await; } #[tokio::test] async fn should_allow_limiting_the_torrents_in_the_result() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - api_server - .add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()) - .await; - api_server - .add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()) - .await; + test_env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + test_env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; - let response = Client::new(api_server.get_connection_info()) + let 
response = Client::new(test_env.get_connection_info()) .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) .await; @@ -238,24 +250,22 @@ mod tracker_apis { }], ) .await; + + test_env.stop().await; } #[tokio::test] async fn should_allow_the_torrents_result_pagination() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - api_server - .add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()) - .await; - api_server - .add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()) - .await; + test_env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + test_env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) .await; @@ -270,66 +280,76 @@ mod tracker_apis { }], ) .await; + + test_env.stop().await; } #[tokio::test] async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let invalid_offsets = [" ", "-1", "1.1", "INVALID OFFSET"]; for invalid_offset in &invalid_offsets { - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) .await; assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; } + + test_env.stop().await; } #[tokio::test] async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_parsed() { - 
let api_server = start_default_api().await; + let test_env = running_test_environment(); let invalid_limits = [" ", "-1", "1.1", "INVALID LIMIT"]; for invalid_limit in &invalid_limits { - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) .await; assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; } + + test_env.stop().await; } #[tokio::test] async fn should_not_allow_getting_torrents_for_unauthenticated_users() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .get_torrents(Query::empty()) - .await; + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .get_torrents(Query::empty()) + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) .get_torrents(Query::default()) .await; assert_unauthorized(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_allow_getting_a_torrent_info() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let peer = PeerBuilder::default().into(); - api_server.add_torrent_peer(&info_hash, &peer).await; + test_env.add_torrent_peer(&info_hash, &peer).await; - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_torrent(&info_hash.to_string()) .await; @@ -344,27 +364,31 @@ mod tracker_apis { }, ) .await; + + test_env.stop().await; 
} #[tokio::test] async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_torrent(&info_hash.to_string()) .await; assert_torrent_not_known(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invalid() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); for invalid_infohash in &invalid_infohashes_returning_bad_request() { - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_torrent(invalid_infohash) .await; @@ -372,33 +396,39 @@ mod tracker_apis { } for invalid_infohash in &invalid_infohashes_returning_not_found() { - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .get_torrent(invalid_infohash) .await; assert_not_found(response).await; } + + test_env.stop().await; } #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - api_server.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; + test_env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .get_torrent(&info_hash.to_string()) - .await; + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + 
.get_torrent(&info_hash.to_string()) + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) .get_torrent(&info_hash.to_string()) .await; assert_unauthorized(response).await; + + test_env.stop().await; } } @@ -416,82 +446,92 @@ mod tracker_apis { use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::force_database_error; - use crate::api::server::start_default_api; + use crate::api::test_environment::running_test_environment; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .whitelist_a_torrent(&info_hash) .await; assert_ok(response).await; assert!( - api_server + test_env .tracker .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) .await ); + + test_env.stop().await; } #[tokio::test] async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let api_client = Client::new(api_server.get_connection_info()); + let api_client = Client::new(test_env.get_connection_info()); let response = api_client.whitelist_a_torrent(&info_hash).await; assert_ok(response).await; let response = api_client.whitelist_a_torrent(&info_hash).await; assert_ok(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { - let api_server = 
start_default_api().await; + let test_env = running_test_environment(); let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .whitelist_a_torrent(&info_hash) - .await; + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .whitelist_a_torrent(&info_hash) + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) .whitelist_a_torrent(&info_hash) .await; assert_unauthorized(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_whitelisted() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - force_database_error(&api_server.tracker); + force_database_error(&test_env.tracker); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .whitelist_a_torrent(&info_hash) .await; assert_failed_to_whitelist_torrent(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invalid() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); for invalid_infohash in &invalid_infohashes_returning_bad_request() { - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .whitelist_a_torrent(invalid_infohash) .await; @@ -499,49 +539,55 @@ mod tracker_apis { } for invalid_infohash in &invalid_infohashes_returning_not_found() { - let response = Client::new(api_server.get_connection_info()) + let response = 
Client::new(test_env.get_connection_info()) .whitelist_a_torrent(invalid_infohash) .await; assert_not_found(response).await; } + + test_env.stop().await; } #[tokio::test] async fn should_allow_removing_a_torrent_from_the_whitelist() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .remove_torrent_from_whitelist(&hash) .await; assert_ok(response).await; - assert!(!api_server.tracker.is_info_hash_whitelisted(&info_hash).await); + assert!(!test_env.tracker.is_info_hash_whitelisted(&info_hash).await); + + test_env.stop().await; } #[tokio::test] async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .remove_torrent_from_whitelist(&non_whitelisted_torrent_hash) .await; assert_ok(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_infohash_is_invalid() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); for invalid_infohash in &invalid_infohashes_returning_bad_request() { - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .remove_torrent_from_whitelist(invalid_infohash) .await; @@ -549,89 +595,101 @@ mod tracker_apis 
{ } for invalid_infohash in &invalid_infohashes_returning_not_found() { - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .remove_torrent_from_whitelist(invalid_infohash) .await; assert_not_found(response).await; } + + test_env.stop().await; } #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - force_database_error(&api_server.tracker); + force_database_error(&test_env.tracker); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .remove_torrent_from_whitelist(&hash) .await; assert_failed_to_remove_torrent_from_whitelist(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .remove_torrent_from_whitelist(&hash) - .await; + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .remove_torrent_from_whitelist(&hash) + .await; assert_token_not_valid(response).await; - 
api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) .remove_torrent_from_whitelist(&hash) .await; assert_unauthorized(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_allow_reload_the_whitelist_from_the_database() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(api_server.get_connection_info()).reload_whitelist().await; + let response = Client::new(test_env.get_connection_info()).reload_whitelist().await; assert_ok(response).await; /* todo: this assert fails because the whitelist has not been reloaded yet. We could add a new endpoint GET /api/whitelist/:info_hash to check if a torrent is whitelisted and use that endpoint to check if the torrent is still there after reloading. 
assert!( - !(api_server + !(test_env .tracker .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) .await) ); */ + + test_env.stop().await; } #[tokio::test] async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - api_server.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - force_database_error(&api_server.tracker); + force_database_error(&test_env.tracker); - let response = Client::new(api_server.get_connection_info()).reload_whitelist().await; + let response = Client::new(test_env.get_connection_info()).reload_whitelist().await; assert_failed_to_reload_whitelist(response).await; + + test_env.stop().await; } } @@ -648,50 +706,56 @@ mod tracker_apis { use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::force_database_error; - use crate::api::server::start_default_api; + use crate::api::test_environment::running_test_environment; #[tokio::test] async fn should_allow_generating_a_new_auth_key() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let seconds_valid = 60; - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .generate_auth_key(seconds_valid) .await; let auth_key_resource = assert_auth_key_utf8(response).await; // Verify the key with the tracker - assert!(api_server + assert!(test_env .tracker .verify_auth_key(&auth_key_resource.key.parse::().unwrap()) .await .is_ok()); + + test_env.stop().await; } #[tokio::test] async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { - let api_server = 
start_default_api().await; + let test_env = running_test_environment(); let seconds_valid = 60; - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .generate_auth_key(seconds_valid) - .await; + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .generate_auth_key(seconds_valid) + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) .generate_auth_key(seconds_valid) .await; assert_unauthorized(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let invalid_key_durations = [ // "", it returns 404 @@ -700,49 +764,55 @@ mod tracker_apis { ]; for invalid_key_duration in &invalid_key_durations { - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .post(&format!("key/{}", &invalid_key_duration)) .await; assert_invalid_key_duration_param(response, invalid_key_duration).await; } + + test_env.stop().await; } #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_generated() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); - force_database_error(&api_server.tracker); + force_database_error(&test_env.tracker); let seconds_valid = 60; - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .generate_auth_key(seconds_valid) .await; assert_failed_to_generate_key(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_allow_deleting_an_auth_key() { - let api_server = start_default_api().await; 
+ let test_env = running_test_environment(); let seconds_valid = 60; - let auth_key = api_server + let auth_key = test_env .tracker .generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .delete_auth_key(&auth_key.key.to_string()) .await; assert_ok(response).await; + + test_env.stop().await; } #[tokio::test] - async fn should_fail_deleting_an_auth_key_when_the_key_is_invalid() { - let api_server = start_default_api().await; + async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { + let test_env = running_test_environment(); let invalid_auth_keys = [ // "", it returns a 404 @@ -755,123 +825,139 @@ mod tracker_apis { ]; for invalid_auth_key in &invalid_auth_keys { - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .delete_auth_key(invalid_auth_key) .await; assert_invalid_auth_key_param(response, invalid_auth_key).await; } + + test_env.stop().await; } #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_deleted() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let seconds_valid = 60; - let auth_key = api_server + let auth_key = test_env .tracker .generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - force_database_error(&api_server.tracker); + force_database_error(&test_env.tracker); - let response = Client::new(api_server.get_connection_info()) + let response = Client::new(test_env.get_connection_info()) .delete_auth_key(&auth_key.key.to_string()) .await; assert_failed_to_delete_key(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let seconds_valid = 60; // Generate new auth key - let 
auth_key = api_server + let auth_key = test_env .tracker .generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .delete_auth_key(&auth_key.key.to_string()) - .await; + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .delete_auth_key(&auth_key.key.to_string()) + .await; assert_token_not_valid(response).await; // Generate new auth key - let auth_key = api_server + let auth_key = test_env .tracker .generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) .delete_auth_key(&auth_key.key.to_string()) .await; assert_unauthorized(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_allow_reloading_keys() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let seconds_valid = 60; - api_server + test_env .tracker .generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - let response = Client::new(api_server.get_connection_info()).reload_keys().await; + let response = Client::new(test_env.get_connection_info()).reload_keys().await; assert_ok(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_fail_when_keys_cannot_be_reloaded() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let seconds_valid = 60; - api_server + test_env .tracker .generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - force_database_error(&api_server.tracker); + force_database_error(&test_env.tracker); - let response = Client::new(api_server.get_connection_info()).reload_keys().await; + let response = 
Client::new(test_env.get_connection_info()).reload_keys().await; assert_failed_to_reload_keys(response).await; + + test_env.stop().await; } #[tokio::test] async fn should_not_allow_reloading_keys_for_unauthenticated_users() { - let api_server = start_default_api().await; + let test_env = running_test_environment(); let seconds_valid = 60; - api_server + test_env .tracker .generate_auth_key(Duration::from_secs(seconds_valid)) .await .unwrap(); - let response = Client::new(connection_with_invalid_token(&api_server.get_bind_address())) - .reload_keys() - .await; + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .reload_keys() + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(&api_server.get_bind_address())) + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) .reload_keys() .await; assert_unauthorized(response).await; + + test_env.stop().await; } } } diff --git a/tests/udp/client.rs b/tests/udp/client.rs index 3cb4d6134..0bec03d03 100644 --- a/tests/udp/client.rs +++ b/tests/udp/client.rs @@ -1,41 +1,54 @@ use std::io::Cursor; -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::sync::Arc; use aquatic_udp_protocol::{Request, Response}; -use rand::{thread_rng, Rng}; +use tokio::net::UdpSocket; use torrust_tracker::udp::MAX_PACKET_SIZE; -use crate::common::udp::Client as UdpClient; +use crate::udp::source_address; -/// Creates a new generic UDP client connected to a generic UDP server -pub async fn new_udp_client_connected(remote_address: &SocketAddr) -> UdpClient { - let local_address = loopback_socket_address(ephemeral_random_client_port()); - UdpClient::connected(remote_address, &local_address).await +#[allow(clippy::module_name_repetitions)] +pub struct UdpClient { + pub socket: Arc, } -/// Creates a new UDP tracker client connected to a UDP tracker server -pub async fn 
new_udp_tracker_client_connected(remote_address: &SocketAddr) -> Client { - let udp_client = new_udp_client_connected(remote_address).await; - Client { udp_client } -} +impl UdpClient { + pub async fn bind(local_address: &str) -> Self { + let socket = UdpSocket::bind(local_address).await.unwrap(); + Self { + socket: Arc::new(socket), + } + } -pub fn ephemeral_random_client_port() -> u16 { - // todo: this may produce random test failures because two tests can try to bind the same port. - // We could create a pool of available ports (with read/write lock) - let mut rng = thread_rng(); - rng.gen_range(49152..65535) + pub async fn connect(&self, remote_address: &str) { + self.socket.connect(remote_address).await.unwrap(); + } + + pub async fn send(&self, bytes: &[u8]) -> usize { + self.socket.writable().await.unwrap(); + self.socket.send(bytes).await.unwrap() + } + + pub async fn receive(&self, bytes: &mut [u8]) -> usize { + self.socket.readable().await.unwrap(); + self.socket.recv(bytes).await.unwrap() + } } -fn loopback_socket_address(port: u16) -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), port) +/// Creates a new `UdpClient` connected to a Udp server +pub async fn new_udp_client_connected(remote_address: &str) -> UdpClient { + let port = 0; // Let OS choose an unused port. 
+ let client = UdpClient::bind(&source_address(port)).await; + client.connect(remote_address).await; + client } -/// A UDP tracker client -pub struct Client { - pub udp_client: UdpClient, // A generic UDP client +#[allow(clippy::module_name_repetitions)] +pub struct UdpTrackerClient { + pub udp_client: UdpClient, } -impl Client { +impl UdpTrackerClient { pub async fn send(&self, request: Request) -> usize { // Write request into a buffer let request_buffer = vec![0u8; MAX_PACKET_SIZE]; @@ -63,3 +76,9 @@ impl Client { Response::from_bytes(&response_buffer[..payload_size], true).unwrap() } } + +/// Creates a new `UdpTrackerClient` connected to a Udp Tracker server +pub async fn new_udp_tracker_client_connected(remote_address: &str) -> UdpTrackerClient { + let udp_client = new_udp_client_connected(remote_address).await; + UdpTrackerClient { udp_client } +} diff --git a/tests/udp/mod.rs b/tests/udp/mod.rs index 16a77bb99..f45a4a4f9 100644 --- a/tests/udp/mod.rs +++ b/tests/udp/mod.rs @@ -1,3 +1,8 @@ pub mod asserts; pub mod client; -pub mod server; +pub mod test_environment; + +/// Generates the source address for the UDP client +fn source_address(port: u16) -> String { + format!("127.0.0.1:{port}") +} diff --git a/tests/udp/server.rs b/tests/udp/server.rs deleted file mode 100644 index 401d4cf92..000000000 --- a/tests/udp/server.rs +++ /dev/null @@ -1,67 +0,0 @@ -use std::net::SocketAddr; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Arc; - -use tokio::task::JoinHandle; -use torrust_tracker::config::{ephemeral_configuration, Configuration}; -use torrust_tracker::jobs::udp_tracker; -use torrust_tracker::tracker::statistics::Keeper; -use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; - -pub fn start_udp_tracker(configuration: &Arc) -> Server { - let mut udp_server = Server::new(); - udp_server.start(configuration); - udp_server -} - -pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral_configuration()) -} -pub struct 
Server { - pub started: AtomicBool, - pub job: Option>, - pub bind_address: Option, -} - -impl Server { - pub fn new() -> Self { - Self { - started: AtomicBool::new(false), - job: None, - bind_address: None, - } - } - - pub fn start(&mut self, configuration: &Arc) { - if !self.started.load(Ordering::Relaxed) { - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; - - // Initialize logging - logging::setup(configuration); - - let udp_tracker_config = &configuration.udp_trackers[0]; - - // Start the UDP tracker job - self.job = Some(udp_tracker::start_job(udp_tracker_config, tracker)); - - self.bind_address = Some(udp_tracker_config.bind_address.parse().unwrap()); - - self.started.store(true, Ordering::Relaxed); - } - } -} diff --git a/tests/udp/test_environment.rs b/tests/udp/test_environment.rs new file mode 100644 index 000000000..e53a7a580 --- /dev/null +++ b/tests/udp/test_environment.rs @@ -0,0 +1,131 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::tracker::peer::Peer; +use torrust_tracker::tracker::statistics::Keeper; +use torrust_tracker::tracker::Tracker; +use torrust_tracker::udp::server::{RunningUdpServer, StoppedUdpServer, UdpServer}; +use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; +use torrust_tracker_configuration::Configuration; +use torrust_tracker_test_helpers::configuration::ephemeral; + +fn tracker_configuration() -> Arc { + Arc::new(ephemeral()) 
+} + +#[allow(clippy::module_name_repetitions, dead_code)] +pub type StoppedTestEnvironment = TestEnvironment; +#[allow(clippy::module_name_repetitions)] +pub type RunningTestEnvironment = TestEnvironment; + +pub struct TestEnvironment { + pub tracker: Arc, + pub state: S, +} + +#[allow(dead_code)] +pub struct Stopped { + api_server: StoppedUdpServer, +} + +pub struct Running { + api_server: RunningUdpServer, +} + +impl TestEnvironment { + /// Add a torrent to the tracker + #[allow(dead_code)] + pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) { + self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + } +} + +impl TestEnvironment { + #[allow(dead_code)] + pub fn new_stopped() -> Self { + let udp_server = udp_server(); + + Self { + tracker: udp_server.tracker.clone(), + state: Stopped { api_server: udp_server }, + } + } + + #[allow(dead_code)] + pub async fn start(self) -> TestEnvironment { + TestEnvironment { + tracker: self.tracker, + state: Running { + api_server: self.state.api_server.start().await.unwrap(), + }, + } + } +} + +impl TestEnvironment { + pub async fn new_running() -> Self { + let udp_server = running_udp_server().await; + + Self { + tracker: udp_server.tracker.clone(), + state: Running { api_server: udp_server }, + } + } + + #[allow(dead_code)] + pub async fn stop(self) -> TestEnvironment { + TestEnvironment { + tracker: self.tracker, + state: Stopped { + api_server: self.state.api_server.stop().await.unwrap(), + }, + } + } + + pub fn bind_address(&self) -> SocketAddr { + self.state.api_server.state.bind_address + } +} + +#[allow(clippy::module_name_repetitions)] +pub async fn running_test_environment() -> RunningTestEnvironment { + TestEnvironment::new_running().await +} + +// TODO: Move to test-helpers crate once `Tracker` is isolated. 
+pub fn tracker_instance(configuration: &Arc) -> Arc { + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + let tracker = match Tracker::new(configuration, Some(stats_event_sender), stats_repository) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; + + // Initialize logging + logging::setup(configuration); + + tracker +} + +pub fn udp_server() -> StoppedUdpServer { + let config = tracker_configuration(); + + let tracker = tracker_instance(&config); + + UdpServer::new(config.udp_trackers[0].clone(), tracker) +} + +pub async fn running_udp_server() -> RunningUdpServer { + udp_server().start().await.unwrap() +} diff --git a/tests/udp_tracker.rs b/tests/udp_tracker.rs index 0287d01b7..b7cc3bd6f 100644 --- a/tests/udp_tracker.rs +++ b/tests/udp_tracker.rs @@ -19,8 +19,8 @@ mod udp_tracker_server { use torrust_tracker::udp::MAX_PACKET_SIZE; use crate::udp::asserts::is_error_response; - use crate::udp::client::{new_udp_client_connected, Client}; - use crate::udp::server::{start_udp_tracker, tracker_configuration}; + use crate::udp::client::{new_udp_client_connected, UdpTrackerClient}; + use crate::udp::test_environment::running_test_environment; fn empty_udp_request() -> [u8; MAX_PACKET_SIZE] { [0; MAX_PACKET_SIZE] @@ -30,7 +30,7 @@ mod udp_tracker_server { [0; MAX_PACKET_SIZE] } - async fn send_connection_request(transaction_id: TransactionId, client: &Client) -> ConnectionId { + async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { let connect_request = ConnectRequest { transaction_id }; client.send(connect_request.into()).await; @@ -45,11 +45,9 @@ mod udp_tracker_server 
{ #[tokio::test] async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { - let configuration = tracker_configuration(); + let test_env = running_test_environment().await; - let udp_server = start_udp_tracker(&configuration); - - let client = new_udp_client_connected(&udp_server.bind_address.unwrap()).await; + let client = new_udp_client_connected(&test_env.bind_address().to_string()).await; client.send(&empty_udp_request()).await; @@ -65,15 +63,13 @@ mod udp_tracker_server { use crate::udp::asserts::is_connect_response; use crate::udp::client::new_udp_tracker_client_connected; - use crate::udp::server::{start_udp_tracker, tracker_configuration}; + use crate::udp::test_environment::running_test_environment; #[tokio::test] async fn should_return_a_connect_response() { - let configuration = tracker_configuration(); - - let udp_server = start_udp_tracker(&configuration); + let test_env = running_test_environment().await; - let client = new_udp_tracker_client_connected(&udp_server.bind_address.unwrap()).await; + let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; let connect_request = ConnectRequest { transaction_id: TransactionId(123), @@ -97,16 +93,14 @@ mod udp_tracker_server { use crate::udp::asserts::is_ipv4_announce_response; use crate::udp::client::new_udp_tracker_client_connected; - use crate::udp::server::{start_udp_tracker, tracker_configuration}; + use crate::udp::test_environment::running_test_environment; use crate::udp_tracker_server::send_connection_request; #[tokio::test] async fn should_return_an_announce_response() { - let configuration = tracker_configuration(); + let test_env = running_test_environment().await; - let udp_server = start_udp_tracker(&configuration); - - let client = new_udp_tracker_client_connected(&udp_server.bind_address.unwrap()).await; + let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; let connection_id = 
send_connection_request(TransactionId(123), &client).await; @@ -140,16 +134,14 @@ mod udp_tracker_server { use crate::udp::asserts::is_scrape_response; use crate::udp::client::new_udp_tracker_client_connected; - use crate::udp::server::{start_udp_tracker, tracker_configuration}; + use crate::udp::test_environment::running_test_environment; use crate::udp_tracker_server::send_connection_request; #[tokio::test] async fn should_return_a_scrape_response() { - let configuration = tracker_configuration(); - - let udp_server = start_udp_tracker(&configuration); + let test_env = running_test_environment().await; - let client = new_udp_tracker_client_connected(&udp_server.bind_address.unwrap()).await; + let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; let connection_id = send_connection_request(TransactionId(123), &client).await; From a72c12349b852734b8f81478381cf2b50037aeac Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 23 Feb 2023 16:43:08 +0100 Subject: [PATCH 387/435] refactor: removed duplicate ephemeral configuration fn --- packages/configuration/src/lib.rs | 39 ---------------------- packages/test-helpers/src/configuration.rs | 25 ++++++++++---- tests/http/server.rs | 17 +++++----- 3 files changed, 27 insertions(+), 54 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 2121752c5..1003a92db 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -101,45 +101,6 @@ impl From for Error { } } -/// This configuration is used for testing. It generates random config values so they do not collide -/// if you run more than one tracker at the same time. -/// -/// # Panics -/// -/// Will panic if it can't convert the temp file path to string -#[must_use] -pub fn ephemeral_configuration() -> Configuration { - // todo: disable services that are not needed. - // For example: a test for the UDP tracker should disable the API and HTTP tracker. 
- - let mut config = Configuration { - log_level: Some("off".to_owned()), // Change to `debug` for tests debugging - ..Default::default() - }; - - // Ephemeral socket address for API - let api_port = 0u16; - config.http_api.enabled = true; - config.http_api.bind_address = format!("127.0.0.1:{}", &api_port); - - // Ephemeral socket address for UDP tracker - let udp_port = 0u16; - config.udp_trackers[0].enabled = true; - config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", &udp_port); - - // Ephemeral socket address for HTTP tracker - let http_port = 0u16; - config.http_trackers[0].enabled = true; - config.http_trackers[0].bind_address = format!("127.0.0.1:{}", &http_port); - - // Ephemeral sqlite database - let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join(format!("data_{}_{}_{}.db", &api_port, &udp_port, &http_port)); - config.db_path = temp_file.to_str().unwrap().to_owned(); - - config -} - impl Default for Configuration { fn default() -> Self { let mut configuration = Configuration { diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index f7c584d55..a978a050b 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -5,29 +5,40 @@ use torrust_tracker_configuration::Configuration; use crate::random; /// This configuration is used for testing. It generates random config values so they do not collide -/// if you run more than one tracker at the same time. +/// if you run more than one tracker at the same time. /// /// # Panics /// /// Will panic if it can't convert the temp file path to string #[must_use] pub fn ephemeral() -> Configuration { + // todo: disable services that are not needed. + // For example: a test for the UDP tracker should disable the API and HTTP tracker. 
+ let mut config = Configuration { - log_level: Some("off".to_owned()), + log_level: Some("off".to_owned()), // Change to `debug` for tests debugging ..Default::default() }; - // Ephemeral socket addresses - let bind_addr = "127.0.0.1:0".to_string(); + // Ephemeral socket address for API + let api_port = 0u16; + config.http_api.enabled = true; + config.http_api.bind_address = format!("127.0.0.1:{}", &api_port); + + // Ephemeral socket address for UDP tracker + let udp_port = 0u16; + config.udp_trackers[0].enabled = true; + config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", &udp_port); - config.http_api.bind_address = bind_addr.to_string(); - config.udp_trackers[0].bind_address = bind_addr; + // Ephemeral socket address for HTTP tracker + let http_port = 0u16; + config.http_trackers[0].enabled = true; + config.http_trackers[0].bind_address = format!("127.0.0.1:{}", &http_port); // Ephemeral sqlite database let temp_directory = env::temp_dir(); let random_db_id = random::string(16); let temp_file = temp_directory.join(format!("data_{random_db_id}.db")); - config.db_path = temp_file.to_str().unwrap().to_owned(); config diff --git a/tests/http/server.rs b/tests/http/server.rs index 147ad93c1..920c01f07 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -8,28 +8,29 @@ use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; -use torrust_tracker_configuration::{ephemeral_configuration, Configuration}; +use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::TrackerMode; +use torrust_tracker_test_helpers::configuration::ephemeral; use super::connection_info::ConnectionInfo; /// Starts a HTTP tracker with mode "public" in settings pub async fn start_public_http_tracker(version: Version) -> Server { - let mut configuration = ephemeral_configuration(); + let mut 
configuration = ephemeral(); configuration.mode = TrackerMode::Public; start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with mode "listed" in settings pub async fn start_whitelisted_http_tracker(version: Version) -> Server { - let mut configuration = ephemeral_configuration(); + let mut configuration = ephemeral(); configuration.mode = TrackerMode::Listed; start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with mode "private" in settings pub async fn start_private_http_tracker(version: Version) -> Server { - let mut configuration = ephemeral_configuration(); + let mut configuration = ephemeral(); configuration.mode = TrackerMode::Private; start_custom_http_tracker(Arc::new(configuration), version).await } @@ -42,7 +43,7 @@ pub async fn start_private_http_tracker(version: Version) -> Server { /// bind_address = "[::]:7070" /// ``` pub async fn start_ipv6_http_tracker(version: Version) -> Server { - let mut configuration = ephemeral_configuration(); + let mut configuration = ephemeral(); // Change socket address to "wildcard address" (unspecified address which means any IP address) // but keeping the random port generated with the ephemeral configuration. 
@@ -60,7 +61,7 @@ pub async fn start_ipv6_http_tracker(version: Version) -> Server { /// external_ip = "2.137.87.41" /// ``` pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr, version: Version) -> Server { - let mut configuration = ephemeral_configuration(); + let mut configuration = ephemeral(); configuration.external_ip = Some(external_ip.to_string()); start_custom_http_tracker(Arc::new(configuration), version).await } @@ -72,7 +73,7 @@ pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr, version: /// on_reverse_proxy = true /// ``` pub async fn start_http_tracker_on_reverse_proxy(version: Version) -> Server { - let mut configuration = ephemeral_configuration(); + let mut configuration = ephemeral(); configuration.on_reverse_proxy = true; start_custom_http_tracker(Arc::new(configuration), version).await } @@ -83,7 +84,7 @@ pub async fn start_default_http_tracker(version: Version) -> Server { } pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral_configuration()) + Arc::new(ephemeral()) } pub async fn start_custom_http_tracker(configuration: Arc, version: Version) -> Server { From 504bad32eb7ff7d0aca7fafdc2fb18ab3020f005 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 1 Mar 2023 16:22:06 +0100 Subject: [PATCH 388/435] refactor: abstract away http server implementations --- packages/configuration/src/lib.rs | 2 +- src/http/axum_implementation/server.rs | 114 +++++++++++++++++++++++-- src/http/mod.rs | 1 + src/http/tracker_interface.rs | 100 ++++++++++++++++++++++ src/http/warp_implementation/server.rs | 70 +++++++++++++++ tests/api/test_environment.rs | 30 +------ tests/common/mod.rs | 1 + tests/common/tracker.rs | 34 ++++++++ tests/http/mod.rs | 1 + tests/http/test_environment.rs | 102 ++++++++++++++++++++++ tests/http_tracker.rs | 24 ++++++ tests/udp/test_environment.rs | 43 ++-------- 12 files changed, 452 insertions(+), 70 deletions(-) create mode 100644 src/http/tracker_interface.rs create mode 100644 
tests/common/tracker.rs create mode 100644 tests/http/test_environment.rs diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 1003a92db..d42c82df9 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -21,7 +21,7 @@ pub struct UdpTracker { } #[serde_as] -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct HttpTracker { pub enabled: bool, pub bind_address: String, diff --git a/src/http/axum_implementation/server.rs b/src/http/axum_implementation/server.rs index 30c580af6..f2a7371be 100644 --- a/src/http/axum_implementation/server.rs +++ b/src/http/axum_implementation/server.rs @@ -1,19 +1,123 @@ +use std::future::Future; use std::net::SocketAddr; +use std::str::FromStr; use std::sync::Arc; +use async_trait::async_trait; use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; -use futures::Future; +use futures::future::BoxFuture; use log::info; use warp::hyper; use super::routes::router; +use crate::http::tracker_interface::TrackerInterfaceTrait; use crate::tracker::Tracker; -pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { +#[derive(Debug)] +pub enum Error { + Error(String), +} + +pub struct Server; + +impl Server { + pub fn start_from_tcp_listener_with_graceful_shutdown( + tcp_listener: std::net::TcpListener, + tracker: Arc, + shutdown_signal: F, + ) -> BoxFuture<'static, ()> + where + F: Future + Send + 'static, + { + let app = router(&tracker); + + Box::pin(async { + axum::Server::from_tcp(tcp_listener) + .expect("Could not bind to tcp listener.") + .serve(app.into_make_service()) + .with_graceful_shutdown(shutdown_signal) + .await + .expect("Axum server crashed."); + }) + } + + pub fn start_tls_from_tcp_listener_with_graceful_shutdown( + tcp_listener: std::net::TcpListener, + (ssl_cert_path, ssl_key_path): (String, String), + tracker: Arc, + shutdown_signal: F, + ) -> 
BoxFuture<'static, ()> + where + F: Future + Send + 'static, + { + let app = router(&tracker); + + let handle = Handle::new(); + + let cloned_handle = handle.clone(); + + tokio::task::spawn_local(async move { + shutdown_signal.await; + cloned_handle.shutdown(); + }); + + Box::pin(async { + let tls_config = RustlsConfig::from_pem_file(ssl_cert_path, ssl_key_path) + .await + .expect("Could not read tls cert."); + + axum_server::from_tcp_rustls(tcp_listener, tls_config) + .handle(handle) + .serve(app.into_make_service()) + .await + .expect("Axum server crashed."); + }) + } +} + +#[async_trait] +impl TrackerInterfaceTrait for Server { + fn new() -> Self { + Self {} + } + + fn start_with_graceful_shutdown( + &self, + cfg: torrust_tracker_configuration::HttpTracker, + tracker: Arc, + shutdown_signal: F, + ) -> (SocketAddr, BoxFuture<'static, ()>) + where + F: Future + Send + 'static, + { + let addr = SocketAddr::from_str(&cfg.bind_address).expect("bind_address is not a valid SocketAddr."); + let tcp_listener = std::net::TcpListener::bind(addr).expect("Could not bind tcp_listener to address."); + let bind_addr = tcp_listener + .local_addr() + .expect("Could not get local_addr from tcp_listener."); + + if let (true, Some(ssl_cert_path), Some(ssl_key_path)) = (cfg.ssl_enabled, &cfg.ssl_cert_path, &cfg.ssl_key_path) { + let server = Self::start_tls_from_tcp_listener_with_graceful_shutdown( + tcp_listener, + (ssl_cert_path.to_string(), ssl_key_path.to_string()), + tracker, + shutdown_signal, + ); + + (bind_addr, server) + } else { + let server = Self::start_from_tcp_listener_with_graceful_shutdown(tcp_listener, tracker, shutdown_signal); + + (bind_addr, server) + } + } +} + +pub fn start(socket_addr: std::net::SocketAddr, tracker: &Arc) -> impl Future> { let app = router(tracker); - let server = axum::Server::bind(&socket_addr).serve(app.into_make_service_with_connect_info::()); + let server = 
axum::Server::bind(&socket_addr).serve(app.into_make_service_with_connect_info::()); server.with_graceful_shutdown(async move { tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); @@ -22,7 +126,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future, ) -> impl Future> { @@ -39,5 +143,5 @@ pub fn start_tls( axum_server::bind_rustls(socket_addr, ssl_config) .handle(handle) - .serve(app.into_make_service_with_connect_info::()) + .serve(app.into_make_service_with_connect_info::()) } diff --git a/src/http/mod.rs b/src/http/mod.rs index 039a2067b..c2cbb43df 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -14,6 +14,7 @@ use serde::{Deserialize, Serialize}; pub mod axum_implementation; pub mod percent_encoding; +pub mod tracker_interface; pub mod warp_implementation; #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] diff --git a/src/http/tracker_interface.rs b/src/http/tracker_interface.rs new file mode 100644 index 000000000..033d5a75d --- /dev/null +++ b/src/http/tracker_interface.rs @@ -0,0 +1,100 @@ +use std::future::Future; +use std::net::SocketAddr; +use std::sync::Arc; + +use futures::future::BoxFuture; + +use crate::signals::shutdown_signal; +use crate::tracker::Tracker; + +/// Trait to be implemented by a http interface for the tracker. 
+#[allow(clippy::module_name_repetitions)] +pub trait TrackerInterfaceTrait: Sync + Send { + fn new() -> Self; + + fn start_with_graceful_shutdown( + &self, + cfg: torrust_tracker_configuration::HttpTracker, + tracker: Arc, + shutdown_signal: F, + ) -> (SocketAddr, BoxFuture<'static, ()>) + where + F: Future + Send + 'static; +} + +#[derive(Debug)] +pub enum Error { + Error(String), +} + +#[allow(clippy::module_name_repetitions)] +pub type StoppedHttpServer = TrackerInterface>; +#[allow(clippy::module_name_repetitions)] +pub type RunningHttpServer = TrackerInterface>; + +pub struct TrackerInterface { + cfg: torrust_tracker_configuration::HttpTracker, + state: S, +} + +pub struct Stopped { + interface: I, +} + +pub struct Running { + bind_addr: SocketAddr, + task_killer: tokio::sync::oneshot::Sender, + task: tokio::task::JoinHandle, +} + +impl TrackerInterface> { + pub fn new(cfg: torrust_tracker_configuration::HttpTracker, interface: I) -> Self { + Self { + cfg, + state: Stopped { interface }, + } + } + + pub async fn start(self, tracker: Arc) -> Result>, Error> { + let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::(); + let (addr_sender, addr_receiver) = tokio::sync::oneshot::channel::(); + + let configuration = self.cfg.clone(); + let interface = self.state.interface; + + let task = tokio::spawn(async move { + let (bind_addr, server) = + interface.start_with_graceful_shutdown(configuration, tracker, shutdown_signal(shutdown_receiver)); + + addr_sender.send(bind_addr).unwrap(); + + server.await; + + interface + }); + + let bind_address = addr_receiver.await.expect("Could not receive bind_address."); + + Ok(TrackerInterface { + cfg: self.cfg, + state: Running { + bind_addr: bind_address, + task_killer: shutdown_sender, + task, + }, + }) + } +} + +impl TrackerInterface> { + pub async fn stop(self) -> Result>, Error> { + self.state.task_killer.send(0).unwrap(); + + let interface = self.state.task.await.map_err(|e| 
Error::Error(e.to_string()))?; + + Ok(TrackerInterface { + cfg: self.cfg, + state: Stopped { interface }, + }) + } +} diff --git a/src/http/warp_implementation/server.rs b/src/http/warp_implementation/server.rs index 894d3e911..6b0665fce 100644 --- a/src/http/warp_implementation/server.rs +++ b/src/http/warp_implementation/server.rs @@ -1,8 +1,78 @@ +use std::future::Future; use std::net::SocketAddr; +use std::str::FromStr; use std::sync::Arc; +use futures::future::BoxFuture; + use super::routes; +use crate::http::tracker_interface::TrackerInterfaceTrait; use crate::tracker; +use crate::tracker::Tracker; + +#[derive(Debug)] +pub enum Error { + Error(String), +} + +pub struct Server; + +impl Server { + pub fn start_with_graceful_shutdown( + addr: SocketAddr, + tracker: Arc, + shutdown_signal: F, + ) -> (SocketAddr, BoxFuture<'static, ()>) + where + F: Future + Send + 'static, + { + let (bind_addr, server) = warp::serve(routes::routes(tracker)).bind_with_graceful_shutdown(addr, shutdown_signal); + + (bind_addr, Box::pin(server)) + } + + pub fn start_tls_with_graceful_shutdown( + addr: SocketAddr, + (ssl_cert_path, ssl_key_path): (&str, &str), + tracker: Arc, + shutdown_signal: F, + ) -> (SocketAddr, BoxFuture<'static, ()>) + where + F: Future + Send + 'static, + { + let (bind_addr, server) = warp::serve(routes::routes(tracker)) + .tls() + .cert_path(ssl_cert_path) + .key_path(ssl_key_path) + .bind_with_graceful_shutdown(addr, shutdown_signal); + + (bind_addr, Box::pin(server)) + } +} + +impl TrackerInterfaceTrait for Server { + fn new() -> Self { + Self {} + } + + fn start_with_graceful_shutdown( + &self, + cfg: torrust_tracker_configuration::HttpTracker, + tracker: Arc, + shutdown_signal: F, + ) -> (SocketAddr, BoxFuture<'static, ()>) + where + F: Future + Send + 'static, + { + let addr = SocketAddr::from_str(&cfg.bind_address).expect("bind_address is not a valid SocketAddr."); + + if let (true, Some(ssl_cert_path), Some(ssl_key_path)) = (cfg.ssl_enabled, 
&cfg.ssl_cert_path, &cfg.ssl_key_path) { + Self::start_tls_with_graceful_shutdown(addr, (ssl_cert_path, ssl_key_path), tracker, shutdown_signal) + } else { + Self::start_with_graceful_shutdown(addr, tracker, shutdown_signal) + } + } +} /// Server that listens on HTTP, needs a `tracker::TorrentTracker` #[derive(Clone)] diff --git a/tests/api/test_environment.rs b/tests/api/test_environment.rs index ff143ec7a..78ff7d259 100644 --- a/tests/api/test_environment.rs +++ b/tests/api/test_environment.rs @@ -11,6 +11,7 @@ use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration::ephemeral; use super::connection_info::ConnectionInfo; +use crate::common::tracker::{tracker_configuration, tracker_instance}; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment; @@ -92,35 +93,6 @@ pub fn running_test_environment() -> RunningTestEnvironment { TestEnvironment::new_running() } -pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral()) -} - -// TODO: Move to test-helpers crate once `Tracker` is isolated. 
-pub fn tracker_instance(configuration: &Arc) -> Arc { - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - let tracker = match Tracker::new(configuration, Some(stats_event_sender), stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; - - // Initialize logging - logging::setup(configuration); - - tracker -} - pub fn api_server() -> StoppedApiServer { let config = tracker_configuration(); diff --git a/tests/common/mod.rs b/tests/common/mod.rs index b57996292..9452cc111 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,3 +1,4 @@ pub mod fixtures; pub mod http; +pub mod tracker; pub mod udp; diff --git a/tests/common/tracker.rs b/tests/common/tracker.rs new file mode 100644 index 000000000..c0e44749b --- /dev/null +++ b/tests/common/tracker.rs @@ -0,0 +1,34 @@ +use std::sync::Arc; + +use torrust_tracker::tracker::statistics::Keeper; +use torrust_tracker::tracker::Tracker; +use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; + +pub fn tracker_configuration() -> Arc { + Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) +} + +// TODO: Move to test-helpers crate once `Tracker` is isolated. 
+pub fn tracker_instance(configuration: &Arc) -> Arc { + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + let tracker = match Tracker::new(configuration, Some(stats_event_sender), stats_repository) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; + + // Initialize logging + logging::setup(configuration); + + tracker +} diff --git a/tests/http/mod.rs b/tests/http/mod.rs index 40616025b..a335723e3 100644 --- a/tests/http/mod.rs +++ b/tests/http/mod.rs @@ -5,6 +5,7 @@ pub mod connection_info; pub mod requests; pub mod responses; pub mod server; +pub mod test_environment; use percent_encoding::NON_ALPHANUMERIC; diff --git a/tests/http/test_environment.rs b/tests/http/test_environment.rs new file mode 100644 index 000000000..acf0224ef --- /dev/null +++ b/tests/http/test_environment.rs @@ -0,0 +1,102 @@ +use core::panic; +use std::sync::Arc; + +use torrust_tracker::http::tracker_interface::{RunningHttpServer, StoppedHttpServer, TrackerInterface, TrackerInterfaceTrait}; +use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::tracker::peer::Peer; +use torrust_tracker::tracker::statistics::Keeper; +use torrust_tracker::tracker::Tracker; +use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; +use torrust_tracker_configuration::Configuration; +use torrust_tracker_test_helpers::configuration::ephemeral; + +use crate::common::tracker::{tracker_configuration, tracker_instance}; + +#[allow(clippy::module_name_repetitions, dead_code)] +pub type StoppedTestEnvironment = TestEnvironment>; +#[allow(clippy::module_name_repetitions)] +pub type RunningTestEnvironment = TestEnvironment>; + +pub struct 
TestEnvironment { + pub tracker: Arc, + pub state: S, +} + +#[allow(dead_code)] +pub struct Stopped { + http_server: StoppedHttpServer, +} + +pub struct Running { + http_server: RunningHttpServer, +} + +impl TestEnvironment { + /// Add a torrent to the tracker + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { + self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + } +} + +impl TestEnvironment> { + #[allow(dead_code)] + pub fn new_stopped() -> Self { + let cfg = tracker_configuration(); + + let tracker = tracker_instance(&cfg); + + let http_server = stopped_http_server(cfg.http_trackers[0].clone()); + + Self { + tracker, + state: Stopped { http_server }, + } + } + + #[allow(dead_code)] + pub async fn start(self) -> TestEnvironment> { + TestEnvironment { + tracker: self.tracker.clone(), + state: Running { + http_server: self.state.http_server.start(self.tracker).await.unwrap(), + }, + } + } +} + +impl TestEnvironment> { + pub async fn new_running() -> Self { + let test_env = StoppedTestEnvironment::new_stopped(); + + test_env.start().await + } + + pub async fn stop(self) -> TestEnvironment> { + TestEnvironment { + tracker: self.tracker, + state: Stopped { + http_server: self.state.http_server.stop().await.unwrap(), + }, + } + } +} + +#[allow(clippy::module_name_repetitions)] +pub async fn running_test_environment() -> RunningTestEnvironment { + TestEnvironment::new_running().await +} + +pub fn stopped_http_server( + cfg: torrust_tracker_configuration::HttpTracker, +) -> StoppedHttpServer { + let http_server = I::new(); + + TrackerInterface::new(cfg, http_server) +} + +pub async fn running_http_server( + cfg: torrust_tracker_configuration::HttpTracker, + tracker: Arc, +) -> RunningHttpServer { + stopped_http_server(cfg).start(tracker).await.unwrap() +} diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 4219be30a..fd9adee34 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -12,6 +12,30 @@ 
mod common; mod http; +pub type Axum = torrust_tracker::http::axum_implementation::server::Server; +pub type Warp = torrust_tracker::http::warp_implementation::server::Server; + +mod http_tracker_test_environment { + use torrust_tracker::http; + + use crate::http::test_environment::running_test_environment; + use crate::{Axum, Warp}; + + #[tokio::test] + async fn should_be_able_to_start_and_stop_a_test_environment_using_axum() { + let test_env = running_test_environment::().await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_be_able_to_start_and_stop_a_test_environment_using_warp() { + let test_env = running_test_environment::().await; + + test_env.stop().await; + } +} + mod warp_http_tracker_server { mod for_all_config_modes { diff --git a/tests/udp/test_environment.rs b/tests/udp/test_environment.rs index e53a7a580..dffe458dd 100644 --- a/tests/udp/test_environment.rs +++ b/tests/udp/test_environment.rs @@ -10,9 +10,7 @@ use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration::ephemeral; -fn tracker_configuration() -> Arc { - Arc::new(ephemeral()) -} +use crate::common::tracker::{tracker_configuration, tracker_instance}; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment; @@ -26,11 +24,11 @@ pub struct TestEnvironment { #[allow(dead_code)] pub struct Stopped { - api_server: StoppedUdpServer, + udp_server: StoppedUdpServer, } pub struct Running { - api_server: RunningUdpServer, + udp_server: RunningUdpServer, } impl TestEnvironment { @@ -48,7 +46,7 @@ impl TestEnvironment { Self { tracker: udp_server.tracker.clone(), - state: Stopped { api_server: udp_server }, + state: Stopped { udp_server: udp_server }, } } @@ -57,7 +55,7 @@ impl TestEnvironment { TestEnvironment { tracker: self.tracker, state: Running { - api_server: self.state.api_server.start().await.unwrap(), + udp_server: 
self.state.udp_server.start().await.unwrap(), }, } } @@ -69,7 +67,7 @@ impl TestEnvironment { Self { tracker: udp_server.tracker.clone(), - state: Running { api_server: udp_server }, + state: Running { udp_server: udp_server }, } } @@ -78,13 +76,13 @@ impl TestEnvironment { TestEnvironment { tracker: self.tracker, state: Stopped { - api_server: self.state.api_server.stop().await.unwrap(), + udp_server: self.state.udp_server.stop().await.unwrap(), }, } } pub fn bind_address(&self) -> SocketAddr { - self.state.api_server.state.bind_address + self.state.udp_server.state.bind_address } } @@ -93,31 +91,6 @@ pub async fn running_test_environment() -> RunningTestEnvironment { TestEnvironment::new_running().await } -// TODO: Move to test-helpers crate once `Tracker` is isolated. -pub fn tracker_instance(configuration: &Arc) -> Arc { - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - let tracker = match Tracker::new(configuration, Some(stats_event_sender), stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; - - // Initialize logging - logging::setup(configuration); - - tracker -} - pub fn udp_server() -> StoppedUdpServer { let config = tracker_configuration(); From 191fbac1353cb64bb39b5bb7239334410ba4d6e0 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 1 Mar 2023 16:43:18 +0100 Subject: [PATCH 389/435] refactor: moved signals mod to own file --- src/lib.rs | 45 +-------------------------------------------- src/signals.rs | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 44 deletions(-) create mode 100644 src/signals.rs diff --git a/src/lib.rs b/src/lib.rs index 
f80bcfb6c..f01ff0468 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -5,6 +5,7 @@ pub mod jobs; pub mod logging; pub mod protocol; pub mod setup; +pub mod signals; pub mod stats; pub mod tracker; pub mod udp; @@ -30,47 +31,3 @@ pub mod ephemeral_instance_keys { pub static ref RANDOM_SEED: Seed = Rng::gen(&mut ThreadRng::default()); } } - -pub mod signals { - use log::info; - - /// Resolves on `ctrl_c` or the `terminate` signal. - pub async fn global_shutdown_signal() { - let ctrl_c = async { - tokio::signal::ctrl_c().await.expect("failed to install Ctrl+C handler"); - }; - - #[cfg(unix)] - let terminate = async { - tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate()) - .expect("failed to install signal handler") - .recv() - .await; - }; - - #[cfg(not(unix))] - let terminate = std::future::pending::<()>(); - - tokio::select! { - _ = ctrl_c => {}, - _ = terminate => {} - } - } - - /// Resolves when the `stop_receiver` or the `global_shutdown_signal()` resolves. - pub async fn shutdown_signal(stop_receiver: tokio::sync::oneshot::Receiver) { - let stop = async { stop_receiver.await.expect("Failed to install stop signal.") }; - - tokio::select! { - _ = stop => {}, - _ = global_shutdown_signal() => {} - } - } - - /// Same as `shutdown_signal()`, but shows a message when it resolves. - pub async fn shutdown_signal_with_message(stop_receiver: tokio::sync::oneshot::Receiver, message: String) { - shutdown_signal(stop_receiver).await; - - info!("{message}"); - } -} diff --git a/src/signals.rs b/src/signals.rs new file mode 100644 index 000000000..b5a25ded7 --- /dev/null +++ b/src/signals.rs @@ -0,0 +1,41 @@ +use log::info; + +/// Resolves on `ctrl_c` or the `terminate` signal. 
+pub async fn global_shutdown_signal() { + let ctrl_c = async { + tokio::signal::ctrl_c().await.expect("failed to install Ctrl+C handler"); + }; + + #[cfg(unix)] + let terminate = async { + tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate()) + .expect("failed to install signal handler") + .recv() + .await; + }; + + #[cfg(not(unix))] + let terminate = std::future::pending::<()>(); + + tokio::select! { + _ = ctrl_c => {}, + _ = terminate => {} + } +} + +/// Resolves when the `stop_receiver` or the `global_shutdown_signal()` resolves. +pub async fn shutdown_signal(stop_receiver: tokio::sync::oneshot::Receiver) { + let stop = async { stop_receiver.await.expect("Failed to install stop signal.") }; + + tokio::select! { + _ = stop => {}, + _ = global_shutdown_signal() => {} + } +} + +/// Same as `shutdown_signal()`, but shows a message when it resolves. +pub async fn shutdown_signal_with_message(stop_receiver: tokio::sync::oneshot::Receiver, message: String) { + shutdown_signal(stop_receiver).await; + + info!("{message}"); +} From f40e43f8cb0433aa81e6e27cf056d4df5c4fe39d Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 1 Mar 2023 17:16:38 +0100 Subject: [PATCH 390/435] refactor: prepend paths to ephemeral calls --- src/apis/server.rs | 3 +-- src/tracker/services/statistics.rs | 3 +-- src/tracker/services/torrent.rs | 6 ++---- src/udp/handlers.rs | 3 +-- tests/api/test_environment.rs | 5 ----- tests/http/server.rs | 15 +++++++-------- tests/http/test_environment.rs | 5 ----- tests/http_tracker.rs | 2 -- tests/udp/test_environment.rs | 4 ---- 9 files changed, 12 insertions(+), 34 deletions(-) diff --git a/src/apis/server.rs b/src/apis/server.rs index 5ec22f253..4594456fb 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -188,14 +188,13 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; - use torrust_tracker_test_helpers::configuration::ephemeral; use crate::apis::server::ApiServer; use crate::tracker; use 
crate::tracker::statistics; fn tracker_configuration() -> Arc { - Arc::new(ephemeral()) + Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) } #[tokio::test] diff --git a/src/tracker/services/statistics.rs b/src/tracker/services/statistics.rs index c0aaf9c64..94a9b1bd5 100644 --- a/src/tracker/services/statistics.rs +++ b/src/tracker/services/statistics.rs @@ -37,14 +37,13 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; - use torrust_tracker_test_helpers::configuration::ephemeral; use crate::tracker; use crate::tracker::services::common::tracker_factory; use crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral()) + Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) } #[tokio::test] diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index ce652a091..fc5686e23 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -138,7 +138,6 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; - use torrust_tracker_test_helpers::configuration::ephemeral; use crate::protocol::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; @@ -146,7 +145,7 @@ mod tests { use crate::tracker::services::torrent::{get_torrent_info, Info}; pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral()) + Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) } #[tokio::test] @@ -193,7 +192,6 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; - use torrust_tracker_test_helpers::configuration::ephemeral; use crate::protocol::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; @@ -201,7 +199,7 @@ mod tests { use crate::tracker::services::torrent::{get_torrents, BasicInfo, Pagination}; pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral()) + 
Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) } #[tokio::test] diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 4a0874c72..411590d2f 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -252,7 +252,6 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::TrackerMode; - use torrust_tracker_test_helpers::configuration::ephemeral; use crate::protocol::clock::{Current, Time}; use crate::tracker::{self, peer, statistics}; @@ -262,7 +261,7 @@ mod tests { } fn default_testing_tracker_configuration() -> Configuration { - ephemeral() + torrust_tracker_test_helpers::configuration::ephemeral() } fn initialized_public_tracker() -> Arc { diff --git a/tests/api/test_environment.rs b/tests/api/test_environment.rs index 78ff7d259..4f119fd64 100644 --- a/tests/api/test_environment.rs +++ b/tests/api/test_environment.rs @@ -1,14 +1,9 @@ -use core::panic; use std::sync::Arc; use torrust_tracker::apis::server::{ApiServer, RunningApiServer, StoppedApiServer}; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; -use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::tracker::Tracker; -use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; -use torrust_tracker_configuration::Configuration; -use torrust_tracker_test_helpers::configuration::ephemeral; use super::connection_info::ConnectionInfo; use crate::common::tracker::{tracker_configuration, tracker_instance}; diff --git a/tests/http/server.rs b/tests/http/server.rs index 920c01f07..4753ee4dc 100644 --- a/tests/http/server.rs +++ b/tests/http/server.rs @@ -10,27 +10,26 @@ use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::TrackerMode; -use 
torrust_tracker_test_helpers::configuration::ephemeral; use super::connection_info::ConnectionInfo; /// Starts a HTTP tracker with mode "public" in settings pub async fn start_public_http_tracker(version: Version) -> Server { - let mut configuration = ephemeral(); + let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); configuration.mode = TrackerMode::Public; start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with mode "listed" in settings pub async fn start_whitelisted_http_tracker(version: Version) -> Server { - let mut configuration = ephemeral(); + let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); configuration.mode = TrackerMode::Listed; start_custom_http_tracker(Arc::new(configuration), version).await } /// Starts a HTTP tracker with mode "private" in settings pub async fn start_private_http_tracker(version: Version) -> Server { - let mut configuration = ephemeral(); + let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); configuration.mode = TrackerMode::Private; start_custom_http_tracker(Arc::new(configuration), version).await } @@ -43,7 +42,7 @@ pub async fn start_private_http_tracker(version: Version) -> Server { /// bind_address = "[::]:7070" /// ``` pub async fn start_ipv6_http_tracker(version: Version) -> Server { - let mut configuration = ephemeral(); + let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); // Change socket address to "wildcard address" (unspecified address which means any IP address) // but keeping the random port generated with the ephemeral configuration. 
@@ -61,7 +60,7 @@ pub async fn start_ipv6_http_tracker(version: Version) -> Server { /// external_ip = "2.137.87.41" /// ``` pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr, version: Version) -> Server { - let mut configuration = ephemeral(); + let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); configuration.external_ip = Some(external_ip.to_string()); start_custom_http_tracker(Arc::new(configuration), version).await } @@ -73,7 +72,7 @@ pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr, version: /// on_reverse_proxy = true /// ``` pub async fn start_http_tracker_on_reverse_proxy(version: Version) -> Server { - let mut configuration = ephemeral(); + let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); configuration.on_reverse_proxy = true; start_custom_http_tracker(Arc::new(configuration), version).await } @@ -84,7 +83,7 @@ pub async fn start_default_http_tracker(version: Version) -> Server { } pub fn tracker_configuration() -> Arc { - Arc::new(ephemeral()) + Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) } pub async fn start_custom_http_tracker(configuration: Arc, version: Version) -> Server { diff --git a/tests/http/test_environment.rs b/tests/http/test_environment.rs index acf0224ef..f6770b2ad 100644 --- a/tests/http/test_environment.rs +++ b/tests/http/test_environment.rs @@ -1,14 +1,9 @@ -use core::panic; use std::sync::Arc; use torrust_tracker::http::tracker_interface::{RunningHttpServer, StoppedHttpServer, TrackerInterface, TrackerInterfaceTrait}; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; -use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::tracker::Tracker; -use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; -use torrust_tracker_configuration::Configuration; -use torrust_tracker_test_helpers::configuration::ephemeral; use 
crate::common::tracker::{tracker_configuration, tracker_instance}; diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index fd9adee34..c0d0bdd23 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -16,8 +16,6 @@ pub type Axum = torrust_tracker::http::axum_implementation::server::Server; pub type Warp = torrust_tracker::http::warp_implementation::server::Server; mod http_tracker_test_environment { - use torrust_tracker::http; - use crate::http::test_environment::running_test_environment; use crate::{Axum, Warp}; diff --git a/tests/udp/test_environment.rs b/tests/udp/test_environment.rs index dffe458dd..585a8f934 100644 --- a/tests/udp/test_environment.rs +++ b/tests/udp/test_environment.rs @@ -3,12 +3,8 @@ use std::sync::Arc; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; -use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::tracker::Tracker; use torrust_tracker::udp::server::{RunningUdpServer, StoppedUdpServer, UdpServer}; -use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; -use torrust_tracker_configuration::Configuration; -use torrust_tracker_test_helpers::configuration::ephemeral; use crate::common::tracker::{tracker_configuration, tracker_instance}; From 4f2b035a36901fa3f8ec52537923bb0739080e19 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 2 Mar 2023 14:45:07 +0100 Subject: [PATCH 391/435] refactor: renamed `TrackerInterface` struct and relevant trait --- src/http/axum_implementation/server.rs | 4 +-- src/http/tracker_interface.rs | 42 +++++++++++++------------- src/http/warp_implementation/server.rs | 4 +-- tests/http/test_environment.rs | 23 ++++++++------ 4 files changed, 39 insertions(+), 34 deletions(-) diff --git a/src/http/axum_implementation/server.rs b/src/http/axum_implementation/server.rs index f2a7371be..a12d60332 100644 --- a/src/http/axum_implementation/server.rs +++ b/src/http/axum_implementation/server.rs @@ -11,7 +11,7 @@ use 
log::info; use warp::hyper; use super::routes::router; -use crate::http::tracker_interface::TrackerInterfaceTrait; +use crate::http::tracker_interface::HttpServerLauncher; use crate::tracker::Tracker; #[derive(Debug)] @@ -77,7 +77,7 @@ impl Server { } #[async_trait] -impl TrackerInterfaceTrait for Server { +impl HttpServerLauncher for Server { fn new() -> Self { Self {} } diff --git a/src/http/tracker_interface.rs b/src/http/tracker_interface.rs index 033d5a75d..a8bb057d7 100644 --- a/src/http/tracker_interface.rs +++ b/src/http/tracker_interface.rs @@ -7,9 +7,9 @@ use futures::future::BoxFuture; use crate::signals::shutdown_signal; use crate::tracker::Tracker; -/// Trait to be implemented by a http interface for the tracker. +/// Trait to be implemented by a http server launcher for the tracker. #[allow(clippy::module_name_repetitions)] -pub trait TrackerInterfaceTrait: Sync + Send { +pub trait HttpServerLauncher: Sync + Send { fn new() -> Self; fn start_with_graceful_shutdown( @@ -28,54 +28,54 @@ pub enum Error { } #[allow(clippy::module_name_repetitions)] -pub type StoppedHttpServer = TrackerInterface>; +pub type StoppedHttpServer = HttpServer>; #[allow(clippy::module_name_repetitions)] -pub type RunningHttpServer = TrackerInterface>; +pub type RunningHttpServer = HttpServer>; -pub struct TrackerInterface { +pub struct HttpServer { cfg: torrust_tracker_configuration::HttpTracker, state: S, } -pub struct Stopped { - interface: I, +pub struct Stopped { + launcher: I, } -pub struct Running { +pub struct Running { bind_addr: SocketAddr, task_killer: tokio::sync::oneshot::Sender, task: tokio::task::JoinHandle, } -impl TrackerInterface> { - pub fn new(cfg: torrust_tracker_configuration::HttpTracker, interface: I) -> Self { +impl HttpServer> { + pub fn new(cfg: torrust_tracker_configuration::HttpTracker, launcher: I) -> Self { Self { cfg, - state: Stopped { interface }, + state: Stopped { launcher }, } } - pub async fn start(self, tracker: Arc) -> Result>, Error> { + 
pub async fn start(self, tracker: Arc) -> Result>, Error> { let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::(); let (addr_sender, addr_receiver) = tokio::sync::oneshot::channel::(); let configuration = self.cfg.clone(); - let interface = self.state.interface; + let launcher = self.state.launcher; let task = tokio::spawn(async move { let (bind_addr, server) = - interface.start_with_graceful_shutdown(configuration, tracker, shutdown_signal(shutdown_receiver)); + launcher.start_with_graceful_shutdown(configuration, tracker, shutdown_signal(shutdown_receiver)); addr_sender.send(bind_addr).unwrap(); server.await; - interface + launcher }); let bind_address = addr_receiver.await.expect("Could not receive bind_address."); - Ok(TrackerInterface { + Ok(HttpServer { cfg: self.cfg, state: Running { bind_addr: bind_address, @@ -86,15 +86,15 @@ impl TrackerInterface> { } } -impl TrackerInterface> { - pub async fn stop(self) -> Result>, Error> { +impl HttpServer> { + pub async fn stop(self) -> Result>, Error> { self.state.task_killer.send(0).unwrap(); - let interface = self.state.task.await.map_err(|e| Error::Error(e.to_string()))?; + let launcher = self.state.task.await.map_err(|e| Error::Error(e.to_string()))?; - Ok(TrackerInterface { + Ok(HttpServer { cfg: self.cfg, - state: Stopped { interface }, + state: Stopped { launcher }, }) } } diff --git a/src/http/warp_implementation/server.rs b/src/http/warp_implementation/server.rs index 6b0665fce..8d01559f3 100644 --- a/src/http/warp_implementation/server.rs +++ b/src/http/warp_implementation/server.rs @@ -6,7 +6,7 @@ use std::sync::Arc; use futures::future::BoxFuture; use super::routes; -use crate::http::tracker_interface::TrackerInterfaceTrait; +use crate::http::tracker_interface::HttpServerLauncher; use crate::tracker; use crate::tracker::Tracker; @@ -50,7 +50,7 @@ impl Server { } } -impl TrackerInterfaceTrait for Server { +impl HttpServerLauncher for Server { fn new() -> Self { Self {} } diff --git 
a/tests/http/test_environment.rs b/tests/http/test_environment.rs index f6770b2ad..87232f79b 100644 --- a/tests/http/test_environment.rs +++ b/tests/http/test_environment.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use torrust_tracker::http::tracker_interface::{RunningHttpServer, StoppedHttpServer, TrackerInterface, TrackerInterfaceTrait}; +use torrust_tracker::http::tracker_interface::{HttpServer, HttpServerLauncher, RunningHttpServer, StoppedHttpServer}; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; @@ -18,11 +18,11 @@ pub struct TestEnvironment { } #[allow(dead_code)] -pub struct Stopped { +pub struct Stopped { http_server: StoppedHttpServer, } -pub struct Running { +pub struct Running { http_server: RunningHttpServer, } @@ -33,7 +33,7 @@ impl TestEnvironment { } } -impl TestEnvironment> { +impl TestEnvironment> { #[allow(dead_code)] pub fn new_stopped() -> Self { let cfg = tracker_configuration(); @@ -59,7 +59,7 @@ impl TestEnvironment> { } } -impl TestEnvironment> { +impl TestEnvironment> { pub async fn new_running() -> Self { let test_env = StoppedTestEnvironment::new_stopped(); @@ -77,19 +77,24 @@ impl TestEnvironment> { } #[allow(clippy::module_name_repetitions)] -pub async fn running_test_environment() -> RunningTestEnvironment { +pub async fn stopped_test_environment() -> StoppedTestEnvironment { + TestEnvironment::new_stopped().await +} + +#[allow(clippy::module_name_repetitions)] +pub async fn running_test_environment() -> RunningTestEnvironment { TestEnvironment::new_running().await } -pub fn stopped_http_server( +pub fn stopped_http_server( cfg: torrust_tracker_configuration::HttpTracker, ) -> StoppedHttpServer { let http_server = I::new(); - TrackerInterface::new(cfg, http_server) + HttpServer::new(cfg, http_server) } -pub async fn running_http_server( +pub async fn running_http_server( cfg: torrust_tracker_configuration::HttpTracker, tracker: Arc, ) -> 
RunningHttpServer { From d914e5c0fe263b433550d7d1d75ed79d26afb843 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 2 Mar 2023 20:07:16 +0100 Subject: [PATCH 392/435] refactor: replaced test http servers with the new test environments --- Cargo.lock | 1 + packages/test-helpers/Cargo.toml | 1 + packages/test-helpers/src/configuration.rs | 69 ++ src/http/tracker_interface.rs | 6 +- tests/api/test_environment.rs | 6 +- tests/common/http.rs | 5 - tests/common/tracker.rs | 10 +- tests/http/client.rs | 17 +- tests/http/mod.rs | 2 - tests/http/server.rs | 137 ---- tests/http/test_environment.rs | 44 +- tests/http_tracker.rs | 754 +++++++++++---------- tests/udp/test_environment.rs | 6 +- 13 files changed, 532 insertions(+), 526 deletions(-) delete mode 100644 tests/http/server.rs diff --git a/Cargo.lock b/Cargo.lock index 9045b7c47..ce179501f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3019,6 +3019,7 @@ dependencies = [ "rand", "tokio", "torrust-tracker-configuration", + "torrust-tracker-primitives", ] [[package]] diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 5be0e8aba..2f942bac7 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -9,3 +9,4 @@ tokio = { version = "1", features = ["rt-multi-thread", "net", "sync", "macros", lazy_static = "1.4" rand = "0.8.5" torrust-tracker-configuration = { path = "../configuration"} +torrust-tracker-primitives = { path = "../primitives"} diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index a978a050b..ec29fdbe1 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -1,6 +1,8 @@ use std::env; +use std::net::IpAddr; use torrust_tracker_configuration::Configuration; +use torrust_tracker_primitives::TrackerMode; use crate::random; @@ -43,3 +45,70 @@ pub fn ephemeral() -> Configuration { config } + +#[must_use] +pub fn ephemeral_with_reverse_proxy() -> 
Configuration { + let mut cfg = ephemeral(); + + cfg.on_reverse_proxy = true; + + cfg +} + +#[must_use] +pub fn ephemeral_mode_public() -> Configuration { + let mut cfg = ephemeral(); + + cfg.mode = TrackerMode::Public; + + cfg +} + +#[must_use] +pub fn ephemeral_mode_private() -> Configuration { + let mut cfg = ephemeral(); + + cfg.mode = TrackerMode::Private; + + cfg +} + +#[must_use] +pub fn ephemeral_mode_whitelisted() -> Configuration { + let mut cfg = ephemeral(); + + cfg.mode = TrackerMode::Listed; + + cfg +} + +#[must_use] +pub fn ephemeral_mode_private_whitelisted() -> Configuration { + let mut cfg = ephemeral(); + + cfg.mode = TrackerMode::PrivateListed; + + cfg +} + +#[must_use] +pub fn ephemeral_with_external_ip(ip: IpAddr) -> Configuration { + let mut cfg = ephemeral(); + + cfg.external_ip = Some(ip.to_string()); + + cfg +} + +#[must_use] +pub fn ephemeral_ipv6() -> Configuration { + let mut cfg = ephemeral(); + + let ipv6 = format!("[::]:{}", 0); + + cfg.http_api.bind_address = ipv6.clone(); + cfg.http_trackers[0].bind_address = ipv6.clone(); + cfg.udp_trackers[0].bind_address = ipv6; + + cfg +} diff --git a/src/http/tracker_interface.rs b/src/http/tracker_interface.rs index a8bb057d7..fc4ba9c95 100644 --- a/src/http/tracker_interface.rs +++ b/src/http/tracker_interface.rs @@ -33,8 +33,8 @@ pub type StoppedHttpServer = HttpServer>; pub type RunningHttpServer = HttpServer>; pub struct HttpServer { - cfg: torrust_tracker_configuration::HttpTracker, - state: S, + pub cfg: torrust_tracker_configuration::HttpTracker, + pub state: S, } pub struct Stopped { @@ -42,7 +42,7 @@ pub struct Stopped { } pub struct Running { - bind_addr: SocketAddr, + pub bind_addr: SocketAddr, task_killer: tokio::sync::oneshot::Sender, task: tokio::task::JoinHandle, } diff --git a/tests/api/test_environment.rs b/tests/api/test_environment.rs index 4f119fd64..1565530c1 100644 --- a/tests/api/test_environment.rs +++ b/tests/api/test_environment.rs @@ -6,7 +6,7 @@ use 
torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; use super::connection_info::ConnectionInfo; -use crate::common::tracker::{tracker_configuration, tracker_instance}; +use crate::common::tracker::new_tracker; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment; @@ -89,9 +89,9 @@ pub fn running_test_environment() -> RunningTestEnvironment { } pub fn api_server() -> StoppedApiServer { - let config = tracker_configuration(); + let config = Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()); - let tracker = tracker_instance(&config); + let tracker = new_tracker(config.clone()); ApiServer::new(config.http_api.clone(), tracker) } diff --git a/tests/common/http.rs b/tests/common/http.rs index 902752674..d682027fd 100644 --- a/tests/common/http.rs +++ b/tests/common/http.rs @@ -1,11 +1,6 @@ pub type ReqwestQuery = Vec; pub type ReqwestQueryParam = (String, String); -#[derive(Clone, Debug)] -pub struct ConnectionInfo { - pub bind_address: String, -} - /// URL Query component #[derive(Default, Debug)] pub struct Query { diff --git a/tests/common/tracker.rs b/tests/common/tracker.rs index c0e44749b..7451bbc36 100644 --- a/tests/common/tracker.rs +++ b/tests/common/tracker.rs @@ -4,12 +4,8 @@ use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::tracker::Tracker; use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; -pub fn tracker_configuration() -> Arc { - Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) -} - // TODO: Move to test-helpers crate once `Tracker` is isolated. 
-pub fn tracker_instance(configuration: &Arc) -> Arc { +pub fn new_tracker(configuration: Arc) -> Arc { // Set the time of Torrust app starting lazy_static::initialize(&static_time::TIME_AT_APP_START); @@ -20,7 +16,7 @@ pub fn tracker_instance(configuration: &Arc Arc::new(tracker), Err(error) => { panic!("{}", error) @@ -28,7 +24,7 @@ pub fn tracker_instance(configuration: &Arc, } @@ -23,26 +22,26 @@ pub struct Client { /// base url path query /// ``` impl Client { - pub fn new(connection_info: ConnectionInfo) -> Self { + pub fn new(server_addr: std::net::SocketAddr) -> Self { Self { - connection_info, + server_addr, reqwest_client: reqwest::Client::builder().build().unwrap(), key: None, } } /// Creates the new client binding it to an specific local address - pub fn bind(connection_info: ConnectionInfo, local_address: IpAddr) -> Self { + pub fn bind(server_addr: std::net::SocketAddr, local_address: IpAddr) -> Self { Self { - connection_info, + server_addr, reqwest_client: reqwest::Client::builder().local_address(local_address).build().unwrap(), key: None, } } - pub fn authenticated(connection_info: ConnectionInfo, key: Key) -> Self { + pub fn authenticated(server_addr: std::net::SocketAddr, key: Key) -> Self { Self { - connection_info, + server_addr, reqwest_client: reqwest::Client::builder().build().unwrap(), key: Some(key), } @@ -95,6 +94,6 @@ impl Client { } fn base_url(&self) -> String { - format!("http://{}/", &self.connection_info.bind_address) + format!("http://{}/", &self.server_addr) } } diff --git a/tests/http/mod.rs b/tests/http/mod.rs index a335723e3..771145f46 100644 --- a/tests/http/mod.rs +++ b/tests/http/mod.rs @@ -1,10 +1,8 @@ pub mod asserts; pub mod asserts_warp; pub mod client; -pub mod connection_info; pub mod requests; pub mod responses; -pub mod server; pub mod test_environment; use percent_encoding::NON_ALPHANUMERIC; diff --git a/tests/http/server.rs b/tests/http/server.rs deleted file mode 100644 index 4753ee4dc..000000000 --- 
a/tests/http/server.rs +++ /dev/null @@ -1,137 +0,0 @@ -use core::panic; -use std::net::{IpAddr, SocketAddr}; -use std::sync::Arc; - -use torrust_tracker::http::Version; -use torrust_tracker::jobs::http_tracker; -use torrust_tracker::protocol::info_hash::InfoHash; -use torrust_tracker::tracker::peer::Peer; -use torrust_tracker::tracker::statistics::Keeper; -use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; -use torrust_tracker_configuration::Configuration; -use torrust_tracker_primitives::TrackerMode; - -use super::connection_info::ConnectionInfo; - -/// Starts a HTTP tracker with mode "public" in settings -pub async fn start_public_http_tracker(version: Version) -> Server { - let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); - configuration.mode = TrackerMode::Public; - start_custom_http_tracker(Arc::new(configuration), version).await -} - -/// Starts a HTTP tracker with mode "listed" in settings -pub async fn start_whitelisted_http_tracker(version: Version) -> Server { - let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); - configuration.mode = TrackerMode::Listed; - start_custom_http_tracker(Arc::new(configuration), version).await -} - -/// Starts a HTTP tracker with mode "private" in settings -pub async fn start_private_http_tracker(version: Version) -> Server { - let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); - configuration.mode = TrackerMode::Private; - start_custom_http_tracker(Arc::new(configuration), version).await -} - -/// Starts a HTTP tracker with a wildcard IPV6 address. 
-/// The configuration in the `config.toml` file would be like this: -/// -/// ```text -/// [[http_trackers]] -/// bind_address = "[::]:7070" -/// ``` -pub async fn start_ipv6_http_tracker(version: Version) -> Server { - let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); - - // Change socket address to "wildcard address" (unspecified address which means any IP address) - // but keeping the random port generated with the ephemeral configuration. - let socket_addr: SocketAddr = configuration.http_trackers[0].bind_address.parse().unwrap(); - let new_ipv6_socket_address = format!("[::]:{}", socket_addr.port()); - configuration.http_trackers[0].bind_address = new_ipv6_socket_address; - - start_custom_http_tracker(Arc::new(configuration), version).await -} - -/// Starts a HTTP tracker with an specific `external_ip`. -/// The configuration in the `config.toml` file would be like this: -/// -/// ```text -/// external_ip = "2.137.87.41" -/// ``` -pub async fn start_http_tracker_with_external_ip(external_ip: &IpAddr, version: Version) -> Server { - let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); - configuration.external_ip = Some(external_ip.to_string()); - start_custom_http_tracker(Arc::new(configuration), version).await -} - -/// Starts a HTTP tracker `on_reverse_proxy`. 
-/// The configuration in the `config.toml` file would be like this: -/// -/// ```text -/// on_reverse_proxy = true -/// ``` -pub async fn start_http_tracker_on_reverse_proxy(version: Version) -> Server { - let mut configuration = torrust_tracker_test_helpers::configuration::ephemeral(); - configuration.on_reverse_proxy = true; - start_custom_http_tracker(Arc::new(configuration), version).await -} - -pub async fn start_default_http_tracker(version: Version) -> Server { - let configuration = tracker_configuration(); - start_custom_http_tracker(configuration.clone(), version).await -} - -pub fn tracker_configuration() -> Arc { - Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) -} - -pub async fn start_custom_http_tracker(configuration: Arc, version: Version) -> Server { - let server = start(&configuration); - http_tracker::start_job(&configuration.http_trackers[0], server.tracker.clone(), version).await; - server -} - -fn start(configuration: &Arc) -> Server { - let connection_info = ConnectionInfo::anonymous(&configuration.http_trackers[0].bind_address.clone()); - - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; - - // Initialize logging - logging::setup(configuration); - - Server { - tracker, - connection_info, - } -} - -pub struct Server { - pub tracker: Arc, - pub connection_info: ConnectionInfo, -} - -impl Server { - pub fn get_connection_info(&self) -> ConnectionInfo { - self.connection_info.clone() - } - - pub async fn add_torrent_peer(&self, 
info_hash: &InfoHash, peer: &Peer) { - self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - } -} diff --git a/tests/http/test_environment.rs b/tests/http/test_environment.rs index 87232f79b..a2cb4619c 100644 --- a/tests/http/test_environment.rs +++ b/tests/http/test_environment.rs @@ -5,7 +5,7 @@ use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; -use crate::common::tracker::{tracker_configuration, tracker_instance}; +use crate::common::tracker::new_tracker; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment>; @@ -13,6 +13,7 @@ pub type StoppedTestEnvironment = TestEnvironment>; pub type RunningTestEnvironment = TestEnvironment>; pub struct TestEnvironment { + pub cfg: Arc, pub tracker: Arc, pub state: S, } @@ -35,14 +36,15 @@ impl TestEnvironment { impl TestEnvironment> { #[allow(dead_code)] - pub fn new_stopped() -> Self { - let cfg = tracker_configuration(); + pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { + let cfg = Arc::new(cfg); - let tracker = tracker_instance(&cfg); + let tracker = new_tracker(cfg.clone()); let http_server = stopped_http_server(cfg.http_trackers[0].clone()); Self { + cfg, tracker, state: Stopped { http_server }, } @@ -51,39 +53,61 @@ impl TestEnvironment> { #[allow(dead_code)] pub async fn start(self) -> TestEnvironment> { TestEnvironment { + cfg: self.cfg, tracker: self.tracker.clone(), state: Running { http_server: self.state.http_server.start(self.tracker).await.unwrap(), }, } } + + pub fn config(&self) -> &torrust_tracker_configuration::HttpTracker { + &self.state.http_server.cfg + } + + pub fn config_mut(&mut self) -> &mut torrust_tracker_configuration::HttpTracker { + &mut self.state.http_server.cfg + } } impl TestEnvironment> { - pub async fn new_running() -> Self { - let test_env = StoppedTestEnvironment::new_stopped(); + pub async fn 
new_running(cfg: torrust_tracker_configuration::Configuration) -> Self { + let test_env = StoppedTestEnvironment::new_stopped(cfg); test_env.start().await } pub async fn stop(self) -> TestEnvironment> { TestEnvironment { + cfg: self.cfg, tracker: self.tracker, state: Stopped { http_server: self.state.http_server.stop().await.unwrap(), }, } } + + pub fn bind_address(&self) -> &std::net::SocketAddr { + &self.state.http_server.state.bind_addr + } + + pub fn config(&self) -> &torrust_tracker_configuration::HttpTracker { + &self.state.http_server.cfg + } } #[allow(clippy::module_name_repetitions)] -pub async fn stopped_test_environment() -> StoppedTestEnvironment { - TestEnvironment::new_stopped().await +pub fn stopped_test_environment( + cfg: torrust_tracker_configuration::Configuration, +) -> StoppedTestEnvironment { + TestEnvironment::new_stopped(cfg) } #[allow(clippy::module_name_repetitions)] -pub async fn running_test_environment() -> RunningTestEnvironment { - TestEnvironment::new_running().await +pub async fn running_test_environment( + cfg: torrust_tracker_configuration::Configuration, +) -> RunningTestEnvironment { + TestEnvironment::new_running(cfg).await } pub fn stopped_http_server( diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index c0d0bdd23..22a6c44ff 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -2,12 +2,12 @@ /// /// Warp version: /// ```text -/// cargo test `warp_http_tracker_server` -- --nocapture +/// cargo test `warp_test_env` -- --nocapture /// ``` /// /// Axum version (WIP): /// ```text -/// cargo test `warp_http_tracker_server` -- --nocapture +/// cargo test `warp_test_env` -- --nocapture /// ``` mod common; mod http; @@ -15,31 +15,30 @@ mod http; pub type Axum = torrust_tracker::http::axum_implementation::server::Server; pub type Warp = torrust_tracker::http::warp_implementation::server::Server; -mod http_tracker_test_environment { +mod test_env_test_environment { use 
crate::http::test_environment::running_test_environment; use crate::{Axum, Warp}; #[tokio::test] async fn should_be_able_to_start_and_stop_a_test_environment_using_axum() { - let test_env = running_test_environment::().await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; test_env.stop().await; } #[tokio::test] async fn should_be_able_to_start_and_stop_a_test_environment_using_warp() { - let test_env = running_test_environment::().await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; test_env.stop().await; } } -mod warp_http_tracker_server { +mod warp_test_env { mod for_all_config_modes { mod running_on_reverse_proxy { - use torrust_tracker::http::Version; use crate::http::asserts::{ assert_could_not_find_remote_address_on_xff_header_error_response, @@ -47,18 +46,21 @@ mod warp_http_tracker_server { }; use crate::http::client::Client; use crate::http::requests::announce::QueryBuilder; - use crate::http::server::start_http_tracker_on_reverse_proxy; + use crate::http::test_environment::{running_test_environment, stopped_test_environment}; + use crate::Warp; #[tokio::test] async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { // If the tracker is running behind a reverse proxy, the peer IP is the // last IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy client. 
- let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) + .await; let params = QueryBuilder::default().query().params(); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -67,11 +69,13 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { - let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) + .await; let params = QueryBuilder::default().query().params(); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") .await; @@ -97,7 +101,6 @@ mod warp_http_tracker_server { use local_ip_address::local_ip; use reqwest::Response; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -113,20 +116,18 @@ mod warp_http_tracker_server { use crate::http::responses; use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList}; use crate::http::responses::announce_warp::{WarpAnnounce, WarpDictionaryPeer}; - use crate::http::server::{ - start_default_http_tracker, start_http_tracker_on_reverse_proxy, start_http_tracker_with_external_ip, - start_ipv6_http_tracker, start_public_http_tracker, - }; + use crate::http::test_environment::running_test_environment; + use crate::Warp; #[tokio::test] async fn should_respond_if_only_the_mandatory_fields_are_provided() { - let http_tracker_server = 
start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); params.remove_optional_params(); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -135,16 +136,16 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_fail_when_the_url_query_component_is_empty() { - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; - let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; + let response = Client::new(test_env.bind_address().clone()).get("announce").await; assert_internal_server_error_response(response).await; } #[tokio::test] async fn should_fail_when_a_mandatory_field_is_missing() { - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; // Without `info_hash` param @@ -152,7 +153,7 @@ mod warp_http_tracker_server { params.info_hash = None; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -164,7 +165,7 @@ mod warp_http_tracker_server { params.peer_id = None; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -176,7 +177,7 @@ mod warp_http_tracker_server { params.port = None; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) 
.get(&format!("announce?{params}")) .await; @@ -185,14 +186,14 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); for invalid_value in &invalid_info_hashes() { params.set("info_hash", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -207,13 +208,13 @@ mod warp_http_tracker_server { // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP if there. // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request header is tracker is running `on_reverse_proxy`. - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -222,7 +223,7 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_fail_when_the_downloaded_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -231,7 +232,7 @@ mod warp_http_tracker_server { for invalid_value in invalid_values { params.set("downloaded", invalid_value); - let response = 
Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -241,7 +242,7 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_fail_when_the_uploaded_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -250,7 +251,7 @@ mod warp_http_tracker_server { for invalid_value in invalid_values { params.set("uploaded", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -260,7 +261,7 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_fail_when_the_peer_id_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -276,7 +277,7 @@ mod warp_http_tracker_server { for invalid_value in invalid_values { params.set("peer_id", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -286,7 +287,7 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_fail_when_the_port_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -295,7 +296,7 @@ mod warp_http_tracker_server { for invalid_value in invalid_values { params.set("port", 
invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -305,7 +306,7 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_fail_when_the_left_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -314,7 +315,7 @@ mod warp_http_tracker_server { for invalid_value in invalid_values { params.set("left", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -326,7 +327,7 @@ mod warp_http_tracker_server { async fn should_not_fail_when_the_event_param_is_invalid() { // All invalid values are ignored as if the `event` param were empty - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -343,7 +344,7 @@ mod warp_http_tracker_server { for invalid_value in invalid_values { params.set("event", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -353,7 +354,7 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_not_fail_when_the_compact_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Warp).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -362,7 +363,7 @@ mod 
warp_http_tracker_server { for invalid_value in invalid_values { params.set("compact", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -372,9 +373,10 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let http_tracker_server = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) @@ -387,8 +389,8 @@ mod warp_http_tracker_server { &Announce { complete: 1, // the peer for this test incomplete: 0, - interval: http_tracker_server.tracker.config.announce_interval, - min_interval: http_tracker_server.tracker.config.min_announce_interval, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, peers: vec![], }, ) @@ -397,7 +399,8 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers() { - let http_tracker_server = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -407,12 +410,10 @@ mod warp_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server - .add_torrent_peer(&info_hash, &previously_announced_peer) - .await; + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2. 
This new peer is non included on the response peer list - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -427,8 +428,8 @@ mod warp_http_tracker_server { &WarpAnnounce { complete: 2, incomplete: 0, - interval: http_tracker_server.tracker.config.announce_interval, - min_interval: http_tracker_server.tracker.config.min_announce_interval, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, peers: vec![WarpDictionaryPeer::from(previously_announced_peer)], }, ) @@ -437,13 +438,14 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let http_tracker_server = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let peer = PeerBuilder::default().build(); // Add a peer - http_tracker_server.add_torrent_peer(&info_hash, &peer).await; + test_env.add_torrent_peer(&info_hash, &peer).await; let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -452,9 +454,7 @@ mod warp_http_tracker_server { assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); - let response = Client::new(http_tracker_server.get_connection_info()) - .announce(&announce_query) - .await; + let response = Client::new(test_env.bind_address().clone()).announce(&announce_query).await; assert_empty_announce_response(response).await; } @@ -464,7 +464,8 @@ mod warp_http_tracker_server { // Tracker Returns Compact Peer Lists // https://www.bittorrent.org/beps/bep_0023.html - let http_tracker_server = start_public_http_tracker(Version::Warp).await; + let test_env = + 
running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -474,12 +475,10 @@ mod warp_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server - .add_torrent_peer(&info_hash, &previously_announced_peer) - .await; + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 accepting compact responses - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -505,7 +504,8 @@ mod warp_http_tracker_server { // code-review: the HTTP tracker does not return the compact response by default if the "compact" // param is not provided in the announce URL. The BEP 23 suggest to do so. - let http_tracker_server = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -515,14 +515,12 @@ mod warp_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server - .add_torrent_peer(&info_hash, &previously_announced_peer) - .await; + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 without passing the "compact" param // By default it should respond with the compact peer list // https://www.bittorrent.org/beps/bep_0023.html - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -543,26 +541,28 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let http_tracker_server 
= start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - Client::new(http_tracker_server.get_connection_info()) + Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().query()) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_connections_handled, 1); } #[tokio::test] async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { - let http_tracker_server = start_ipv6_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; - Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) + Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 1); } @@ -571,9 +571,10 @@ mod warp_http_tracker_server { async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let http_tracker_server = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - Client::new(http_tracker_server.get_connection_info()) + Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) @@ -581,33 +582,35 @@ mod warp_http_tracker_server { ) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 0); } #[tokio::test] async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let http_tracker_server = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - Client::new(http_tracker_server.get_connection_info()) + Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().query()) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_announces_handled, 1); } #[tokio::test] async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { - let http_tracker_server = start_ipv6_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; - Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) + Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 1); } @@ -616,9 +619,10 @@ mod warp_http_tracker_server { async fn 
should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. - let http_tracker_server = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - Client::new(http_tracker_server.get_connection_info()) + Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) @@ -626,19 +630,20 @@ mod warp_http_tracker_server { ) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 0); } #[tokio::test] async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let http_tracker_server = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let client_ip = local_ip().unwrap(); - let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + let client = Client::bind(test_env.bind_address().clone(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -647,7 +652,7 @@ mod warp_http_tracker_server { client.announce(&announce_query).await; - let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), client_ip); @@ -663,14 +668,17 @@ mod warp_http_tracker_server { 127.0.0.1 external_ip = "2.137.87.41" */ - let http_tracker_server = - 
start_http_tracker_with_external_ip(&IpAddr::from_str("2.137.87.41").unwrap(), Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_external_ip( + IpAddr::from_str("2.137.87.41").unwrap(), + )) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = loopback_ip; - let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + let client = Client::bind(test_env.bind_address().clone(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -679,10 +687,10 @@ mod warp_http_tracker_server { client.announce(&announce_query).await; - let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; - assert_eq!(peer_addr.ip(), http_tracker_server.tracker.config.get_ext_ip().unwrap()); + assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); } @@ -695,17 +703,17 @@ mod warp_http_tracker_server { ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" */ - let http_tracker_server = start_http_tracker_with_external_ip( - &IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), - Version::Warp, - ) - .await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_external_ip( + IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), + )) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = loopback_ip; - let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + let client = 
Client::bind(test_env.bind_address().clone(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -714,10 +722,10 @@ mod warp_http_tracker_server { client.announce(&announce_query).await; - let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; - assert_eq!(peer_addr.ip(), http_tracker_server.tracker.config.get_ext_ip().unwrap()); + assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); } @@ -730,11 +738,13 @@ mod warp_http_tracker_server { 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 */ - let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let client = Client::new(http_tracker_server.get_connection_info()); + let client = Client::new(test_env.bind_address().clone()); let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); @@ -746,7 +756,7 @@ mod warp_http_tracker_server { ) .await; - let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); @@ -766,7 +776,6 @@ mod warp_http_tracker_server { use std::net::IpAddr; use std::str::FromStr; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -776,26 +785,29 @@ mod warp_http_tracker_server { use crate::http::requests; use crate::http::requests::scrape::QueryBuilder; use 
crate::http::responses::scrape::{self, File, ResponseBuilder}; - use crate::http::server::{start_ipv6_http_tracker, start_public_http_tracker}; + use crate::http::test_environment::running_test_environment; + use crate::Warp; #[tokio::test] async fn should_fail_when_the_request_is_empty() { - let http_tracker_server = start_public_http_tracker(Version::Warp).await; - let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let response = Client::new(test_env.bind_address().clone()).get("scrape").await; assert_internal_server_error_response(response).await; } #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let http_tracker_server = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let mut params = QueryBuilder::default().query().params(); for invalid_value in &invalid_info_hashes() { params.set_one_info_hash_param(invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -806,11 +818,12 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let http_tracker = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -820,7 +833,7 @@ mod warp_http_tracker_server { ) .await; - let response = Client::new(http_tracker.get_connection_info()) 
+ let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -844,11 +857,12 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { - let http_tracker = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -858,7 +872,7 @@ mod warp_http_tracker_server { ) .await; - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -882,11 +896,12 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let http_tracker = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -899,12 +914,13 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_accept_multiple_infohashes() { - let http_tracker = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); 
let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .add_info_hash(&info_hash1) @@ -923,11 +939,12 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let http_tracker = start_public_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::new(http_tracker.get_connection_info()) + Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -935,18 +952,19 @@ mod warp_http_tracker_server { ) .await; - let stats = http_tracker.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_scrapes_handled, 1); } #[tokio::test] async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { - let http_tracker = start_ipv6_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::bind(http_tracker.get_connection_info(), IpAddr::from_str("::1").unwrap()) + Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -954,7 +972,7 @@ mod warp_http_tracker_server { ) .await; - let stats = http_tracker.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_scrapes_handled, 1); } @@ -966,21 +984,23 @@ mod 
warp_http_tracker_server { mod and_receiving_an_announce_request { use std::str::FromStr; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; use crate::http::client::Client; use crate::http::requests::announce::QueryBuilder; - use crate::http::server::start_whitelisted_http_tracker; + use crate::http::test_environment::running_test_environment; + use crate::Warp; #[tokio::test] async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let http_tracker_server = start_whitelisted_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -990,17 +1010,19 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_allow_announcing_a_whitelisted_torrent() { - let http_tracker_server = start_whitelisted_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker_server + test_env .tracker .add_torrent_to_whitelist(&info_hash) .await .expect("should add the torrent to the whitelist"); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -1011,7 +1033,6 @@ mod warp_http_tracker_server { mod receiving_an_scrape_request { use std::str::FromStr; - use 
torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -1020,15 +1041,18 @@ mod warp_http_tracker_server { use crate::http::client::Client; use crate::http::requests; use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::server::start_whitelisted_http_tracker; + use crate::http::test_environment::running_test_environment; + use crate::Warp; #[tokio::test] async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let http_tracker = start_whitelisted_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -1038,7 +1062,7 @@ mod warp_http_tracker_server { ) .await; - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1053,11 +1077,13 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let http_tracker = start_whitelisted_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -1067,13 +1093,13 @@ mod warp_http_tracker_server { ) .await; - http_tracker + test_env .tracker .add_torrent_to_whitelist(&info_hash) .await .expect("should add the torrent to the whitelist"); - let response = Client::new(http_tracker.get_connection_info()) + let response = 
Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1103,7 +1129,6 @@ mod warp_http_tracker_server { use std::str::FromStr; use std::time::Duration; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::Key; @@ -1113,19 +1138,17 @@ mod warp_http_tracker_server { }; use crate::http::client::Client; use crate::http::requests::announce::QueryBuilder; - use crate::http::server::start_private_http_tracker; + use crate::http::test_environment::running_test_environment; + use crate::Warp; #[tokio::test] async fn should_respond_to_authenticated_peers() { - let http_tracker_server = start_private_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; - let key = http_tracker_server - .tracker - .generate_auth_key(Duration::from_secs(60)) - .await - .unwrap(); + let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(http_tracker_server.get_connection_info(), key.id()) + let response = Client::authenticated(test_env.bind_address().clone(), key.id()) .announce(&QueryBuilder::default().query()) .await; @@ -1134,11 +1157,12 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let http_tracker_server = start_private_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -1147,12 
+1171,13 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_fail_if_the_peer_authentication_key_is_not_valid() { - let http_tracker_server = start_private_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; // The tracker does not have this key let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key) + let response = Client::authenticated(test_env.bind_address().clone(), unregistered_key) .announce(&QueryBuilder::default().query()) .await; @@ -1165,7 +1190,6 @@ mod warp_http_tracker_server { use std::str::FromStr; use std::time::Duration; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::Key; use torrust_tracker::tracker::peer; @@ -1175,15 +1199,17 @@ mod warp_http_tracker_server { use crate::http::client::Client; use crate::http::requests; use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::server::start_private_http_tracker; + use crate::http::test_environment::running_test_environment; + use crate::Warp; #[tokio::test] async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let http_tracker = start_private_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -1193,7 +1219,7 @@ mod warp_http_tracker_server { ) .await; - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() 
.with_one_info_hash(&info_hash) @@ -1208,11 +1234,12 @@ mod warp_http_tracker_server { #[tokio::test] async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let http_tracker = start_private_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -1222,9 +1249,9 @@ mod warp_http_tracker_server { ) .await; - let key = http_tracker.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(http_tracker.get_connection_info(), key.id()) + let response = Client::authenticated(test_env.bind_address().clone(), key.id()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1250,11 +1277,12 @@ mod warp_http_tracker_server { async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { // There is not authentication error - let http_tracker = start_private_http_tracker(Version::Warp).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -1266,7 +1294,7 @@ mod warp_http_tracker_server { let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); - let response = Client::authenticated(http_tracker.get_connection_info(), false_key) + let response = Client::authenticated(test_env.bind_address().clone(), false_key) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1289,30 
+1317,32 @@ mod warp_http_tracker_server { } } -mod axum_http_tracker_server { +mod axum_test_env { // WIP: migration HTTP from Warp to Axum mod for_all_config_modes { mod and_running_on_reverse_proxy { - use torrust_tracker::http::Version; use crate::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; use crate::http::client::Client; use crate::http::requests::announce::QueryBuilder; - use crate::http::server::start_http_tracker_on_reverse_proxy; + use crate::http::test_environment::running_test_environment; + use crate::Axum; #[tokio::test] async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { // If the tracker is running behind a reverse proxy, the peer IP is the // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. - let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) + .await; let params = QueryBuilder::default().query().params(); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1321,11 +1351,13 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { - let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) + .await; let params = QueryBuilder::default().query().params(); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") .await; @@ -1351,7 +1383,6 @@ mod axum_http_tracker_server 
{ use local_ip_address::local_ip; use reqwest::Response; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -1366,20 +1397,18 @@ mod axum_http_tracker_server { use crate::http::requests::announce::{Compact, QueryBuilder}; use crate::http::responses; use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; - use crate::http::server::{ - start_default_http_tracker, start_http_tracker_on_reverse_proxy, start_http_tracker_with_external_ip, - start_ipv6_http_tracker, start_public_http_tracker, - }; + use crate::http::test_environment::running_test_environment; + use crate::Axum; #[tokio::test] async fn should_respond_if_only_the_mandatory_fields_are_provided() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); params.remove_optional_params(); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1388,20 +1417,20 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_the_url_query_component_is_empty() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; - let response = Client::new(http_tracker_server.get_connection_info()).get("announce").await; + let response = Client::new(test_env.bind_address().clone()).get("announce").await; assert_missing_query_params_for_announce_request_error_response(response).await; } #[tokio::test] async fn should_fail_when_url_query_parameters_are_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = 
running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let invalid_query_param = "a=b=c"; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{invalid_query_param}")) .await; @@ -1410,7 +1439,7 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_a_mandatory_field_is_missing() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; // Without `info_hash` param @@ -1418,7 +1447,7 @@ mod axum_http_tracker_server { params.info_hash = None; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1430,7 +1459,7 @@ mod axum_http_tracker_server { params.peer_id = None; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1442,7 +1471,7 @@ mod axum_http_tracker_server { params.port = None; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1451,14 +1480,14 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); for invalid_value in &invalid_info_hashes() { params.set("info_hash", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = 
Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1473,13 +1502,13 @@ mod axum_http_tracker_server { // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP. // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request HTTP header. - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1488,7 +1517,7 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_the_downloaded_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1497,7 +1526,7 @@ mod axum_http_tracker_server { for invalid_value in invalid_values { params.set("downloaded", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1507,7 +1536,7 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_the_uploaded_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1516,7 +1545,7 @@ mod axum_http_tracker_server { for invalid_value in invalid_values { params.set("uploaded", invalid_value); - let response = 
Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1526,7 +1555,7 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_the_peer_id_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1542,7 +1571,7 @@ mod axum_http_tracker_server { for invalid_value in invalid_values { params.set("peer_id", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1552,7 +1581,7 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_the_port_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1561,7 +1590,7 @@ mod axum_http_tracker_server { for invalid_value in invalid_values { params.set("port", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1571,7 +1600,7 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_the_left_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1580,7 +1609,7 @@ mod axum_http_tracker_server { for invalid_value in invalid_values { params.set("left", 
invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1590,7 +1619,7 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_the_event_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1607,7 +1636,7 @@ mod axum_http_tracker_server { for invalid_value in invalid_values { params.set("event", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1617,7 +1646,7 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_when_the_compact_param_is_invalid() { - let http_tracker_server = start_default_http_tracker(Version::Axum).await; + let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1626,7 +1655,7 @@ mod axum_http_tracker_server { for invalid_value in invalid_values { params.set("compact", invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -1636,9 +1665,10 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = 
Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) @@ -1651,8 +1681,8 @@ mod axum_http_tracker_server { &Announce { complete: 1, // the peer for this test incomplete: 0, - interval: http_tracker_server.tracker.config.announce_interval, - min_interval: http_tracker_server.tracker.config.min_announce_interval, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, peers: vec![], }, ) @@ -1661,7 +1691,8 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers() { - let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1671,12 +1702,10 @@ mod axum_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server - .add_torrent_peer(&info_hash, &previously_announced_peer) - .await; + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2. 
This new peer is non included on the response peer list - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -1691,8 +1720,8 @@ mod axum_http_tracker_server { &Announce { complete: 2, incomplete: 0, - interval: http_tracker_server.tracker.config.announce_interval, - min_interval: http_tracker_server.tracker.config.min_announce_interval, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, peers: vec![DictionaryPeer::from(previously_announced_peer)], }, ) @@ -1701,7 +1730,8 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { - let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1710,7 +1740,7 @@ mod axum_http_tracker_server { .with_peer_id(&peer::Id(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) .build(); - http_tracker_server.add_torrent_peer(&info_hash, &peer_using_ipv4).await; + test_env.add_torrent_peer(&info_hash, &peer_using_ipv4).await; // Announce a peer using IPV6 let peer_using_ipv6 = PeerBuilder::default() @@ -1720,10 +1750,10 @@ mod axum_http_tracker_server { 8080, )) .build(); - http_tracker_server.add_torrent_peer(&info_hash, &peer_using_ipv6).await; + test_env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; // Announce the new Peer. 
- let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -1739,8 +1769,8 @@ mod axum_http_tracker_server { &Announce { complete: 3, incomplete: 0, - interval: http_tracker_server.tracker.config.announce_interval, - min_interval: http_tracker_server.tracker.config.min_announce_interval, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, peers: vec![DictionaryPeer::from(peer_using_ipv4), DictionaryPeer::from(peer_using_ipv6)], }, ) @@ -1749,13 +1779,14 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let peer = PeerBuilder::default().build(); // Add a peer - http_tracker_server.add_torrent_peer(&info_hash, &peer).await; + test_env.add_torrent_peer(&info_hash, &peer).await; let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -1764,9 +1795,7 @@ mod axum_http_tracker_server { assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); - let response = Client::new(http_tracker_server.get_connection_info()) - .announce(&announce_query) - .await; + let response = Client::new(test_env.bind_address().clone()).announce(&announce_query).await; assert_empty_announce_response(response).await; } @@ -1776,7 +1805,8 @@ mod axum_http_tracker_server { // Tracker Returns Compact Peer Lists // https://www.bittorrent.org/beps/bep_0023.html - let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + 
running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1786,12 +1816,10 @@ mod axum_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server - .add_torrent_peer(&info_hash, &previously_announced_peer) - .await; + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 accepting compact responses - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -1817,7 +1845,8 @@ mod axum_http_tracker_server { // code-review: the HTTP tracker does not return the compact response by default if the "compact" // param is not provided in the announce URL. The BEP 23 suggest to do so. - let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1827,14 +1856,12 @@ mod axum_http_tracker_server { .build(); // Add the Peer 1 - http_tracker_server - .add_torrent_peer(&info_hash, &previously_announced_peer) - .await; + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 without passing the "compact" param // By default it should respond with the compact peer list // https://www.bittorrent.org/beps/bep_0023.html - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -1855,26 +1882,28 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let 
http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - Client::new(http_tracker_server.get_connection_info()) + Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().query()) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_connections_handled, 1); } #[tokio::test] async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { - let http_tracker_server = start_ipv6_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; - Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) + Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 1); } @@ -1883,9 +1912,10 @@ mod axum_http_tracker_server { async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - Client::new(http_tracker_server.get_connection_info()) + Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) @@ -1893,33 +1923,35 @@ mod axum_http_tracker_server { ) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 0); } #[tokio::test] async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - Client::new(http_tracker_server.get_connection_info()) + Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().query()) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_announces_handled, 1); } #[tokio::test] async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { - let http_tracker_server = start_ipv6_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; - Client::bind(http_tracker_server.get_connection_info(), IpAddr::from_str("::1").unwrap()) + Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 1); } @@ -1928,9 +1960,10 @@ mod axum_http_tracker_server { async fn 
should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. - let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - Client::new(http_tracker_server.get_connection_info()) + Client::new(test_env.bind_address().clone()) .announce( &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) @@ -1938,19 +1971,20 @@ mod axum_http_tracker_server { ) .await; - let stats = http_tracker_server.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 0); } #[tokio::test] async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let client_ip = local_ip().unwrap(); - let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + let client = Client::bind(test_env.bind_address().clone(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -1959,7 +1993,7 @@ mod axum_http_tracker_server { client.announce(&announce_query).await; - let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), client_ip); @@ -1975,14 +2009,17 @@ mod axum_http_tracker_server { 127.0.0.1 external_ip = "2.137.87.41" */ - let http_tracker_server = - 
start_http_tracker_with_external_ip(&IpAddr::from_str("2.137.87.41").unwrap(), Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_external_ip( + IpAddr::from_str("2.137.87.41").unwrap(), + )) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = loopback_ip; - let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + let client = Client::bind(test_env.bind_address().clone(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -1991,10 +2028,10 @@ mod axum_http_tracker_server { client.announce(&announce_query).await; - let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; - assert_eq!(peer_addr.ip(), http_tracker_server.tracker.config.get_ext_ip().unwrap()); + assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); } @@ -2007,17 +2044,17 @@ mod axum_http_tracker_server { ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" */ - let http_tracker_server = start_http_tracker_with_external_ip( - &IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), - Version::Axum, - ) - .await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_external_ip( + IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), + )) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = loopback_ip; - let client = Client::bind(http_tracker_server.get_connection_info(), client_ip); + let client = 
Client::bind(test_env.bind_address().clone(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -2026,10 +2063,10 @@ mod axum_http_tracker_server { client.announce(&announce_query).await; - let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; - assert_eq!(peer_addr.ip(), http_tracker_server.tracker.config.get_ext_ip().unwrap()); + assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); } @@ -2042,11 +2079,13 @@ mod axum_http_tracker_server { 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 */ - let http_tracker_server = start_http_tracker_on_reverse_proxy(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let client = Client::new(http_tracker_server.get_connection_info()); + let client = Client::new(test_env.bind_address().clone()); let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); @@ -2058,7 +2097,7 @@ mod axum_http_tracker_server { ) .await; - let peers = http_tracker_server.tracker.get_all_torrent_peers(&info_hash).await; + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); @@ -2078,7 +2117,6 @@ mod axum_http_tracker_server { use std::net::IpAddr; use std::str::FromStr; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -2091,26 +2129,30 @@ mod axum_http_tracker_server { use crate::http::requests; use 
crate::http::requests::scrape::QueryBuilder; use crate::http::responses::scrape::{self, File, ResponseBuilder}; - use crate::http::server::{start_ipv6_http_tracker, start_public_http_tracker}; + use crate::http::test_environment::running_test_environment; + use crate::Axum; - #[tokio::test] - async fn should_fail_when_the_url_query_component_is_empty() { - let http_tracker_server = start_public_http_tracker(Version::Axum).await; - let response = Client::new(http_tracker_server.get_connection_info()).get("scrape").await; + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_request_is_empty() { + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let response = Client::new(test_env.bind_address().clone()).get("scrape").await; assert_missing_query_params_for_scrape_request_error_response(response).await; } #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let http_tracker_server = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let mut params = QueryBuilder::default().query().params(); for invalid_value in &invalid_info_hashes() { params.set_one_info_hash_param(invalid_value); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!("announce?{params}")) .await; @@ -2120,11 +2162,12 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let http_tracker = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + 
test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -2134,7 +2177,7 @@ mod axum_http_tracker_server { ) .await; - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2158,11 +2201,12 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { - let http_tracker = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -2172,7 +2216,7 @@ mod axum_http_tracker_server { ) .await; - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2196,11 +2240,12 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let http_tracker = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2213,12 +2258,13 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_accept_multiple_infohashes() { - let http_tracker = start_public_http_tracker(Version::Axum).await; + let test_env 
= + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .add_info_hash(&info_hash1) @@ -2237,11 +2283,12 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let http_tracker = start_public_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::new(http_tracker.get_connection_info()) + Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2249,18 +2296,19 @@ mod axum_http_tracker_server { ) .await; - let stats = http_tracker.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_scrapes_handled, 1); } #[tokio::test] async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { - let http_tracker = start_ipv6_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::bind(http_tracker.get_connection_info(), IpAddr::from_str("::1").unwrap()) + Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2268,7 +2316,7 @@ mod 
axum_http_tracker_server { ) .await; - let stats = http_tracker.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_scrapes_handled, 1); } @@ -2280,21 +2328,23 @@ mod axum_http_tracker_server { mod and_receiving_an_announce_request { use std::str::FromStr; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; use crate::http::client::Client; use crate::http::requests::announce::QueryBuilder; - use crate::http::server::start_whitelisted_http_tracker; + use crate::http::test_environment::running_test_environment; + use crate::Axum; #[tokio::test] async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let http_tracker_server = start_whitelisted_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -2303,17 +2353,19 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_allow_announcing_a_whitelisted_torrent() { - let http_tracker_server = start_whitelisted_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker_server + test_env .tracker .add_torrent_to_whitelist(&info_hash) .await .expect("should add the torrent to the whitelist"); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = 
Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -2324,7 +2376,6 @@ mod axum_http_tracker_server { mod receiving_an_scrape_request { use std::str::FromStr; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; @@ -2333,15 +2384,18 @@ mod axum_http_tracker_server { use crate::http::client::Client; use crate::http::requests; use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::server::start_whitelisted_http_tracker; + use crate::http::test_environment::running_test_environment; + use crate::Axum; #[tokio::test] async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let http_tracker = start_whitelisted_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -2351,7 +2405,7 @@ mod axum_http_tracker_server { ) .await; - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2366,11 +2420,13 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let http_tracker = start_whitelisted_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -2380,13 +2436,13 @@ mod 
axum_http_tracker_server { ) .await; - http_tracker + test_env .tracker .add_torrent_to_whitelist(&info_hash) .await .expect("should add the torrent to the whitelist"); - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2416,26 +2472,23 @@ mod axum_http_tracker_server { use std::str::FromStr; use std::time::Duration; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::Key; use crate::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; use crate::http::client::Client; use crate::http::requests::announce::QueryBuilder; - use crate::http::server::start_private_http_tracker; + use crate::http::test_environment::running_test_environment; + use crate::Axum; #[tokio::test] async fn should_respond_to_authenticated_peers() { - let http_tracker_server = start_private_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; - let key = http_tracker_server - .tracker - .generate_auth_key(Duration::from_secs(60)) - .await - .unwrap(); + let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(http_tracker_server.get_connection_info(), key.id()) + let response = Client::authenticated(test_env.bind_address().clone(), key.id()) .announce(&QueryBuilder::default().query()) .await; @@ -2444,11 +2497,12 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let http_tracker_server = start_private_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -2457,11 +2511,12 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let http_tracker_server = start_private_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; let invalid_key = "INVALID_KEY"; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!( "announce/{invalid_key}?info_hash=%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0" )) @@ -2472,12 +2527,13 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { - let http_tracker_server = start_private_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; // The tracker does not have this key let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let response = Client::authenticated(http_tracker_server.get_connection_info(), unregistered_key) + let response = Client::authenticated(test_env.bind_address().clone(), unregistered_key) .announce(&QueryBuilder::default().query()) .await; @@ -2490,7 +2546,6 @@ mod axum_http_tracker_server { use std::str::FromStr; use std::time::Duration; - use torrust_tracker::http::Version; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::Key; use 
torrust_tracker::tracker::peer; @@ -2500,15 +2555,17 @@ mod axum_http_tracker_server { use crate::http::client::Client; use crate::http::requests; use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::server::start_private_http_tracker; + use crate::http::test_environment::running_test_environment; + use crate::Axum; #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let http_tracker_server = start_private_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; let invalid_key = "INVALID_KEY"; - let response = Client::new(http_tracker_server.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .get(&format!( "scrape/{invalid_key}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" )) @@ -2519,11 +2576,12 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let http_tracker = start_private_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -2533,7 +2591,7 @@ mod axum_http_tracker_server { ) .await; - let response = Client::new(http_tracker.get_connection_info()) + let response = Client::new(test_env.bind_address().clone()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2548,11 +2606,12 @@ mod axum_http_tracker_server { #[tokio::test] async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let http_tracker = start_private_http_tracker(Version::Axum).await; + let test_env = + 
running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -2562,9 +2621,9 @@ mod axum_http_tracker_server { ) .await; - let key = http_tracker.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(http_tracker.get_connection_info(), key.id()) + let response = Client::authenticated(test_env.bind_address().clone(), key.id()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2591,11 +2650,12 @@ mod axum_http_tracker_server { // There is not authentication error // code-review: should this really be this way? - let http_tracker = start_private_http_tracker(Version::Axum).await; + let test_env = + running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - http_tracker + test_env .add_torrent_peer( &info_hash, &PeerBuilder::default() @@ -2607,7 +2667,7 @@ mod axum_http_tracker_server { let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); - let response = Client::authenticated(http_tracker.get_connection_info(), false_key) + let response = Client::authenticated(test_env.bind_address().clone(), false_key) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) diff --git a/tests/udp/test_environment.rs b/tests/udp/test_environment.rs index 585a8f934..f805d9a05 100644 --- a/tests/udp/test_environment.rs +++ b/tests/udp/test_environment.rs @@ -6,7 +6,7 @@ use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; use torrust_tracker::udp::server::{RunningUdpServer, 
StoppedUdpServer, UdpServer}; -use crate::common::tracker::{tracker_configuration, tracker_instance}; +use crate::common::tracker::new_tracker; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment; @@ -88,9 +88,9 @@ pub async fn running_test_environment() -> RunningTestEnvironment { } pub fn udp_server() -> StoppedUdpServer { - let config = tracker_configuration(); + let config = Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()); - let tracker = tracker_instance(&config); + let tracker = new_tracker(config.clone()); UdpServer::new(config.udp_trackers[0].clone(), tracker) } From fac2be86cedc8e4df650766653d963dee046aae0 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 3 Mar 2023 01:35:03 +0100 Subject: [PATCH 393/435] fix: all http tracker tests --- src/apis/server.rs | 2 +- .../{server.rs => launcher.rs} | 18 +- src/http/axum_implementation/mod.rs | 2 +- src/http/axum_implementation/routes.rs | 2 +- .../{server.rs => launcher.rs} | 6 +- src/http/warp_implementation/mod.rs | 2 +- src/jobs/http_tracker.rs | 8 +- src/main.rs | 2 +- src/tracker/mod.rs | 29 +-- src/tracker/services/common.rs | 2 +- src/tracker/services/statistics.rs | 2 +- src/tracker/services/torrent.rs | 14 +- src/udp/handlers.rs | 22 +- tests/common/tracker.rs | 15 +- tests/http/test_environment.rs | 18 +- tests/http_tracker.rs | 232 +++++++++++++++++- 16 files changed, 283 insertions(+), 93 deletions(-) rename src/http/axum_implementation/{server.rs => launcher.rs} (90%) rename src/http/warp_implementation/{server.rs => launcher.rs} (97%) diff --git a/src/apis/server.rs b/src/apis/server.rs index 4594456fb..8d4c703b7 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -201,7 +201,7 @@ mod tests { async fn it_should_be_able_to_start_from_stopped_state_and_then_stop_again() { let cfg = tracker_configuration(); - let tracker = Arc::new(tracker::Tracker::new(&cfg, None, statistics::Repo::new()).unwrap()); + let tracker = 
Arc::new(tracker::Tracker::new(cfg.clone(), None, statistics::Repo::new()).unwrap()); let stopped_api_server = ApiServer::new(cfg.http_api.clone(), tracker); diff --git a/src/http/axum_implementation/server.rs b/src/http/axum_implementation/launcher.rs similarity index 90% rename from src/http/axum_implementation/server.rs rename to src/http/axum_implementation/launcher.rs index a12d60332..95fa9f2b7 100644 --- a/src/http/axum_implementation/server.rs +++ b/src/http/axum_implementation/launcher.rs @@ -19,9 +19,9 @@ pub enum Error { Error(String), } -pub struct Server; +pub struct Launcher; -impl Server { +impl Launcher { pub fn start_from_tcp_listener_with_graceful_shutdown( tcp_listener: std::net::TcpListener, tracker: Arc, @@ -30,12 +30,12 @@ impl Server { where F: Future + Send + 'static, { - let app = router(&tracker); + let app = router(tracker); Box::pin(async { axum::Server::from_tcp(tcp_listener) .expect("Could not bind to tcp listener.") - .serve(app.into_make_service()) + .serve(app.into_make_service_with_connect_info::()) .with_graceful_shutdown(shutdown_signal) .await .expect("Axum server crashed."); @@ -51,7 +51,7 @@ impl Server { where F: Future + Send + 'static, { - let app = router(&tracker); + let app = router(tracker); let handle = Handle::new(); @@ -69,7 +69,7 @@ impl Server { axum_server::from_tcp_rustls(tcp_listener, tls_config) .handle(handle) - .serve(app.into_make_service()) + .serve(app.into_make_service_with_connect_info::()) .await .expect("Axum server crashed."); }) @@ -77,7 +77,7 @@ impl Server { } #[async_trait] -impl HttpServerLauncher for Server { +impl HttpServerLauncher for Launcher { fn new() -> Self { Self {} } @@ -114,7 +114,7 @@ impl HttpServerLauncher for Server { } } -pub fn start(socket_addr: std::net::SocketAddr, tracker: &Arc) -> impl Future> { +pub fn start(socket_addr: std::net::SocketAddr, tracker: Arc) -> impl Future> { let app = router(tracker); let server = 
axum::Server::bind(&socket_addr).serve(app.into_make_service_with_connect_info::()); @@ -128,7 +128,7 @@ pub fn start(socket_addr: std::net::SocketAddr, tracker: &Arc) -> impl pub fn start_tls( socket_addr: std::net::SocketAddr, ssl_config: RustlsConfig, - tracker: &Arc, + tracker: Arc, ) -> impl Future> { let app = router(tracker); diff --git a/src/http/axum_implementation/mod.rs b/src/http/axum_implementation/mod.rs index ecc60e1f8..79d230255 100644 --- a/src/http/axum_implementation/mod.rs +++ b/src/http/axum_implementation/mod.rs @@ -1,8 +1,8 @@ pub mod extractors; pub mod handlers; +pub mod launcher; pub mod query; pub mod requests; pub mod responses; pub mod routes; -pub mod server; pub mod services; diff --git a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs index af987ece2..b0f30453d 100644 --- a/src/http/axum_implementation/routes.rs +++ b/src/http/axum_implementation/routes.rs @@ -7,7 +7,7 @@ use axum_client_ip::SecureClientIpSource; use super::handlers::{announce, scrape}; use crate::tracker::Tracker; -pub fn router(tracker: &Arc) -> Router { +pub fn router(tracker: Arc) -> Router { Router::new() // Announce request .route("/announce", get(announce::handle_without_key).with_state(tracker.clone())) diff --git a/src/http/warp_implementation/server.rs b/src/http/warp_implementation/launcher.rs similarity index 97% rename from src/http/warp_implementation/server.rs rename to src/http/warp_implementation/launcher.rs index 8d01559f3..777bd930b 100644 --- a/src/http/warp_implementation/server.rs +++ b/src/http/warp_implementation/launcher.rs @@ -15,9 +15,9 @@ pub enum Error { Error(String), } -pub struct Server; +pub struct Launcher; -impl Server { +impl Launcher { pub fn start_with_graceful_shutdown( addr: SocketAddr, tracker: Arc, @@ -50,7 +50,7 @@ impl Server { } } -impl HttpServerLauncher for Server { +impl HttpServerLauncher for Launcher { fn new() -> Self { Self {} } diff --git a/src/http/warp_implementation/mod.rs 
b/src/http/warp_implementation/mod.rs index 2ceda2e68..c0e046f4f 100644 --- a/src/http/warp_implementation/mod.rs +++ b/src/http/warp_implementation/mod.rs @@ -2,11 +2,11 @@ pub mod error; pub mod filter_helpers; pub mod filters; pub mod handlers; +pub mod launcher; pub mod peer_builder; pub mod request; pub mod response; pub mod routes; -pub mod server; use warp::Rejection; diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index ce546f608..40caa8e88 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -7,8 +7,8 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker_configuration::HttpTracker; -use crate::http::axum_implementation::server; -use crate::http::warp_implementation::server::Http; +use crate::http::axum_implementation::launcher; +use crate::http::warp_implementation::launcher::Http; use crate::http::Version; use crate::tracker; @@ -98,7 +98,7 @@ async fn start_axum(config: &HttpTracker, tracker: Arc) -> Joi if !ssl_enabled { info!("Starting Torrust HTTP tracker server on: http://{}", bind_addr); - let handle = server::start(bind_addr, &tracker); + let handle = launcher::start(bind_addr, tracker); tx.send(ServerJobStarted()) .expect("the HTTP tracker server should not be dropped"); @@ -113,7 +113,7 @@ async fn start_axum(config: &HttpTracker, tracker: Arc) -> Joi .await .unwrap(); - let handle = server::start_tls(bind_addr, ssl_config, &tracker); + let handle = launcher::start_tls(bind_addr, ssl_config, tracker); tx.send(ServerJobStarted()) .expect("the HTTP tracker server should not be dropped"); diff --git a/src/main.rs b/src/main.rs index fcb8331a4..b0cc68b12 100644 --- a/src/main.rs +++ b/src/main.rs @@ -30,7 +30,7 @@ async fn main() { let (stats_event_sender, stats_repository) = setup_statistics(config.tracker_usage_statistics); // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(&config.clone(), stats_event_sender, stats_repository) { + let tracker = match 
tracker::Tracker::new(config.clone(), stats_event_sender, stats_repository) { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 18ada69e0..874233d91 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -91,15 +91,17 @@ impl Tracker { /// /// Will return a `databases::error::Error` if unable to connect to database. pub fn new( - config: &Arc, + config: Arc, stats_event_sender: Option>, stats_repository: statistics::Repo, ) -> Result { let database = databases::driver::build(&config.db_driver, &config.db_path)?; + let mode = config.mode; + Ok(Tracker { - config: config.clone(), - mode: config.mode, + config, + mode, keys: RwLock::new(std::collections::HashMap::new()), whitelist: RwLock::new(std::collections::HashSet::new()), torrents: RwLock::new(std::collections::BTreeMap::new()), @@ -550,17 +552,15 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; - use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::TrackerMode; - use torrust_tracker_test_helpers::configuration::{self}; + use torrust_tracker_test_helpers::configuration; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer::{self, Peer}; - use crate::tracker::statistics::Keeper; + use crate::tracker::services::common::tracker_factory; use crate::tracker::{TorrentsMetrics, Tracker}; pub fn public_tracker() -> Tracker { @@ -587,21 +587,6 @@ mod tests { tracker_factory(configuration) } - pub fn tracker_factory(configuration: Configuration) -> Tracker { - // code-review: the tracker initialization is duplicated in many places. Consider make this function public. 
- - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - match Tracker::new(&Arc::new(configuration), Some(stats_event_sender), stats_repository) { - Ok(tracker) => tracker, - Err(error) => { - panic!("{}", error) - } - } - } - fn sample_info_hash() -> InfoHash { "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() } diff --git a/src/tracker/services/common.rs b/src/tracker/services/common.rs index 39aa3cc0b..757725263 100644 --- a/src/tracker/services/common.rs +++ b/src/tracker/services/common.rs @@ -9,7 +9,7 @@ use crate::tracker::Tracker; /// /// Will panic if tracker cannot be instantiated. #[must_use] -pub fn tracker_factory(configuration: &Arc) -> Tracker { +pub fn tracker_factory(configuration: Arc) -> Tracker { // todo: the tracker initialization is duplicated in many places. // Initialize stats tracker diff --git a/src/tracker/services/statistics.rs b/src/tracker/services/statistics.rs index 94a9b1bd5..35fd49db5 100644 --- a/src/tracker/services/statistics.rs +++ b/src/tracker/services/statistics.rs @@ -48,7 +48,7 @@ mod tests { #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let tracker = Arc::new(tracker_factory(tracker_configuration())); let tracker_metrics = get_metrics(tracker.clone()).await; diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index fc5686e23..50b17744e 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -150,7 +150,7 @@ mod tests { #[tokio::test] async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let tracker = Arc::new(tracker_factory(tracker_configuration())); let torrent_info = get_torrent_info( tracker.clone(), @@ -163,7 +163,7 @@ mod tests { #[tokio::test] async fn 
should_return_the_torrent_info_if_the_tracker_has_the_torrent() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let tracker = Arc::new(tracker_factory(tracker_configuration())); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -204,7 +204,7 @@ mod tests { #[tokio::test] async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let tracker = Arc::new(tracker_factory(tracker_configuration())); let torrents = get_torrents(tracker.clone(), &Pagination::default()).await; @@ -213,7 +213,7 @@ mod tests { #[tokio::test] async fn should_return_a_summarized_info_for_all_torrents() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let tracker = Arc::new(tracker_factory(tracker_configuration())); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -237,7 +237,7 @@ mod tests { #[tokio::test] async fn should_allow_limiting_the_number_of_torrents_in_the_result() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let tracker = Arc::new(tracker_factory(tracker_configuration())); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -261,7 +261,7 @@ mod tests { #[tokio::test] async fn should_allow_using_pagination_in_the_result() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let tracker = Arc::new(tracker_factory(tracker_configuration())); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -294,7 +294,7 @@ mod tests { #[tokio::test] async fn should_return_torrents_ordered_by_info_hash() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let tracker = 
Arc::new(tracker_factory(tracker_configuration())); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 411590d2f..074f362f4 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -266,20 +266,20 @@ mod tests { fn initialized_public_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Public).into()); - initialized_tracker(&configuration) + initialized_tracker(configuration) } fn initialized_private_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Private).into()); - initialized_tracker(&configuration) + initialized_tracker(configuration) } fn initialized_whitelisted_tracker() -> Arc { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Listed).into()); - initialized_tracker(&configuration) + initialized_tracker(configuration) } - fn initialized_tracker(configuration: &Arc) -> Arc { + fn initialized_tracker(configuration: Arc) -> Arc { let (stats_event_sender, stats_repository) = statistics::Keeper::new_active_instance(); Arc::new(tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()) } @@ -436,7 +436,7 @@ mod tests { let client_socket_address = sample_ipv4_socket_address(); let torrent_tracker = Arc::new( - tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_connect(client_socket_address, &sample_connect_request(), torrent_tracker) .await @@ -454,7 +454,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let torrent_tracker = Arc::new( - tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), 
statistics::Repo::new()).unwrap(), + tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), torrent_tracker) .await @@ -697,7 +697,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_announce( @@ -927,7 +927,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); let remote_addr = sample_ipv6_remote_addr(); @@ -959,7 +959,7 @@ mod tests { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); let tracker = - Arc::new(tracker::Tracker::new(&configuration, Some(stats_event_sender), stats_repository).unwrap()); + Arc::new(tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); @@ -1242,7 +1242,7 @@ mod tests { let remote_addr = sample_ipv4_remote_addr(); let tracker = Arc::new( - tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) @@ -1274,7 +1274,7 @@ mod tests { let remote_addr = 
sample_ipv6_remote_addr(); let tracker = Arc::new( - tracker::Tracker::new(&tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) diff --git a/tests/common/tracker.rs b/tests/common/tracker.rs index 7451bbc36..ed2d8392b 100644 --- a/tests/common/tracker.rs +++ b/tests/common/tracker.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use torrust_tracker::tracker::statistics::Keeper; +use torrust_tracker::tracker::services::common::tracker_factory; use torrust_tracker::tracker::Tracker; use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; @@ -12,19 +12,8 @@ pub fn new_tracker(configuration: Arc Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; - // Initialize logging logging::setup(&configuration); - tracker + Arc::new(tracker_factory(configuration)) } diff --git a/tests/http/test_environment.rs b/tests/http/test_environment.rs index a2cb4619c..40e504b08 100644 --- a/tests/http/test_environment.rs +++ b/tests/http/test_environment.rs @@ -41,7 +41,7 @@ impl TestEnvironment> { let tracker = new_tracker(cfg.clone()); - let http_server = stopped_http_server(cfg.http_trackers[0].clone()); + let http_server = http_server(cfg.http_trackers[0].clone()); Self { cfg, @@ -61,10 +61,12 @@ impl TestEnvironment> { } } + #[allow(dead_code)] pub fn config(&self) -> &torrust_tracker_configuration::HttpTracker { &self.state.http_server.cfg } + #[allow(dead_code)] pub fn config_mut(&mut self) -> &mut torrust_tracker_configuration::HttpTracker { &mut self.state.http_server.cfg } @@ -91,12 +93,13 @@ impl TestEnvironment> { &self.state.http_server.state.bind_addr } + #[allow(dead_code)] pub fn config(&self) -> &torrust_tracker_configuration::HttpTracker { &self.state.http_server.cfg } } -#[allow(clippy::module_name_repetitions)] 
+#[allow(clippy::module_name_repetitions, dead_code)] pub fn stopped_test_environment( cfg: torrust_tracker_configuration::Configuration, ) -> StoppedTestEnvironment { @@ -110,17 +113,8 @@ pub async fn running_test_environment( TestEnvironment::new_running(cfg).await } -pub fn stopped_http_server( - cfg: torrust_tracker_configuration::HttpTracker, -) -> StoppedHttpServer { +pub fn http_server(cfg: torrust_tracker_configuration::HttpTracker) -> StoppedHttpServer { let http_server = I::new(); HttpServer::new(cfg, http_server) } - -pub async fn running_http_server( - cfg: torrust_tracker_configuration::HttpTracker, - tracker: Arc, -) -> RunningHttpServer { - stopped_http_server(cfg).start(tracker).await.unwrap() -} diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 22a6c44ff..d29f674e6 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -12,8 +12,8 @@ mod common; mod http; -pub type Axum = torrust_tracker::http::axum_implementation::server::Server; -pub type Warp = torrust_tracker::http::warp_implementation::server::Server; +pub type Axum = torrust_tracker::http::axum_implementation::launcher::Launcher; +pub type Warp = torrust_tracker::http::warp_implementation::launcher::Launcher; mod test_env_test_environment { use crate::http::test_environment::running_test_environment; @@ -46,7 +46,7 @@ mod warp_test_env { }; use crate::http::client::Client; use crate::http::requests::announce::QueryBuilder; - use crate::http::test_environment::{running_test_environment, stopped_test_environment}; + use crate::http::test_environment::running_test_environment; use crate::Warp; #[tokio::test] @@ -65,6 +65,8 @@ mod warp_test_env { .await; assert_could_not_find_remote_address_on_xff_header_error_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -80,6 +82,8 @@ mod warp_test_env { .await; assert_invalid_remote_address_on_xff_header_error_response(response).await; + + test_env.stop().await; } } @@ -132,6 +136,8 @@ mod warp_test_env { 
.await; assert_is_announce_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -182,6 +188,8 @@ mod warp_test_env { .await; assert_internal_server_error_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -199,6 +207,8 @@ mod warp_test_env { assert_invalid_info_hash_error_response(response).await; } + + test_env.stop().await; } #[tokio::test] @@ -219,6 +229,8 @@ mod warp_test_env { .await; assert_is_announce_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -238,6 +250,8 @@ mod warp_test_env { assert_internal_server_error_response(response).await; } + + test_env.stop().await; } #[tokio::test] @@ -257,6 +271,8 @@ mod warp_test_env { assert_internal_server_error_response(response).await; } + + test_env.stop().await; } #[tokio::test] @@ -283,6 +299,8 @@ mod warp_test_env { assert_invalid_peer_id_error_response(response).await; } + + test_env.stop().await; } #[tokio::test] @@ -302,6 +320,8 @@ mod warp_test_env { assert_internal_server_error_response(response).await; } + + test_env.stop().await; } #[tokio::test] @@ -321,6 +341,8 @@ mod warp_test_env { assert_internal_server_error_response(response).await; } + + test_env.stop().await; } #[tokio::test] @@ -350,6 +372,8 @@ mod warp_test_env { assert_is_announce_response(response).await; } + + test_env.stop().await; } #[tokio::test] @@ -369,6 +393,8 @@ mod warp_test_env { assert_internal_server_error_response(response).await; } + + test_env.stop().await; } #[tokio::test] @@ -395,6 +421,8 @@ mod warp_test_env { }, ) .await; + + test_env.stop().await; } #[tokio::test] @@ -434,6 +462,8 @@ mod warp_test_env { }, ) .await; + + test_env.stop().await; } #[tokio::test] @@ -457,6 +487,8 @@ mod warp_test_env { let response = Client::new(test_env.bind_address().clone()).announce(&announce_query).await; assert_empty_announce_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -497,6 +529,8 @@ mod warp_test_env { }; 
assert_compact_announce_response(response, &expected_response).await; + + test_env.stop().await; } #[tokio::test] @@ -531,6 +565,8 @@ mod warp_test_env { .await; assert!(!is_a_compact_announce_response(response).await); + + test_env.stop().await; } async fn is_a_compact_announce_response(response: Response) -> bool { @@ -551,6 +587,10 @@ mod warp_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_connections_handled, 1); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -565,6 +605,10 @@ mod warp_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 1); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -585,6 +629,10 @@ mod warp_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 0); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -599,6 +647,10 @@ mod warp_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_announces_handled, 1); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -613,6 +665,10 @@ mod warp_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 1); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -633,6 +689,10 @@ mod warp_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 0); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -657,6 +717,8 @@ mod warp_test_env { assert_eq!(peer_addr.ip(), client_ip); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; } #[tokio::test] @@ -692,6 +754,8 @@ mod warp_test_env { assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; } #[tokio::test] @@ -727,6 +791,8 @@ mod warp_test_env { assert_eq!(peer_addr.ip(), 
test_env.tracker.config.get_ext_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; } #[tokio::test] @@ -760,6 +826,8 @@ mod warp_test_env { let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); + + test_env.stop().await; } } @@ -795,6 +863,8 @@ mod warp_test_env { let response = Client::new(test_env.bind_address().clone()).get("scrape").await; assert_internal_server_error_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -814,6 +884,8 @@ mod warp_test_env { // code-review: it's not returning the invalid info hash error assert_internal_server_error_response(response).await; } + + test_env.stop().await; } #[tokio::test] @@ -853,6 +925,8 @@ mod warp_test_env { .build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -892,6 +966,8 @@ mod warp_test_env { .build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -910,6 +986,8 @@ mod warp_test_env { .await; assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; + + test_env.stop().await; } #[tokio::test] @@ -935,6 +1013,8 @@ mod warp_test_env { .build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -955,6 +1035,10 @@ mod warp_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_scrapes_handled, 1); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -975,6 +1059,10 @@ mod warp_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_scrapes_handled, 1); + + drop(stats); + + test_env.stop().await; } } } @@ -1005,6 +1093,8 @@ mod warp_test_env { .await; assert_torrent_not_in_whitelist_error_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -1027,6 
+1117,8 @@ mod warp_test_env { .await; assert_is_announce_response(response).await; + + test_env.stop().await; } } @@ -1073,6 +1165,8 @@ mod warp_test_env { let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -1119,6 +1213,8 @@ mod warp_test_env { .build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } } } @@ -1153,6 +1249,8 @@ mod warp_test_env { .await; assert_is_announce_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -1182,6 +1280,8 @@ mod warp_test_env { .await; assert_warp_invalid_authentication_key_error_response(response).await; + + test_env.stop().await; } } @@ -1230,6 +1330,8 @@ mod warp_test_env { let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -1271,6 +1373,8 @@ mod warp_test_env { .build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -1305,6 +1409,8 @@ mod warp_test_env { let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } } } @@ -1347,6 +1453,8 @@ mod axum_test_env { .await; assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -1362,6 +1470,8 @@ mod axum_test_env { .await; assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; + + test_env.stop().await; } } @@ -1413,6 +1523,8 @@ mod axum_test_env { .await; assert_is_announce_response(response).await; + + test_env.stop().await; } 
#[tokio::test] @@ -1422,6 +1534,8 @@ mod axum_test_env { let response = Client::new(test_env.bind_address().clone()).get("announce").await; assert_missing_query_params_for_announce_request_error_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -1435,6 +1549,8 @@ mod axum_test_env { .await; assert_cannot_parse_query_param_error_response(response, "invalid param a=b=c").await; + + test_env.stop().await; } #[tokio::test] @@ -1476,6 +1592,8 @@ mod axum_test_env { .await; assert_bad_announce_request_error_response(response, "missing param port").await; + + test_env.stop().await; } #[tokio::test] @@ -1493,6 +1611,8 @@ mod axum_test_env { assert_cannot_parse_query_params_error_response(response, "").await; } + + test_env.stop().await; } #[tokio::test] @@ -1513,6 +1633,8 @@ mod axum_test_env { .await; assert_is_announce_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -1532,6 +1654,8 @@ mod axum_test_env { assert_bad_announce_request_error_response(response, "invalid param value").await; } + + test_env.stop().await; } #[tokio::test] @@ -1551,6 +1675,8 @@ mod axum_test_env { assert_bad_announce_request_error_response(response, "invalid param value").await; } + + test_env.stop().await; } #[tokio::test] @@ -1577,6 +1703,8 @@ mod axum_test_env { assert_bad_announce_request_error_response(response, "invalid param value").await; } + + test_env.stop().await; } #[tokio::test] @@ -1596,6 +1724,8 @@ mod axum_test_env { assert_bad_announce_request_error_response(response, "invalid param value").await; } + + test_env.stop().await; } #[tokio::test] @@ -1615,6 +1745,8 @@ mod axum_test_env { assert_bad_announce_request_error_response(response, "invalid param value").await; } + + test_env.stop().await; } #[tokio::test] @@ -1642,6 +1774,8 @@ mod axum_test_env { assert_bad_announce_request_error_response(response, "invalid param value").await; } + + test_env.stop().await; } #[tokio::test] @@ -1661,6 +1795,8 @@ mod axum_test_env { 
assert_bad_announce_request_error_response(response, "invalid param value").await; } + + test_env.stop().await; } #[tokio::test] @@ -1687,6 +1823,8 @@ mod axum_test_env { }, ) .await; + + test_env.stop().await; } #[tokio::test] @@ -1726,6 +1864,8 @@ mod axum_test_env { }, ) .await; + + test_env.stop().await; } #[tokio::test] @@ -1775,6 +1915,8 @@ mod axum_test_env { }, ) .await; + + test_env.stop().await; } #[tokio::test] @@ -1798,6 +1940,8 @@ mod axum_test_env { let response = Client::new(test_env.bind_address().clone()).announce(&announce_query).await; assert_empty_announce_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -1838,6 +1982,8 @@ mod axum_test_env { }; assert_compact_announce_response(response, &expected_response).await; + + test_env.stop().await; } #[tokio::test] @@ -1872,6 +2018,8 @@ mod axum_test_env { .await; assert!(!is_a_compact_announce_response(response).await); + + test_env.stop().await; } async fn is_a_compact_announce_response(response: Response) -> bool { @@ -1892,6 +2040,10 @@ mod axum_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_connections_handled, 1); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -1906,6 +2058,10 @@ mod axum_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 1); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -1926,6 +2082,10 @@ mod axum_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 0); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -1933,13 +2093,19 @@ mod axum_test_env { let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; - Client::new(test_env.bind_address().clone()) + let res = Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().query()) .await; + println!("{:?}", res.text().await.unwrap()); + let stats = 
test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_announces_handled, 1); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -1954,6 +2120,10 @@ mod axum_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 1); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -1974,6 +2144,10 @@ mod axum_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 0); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -1998,6 +2172,8 @@ mod axum_test_env { assert_eq!(peer_addr.ip(), client_ip); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; } #[tokio::test] @@ -2033,6 +2209,8 @@ mod axum_test_env { assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; } #[tokio::test] @@ -2068,6 +2246,8 @@ mod axum_test_env { assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; } #[tokio::test] @@ -2101,6 +2281,8 @@ mod axum_test_env { let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); + + test_env.stop().await; } } @@ -2140,6 +2322,8 @@ mod axum_test_env { let response = Client::new(test_env.bind_address().clone()).get("scrape").await; assert_missing_query_params_for_scrape_request_error_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -2158,6 +2342,8 @@ mod axum_test_env { assert_cannot_parse_query_params_error_response(response, "").await; } + + test_env.stop().await; } #[tokio::test] @@ -2197,6 +2383,8 @@ mod axum_test_env { .build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -2236,6 +2424,8 @@ mod axum_test_env { .build(); 
assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -2254,6 +2444,8 @@ mod axum_test_env { .await; assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; + + test_env.stop().await; } #[tokio::test] @@ -2279,6 +2471,8 @@ mod axum_test_env { .build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -2299,6 +2493,10 @@ mod axum_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_scrapes_handled, 1); + + drop(stats); + + test_env.stop().await; } #[tokio::test] @@ -2319,6 +2517,10 @@ mod axum_test_env { let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp6_scrapes_handled, 1); + + drop(stats); + + test_env.stop().await; } } } @@ -2349,6 +2551,8 @@ mod axum_test_env { .await; assert_torrent_not_in_whitelist_error_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -2370,6 +2574,8 @@ mod axum_test_env { .await; assert_is_announce_response(response).await; + + test_env.stop().await; } } @@ -2416,6 +2622,8 @@ mod axum_test_env { let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -2462,6 +2670,8 @@ mod axum_test_env { .build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } } } @@ -2493,6 +2703,8 @@ mod axum_test_env { .await; assert_is_announce_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -2507,6 +2719,8 @@ mod axum_test_env { .await; assert_authentication_error_response(response).await; + + test_env.stop().await; } #[tokio::test] @@ -2533,11 +2747,13 @@ mod axum_test_env { // The tracker does not have this key let unregistered_key = 
Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let response = Client::authenticated(test_env.bind_address().clone(), unregistered_key) + let response = Client::authenticated(test_env.bind_address(), unregistered_key) .announce(&QueryBuilder::default().query()) .await; assert_authentication_error_response(response).await; + + test_env.stop().await; } } @@ -2602,6 +2818,8 @@ mod axum_test_env { let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -2643,6 +2861,8 @@ mod axum_test_env { .build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } #[tokio::test] @@ -2678,6 +2898,8 @@ mod axum_test_env { let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; } } } From 5b95b5d596278db48bda925a3414aec764de2ea5 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 3 Mar 2023 11:59:52 +0100 Subject: [PATCH 394/435] refactor: renamed `tracker_interface` to `server` and shortened `configuration::ephemeral` calls --- src/apis/server.rs | 3 +- src/http/axum_implementation/launcher.rs | 2 +- src/http/mod.rs | 2 +- src/http/{tracker_interface.rs => server.rs} | 1 + src/http/warp_implementation/launcher.rs | 2 +- src/tracker/services/statistics.rs | 3 +- src/tracker/services/torrent.rs | 6 +- src/udp/handlers.rs | 3 +- tests/api/test_environment.rs | 3 +- tests/http/test_environment.rs | 2 +- tests/http_tracker.rs | 322 +++++++------------ tests/udp/test_environment.rs | 3 +- 12 files changed, 144 insertions(+), 208 deletions(-) rename src/http/{tracker_interface.rs => server.rs} (98%) diff --git a/src/apis/server.rs b/src/apis/server.rs index 8d4c703b7..4c8fbaada 100644 --- a/src/apis/server.rs +++ 
b/src/apis/server.rs @@ -188,13 +188,14 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; + use torrust_tracker_test_helpers::configuration; use crate::apis::server::ApiServer; use crate::tracker; use crate::tracker::statistics; fn tracker_configuration() -> Arc { - Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) + Arc::new(configuration::ephemeral()) } #[tokio::test] diff --git a/src/http/axum_implementation/launcher.rs b/src/http/axum_implementation/launcher.rs index 95fa9f2b7..a49efd11d 100644 --- a/src/http/axum_implementation/launcher.rs +++ b/src/http/axum_implementation/launcher.rs @@ -11,7 +11,7 @@ use log::info; use warp::hyper; use super::routes::router; -use crate::http::tracker_interface::HttpServerLauncher; +use crate::http::server::HttpServerLauncher; use crate::tracker::Tracker; #[derive(Debug)] diff --git a/src/http/mod.rs b/src/http/mod.rs index c2cbb43df..b4841c0af 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -14,7 +14,7 @@ use serde::{Deserialize, Serialize}; pub mod axum_implementation; pub mod percent_encoding; -pub mod tracker_interface; +pub mod server; pub mod warp_implementation; #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] diff --git a/src/http/tracker_interface.rs b/src/http/server.rs similarity index 98% rename from src/http/tracker_interface.rs rename to src/http/server.rs index fc4ba9c95..e7b6c1888 100644 --- a/src/http/tracker_interface.rs +++ b/src/http/server.rs @@ -32,6 +32,7 @@ pub type StoppedHttpServer = HttpServer>; #[allow(clippy::module_name_repetitions)] pub type RunningHttpServer = HttpServer>; +#[allow(clippy::module_name_repetitions)] pub struct HttpServer { pub cfg: torrust_tracker_configuration::HttpTracker, pub state: S, diff --git a/src/http/warp_implementation/launcher.rs b/src/http/warp_implementation/launcher.rs index 777bd930b..46ec2bf3c 100644 --- a/src/http/warp_implementation/launcher.rs +++ 
b/src/http/warp_implementation/launcher.rs @@ -6,7 +6,7 @@ use std::sync::Arc; use futures::future::BoxFuture; use super::routes; -use crate::http::tracker_interface::HttpServerLauncher; +use crate::http::server::HttpServerLauncher; use crate::tracker; use crate::tracker::Tracker; diff --git a/src/tracker/services/statistics.rs b/src/tracker/services/statistics.rs index 35fd49db5..28cd0b962 100644 --- a/src/tracker/services/statistics.rs +++ b/src/tracker/services/statistics.rs @@ -37,13 +37,14 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; + use torrust_tracker_test_helpers::configuration; use crate::tracker; use crate::tracker::services::common::tracker_factory; use crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; pub fn tracker_configuration() -> Arc { - Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) + Arc::new(configuration::ephemeral()) } #[tokio::test] diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index 50b17744e..b04b4e1dc 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -138,6 +138,7 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; + use torrust_tracker_test_helpers::configuration; use crate::protocol::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; @@ -145,7 +146,7 @@ mod tests { use crate::tracker::services::torrent::{get_torrent_info, Info}; pub fn tracker_configuration() -> Arc { - Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) + Arc::new(configuration::ephemeral()) } #[tokio::test] @@ -192,6 +193,7 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; + use torrust_tracker_test_helpers::configuration; use crate::protocol::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; @@ -199,7 +201,7 @@ mod tests { use crate::tracker::services::torrent::{get_torrents, BasicInfo, 
Pagination}; pub fn tracker_configuration() -> Arc { - Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()) + Arc::new(configuration::ephemeral()) } #[tokio::test] diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 074f362f4..211a0d1ba 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -252,6 +252,7 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::TrackerMode; + use torrust_tracker_test_helpers::configuration; use crate::protocol::clock::{Current, Time}; use crate::tracker::{self, peer, statistics}; @@ -261,7 +262,7 @@ mod tests { } fn default_testing_tracker_configuration() -> Configuration { - torrust_tracker_test_helpers::configuration::ephemeral() + configuration::ephemeral() } fn initialized_public_tracker() -> Arc { diff --git a/tests/api/test_environment.rs b/tests/api/test_environment.rs index 1565530c1..1f8708650 100644 --- a/tests/api/test_environment.rs +++ b/tests/api/test_environment.rs @@ -4,6 +4,7 @@ use torrust_tracker::apis::server::{ApiServer, RunningApiServer, StoppedApiServe use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; +use torrust_tracker_test_helpers::configuration; use super::connection_info::ConnectionInfo; use crate::common::tracker::new_tracker; @@ -89,7 +90,7 @@ pub fn running_test_environment() -> RunningTestEnvironment { } pub fn api_server() -> StoppedApiServer { - let config = Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()); + let config = Arc::new(configuration::ephemeral()); let tracker = new_tracker(config.clone()); diff --git a/tests/http/test_environment.rs b/tests/http/test_environment.rs index 40e504b08..459c2fbe6 100644 --- a/tests/http/test_environment.rs +++ b/tests/http/test_environment.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use 
torrust_tracker::http::tracker_interface::{HttpServer, HttpServerLauncher, RunningHttpServer, StoppedHttpServer}; +use torrust_tracker::http::server::{HttpServer, HttpServerLauncher, RunningHttpServer, StoppedHttpServer}; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index d29f674e6..a4e87115a 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -16,19 +16,21 @@ pub type Axum = torrust_tracker::http::axum_implementation::launcher::Launcher; pub type Warp = torrust_tracker::http::warp_implementation::launcher::Launcher; mod test_env_test_environment { + use torrust_tracker_test_helpers::configuration; + use crate::http::test_environment::running_test_environment; use crate::{Axum, Warp}; #[tokio::test] async fn should_be_able_to_start_and_stop_a_test_environment_using_axum() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; test_env.stop().await; } #[tokio::test] async fn should_be_able_to_start_and_stop_a_test_environment_using_warp() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; test_env.stop().await; } @@ -39,6 +41,7 @@ mod warp_test_env { mod for_all_config_modes { mod running_on_reverse_proxy { + use torrust_tracker_test_helpers::configuration; use crate::http::asserts::{ assert_could_not_find_remote_address_on_xff_header_error_response, @@ -54,9 +57,7 @@ mod warp_test_env { // If the tracker is running behind a reverse proxy, the peer IP is the // last IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy client. 
- let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; let params = QueryBuilder::default().query().params(); @@ -71,9 +72,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; let params = QueryBuilder::default().query().params(); @@ -107,6 +106,7 @@ mod warp_test_env { use reqwest::Response; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; use crate::http::asserts::{ @@ -125,7 +125,7 @@ mod warp_test_env { #[tokio::test] async fn should_respond_if_only_the_mandatory_fields_are_provided() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -142,7 +142,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_url_query_component_is_empty() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let response = Client::new(test_env.bind_address().clone()).get("announce").await; @@ -151,7 +151,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_a_mandatory_field_is_missing() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env 
= running_test_environment::(configuration::ephemeral()).await; // Without `info_hash` param @@ -194,7 +194,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -218,7 +218,7 @@ mod warp_test_env { // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP if there. // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request header is tracker is running `on_reverse_proxy`. - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -235,7 +235,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_downloaded_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -256,7 +256,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_uploaded_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -277,7 +277,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_peer_id_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params 
= QueryBuilder::default().query().params(); @@ -305,7 +305,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_port_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -326,7 +326,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_left_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -349,7 +349,7 @@ mod warp_test_env { async fn should_not_fail_when_the_event_param_is_invalid() { // All invalid values are ignored as if the `event` param were empty - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -378,7 +378,7 @@ mod warp_test_env { #[tokio::test] async fn should_not_fail_when_the_compact_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -399,8 +399,7 @@ mod warp_test_env { #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let response = Client::new(test_env.bind_address().clone()) .announce( @@ -427,8 +426,7 @@ mod warp_test_env { 
#[tokio::test] async fn should_return_the_list_of_previously_announced_peers() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -468,8 +466,7 @@ mod warp_test_env { #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let peer = PeerBuilder::default().build(); @@ -496,8 +493,7 @@ mod warp_test_env { // Tracker Returns Compact Peer Lists // https://www.bittorrent.org/beps/bep_0023.html - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -538,8 +534,7 @@ mod warp_test_env { // code-review: the HTTP tracker does not return the compact response by default if the "compact" // param is not provided in the announce URL. The BEP 23 suggest to do so. 
- let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -577,8 +572,7 @@ mod warp_test_env { #[tokio::test] async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().query()) @@ -595,8 +589,7 @@ mod warp_test_env { #[tokio::test] async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) @@ -615,8 +608,7 @@ mod warp_test_env { async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; Client::new(test_env.bind_address().clone()) .announce( @@ -637,8 +629,7 @@ mod warp_test_env { #[tokio::test] async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().query()) @@ -655,8 +646,7 @@ mod warp_test_env { #[tokio::test] async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) @@ -675,8 +665,7 @@ mod warp_test_env { async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; Client::new(test_env.bind_address().clone()) .announce( @@ -697,8 +686,7 @@ mod warp_test_env { #[tokio::test] async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let client_ip = local_ip().unwrap(); @@ -730,11 +718,10 @@ mod warp_test_env { 127.0.0.1 external_ip = "2.137.87.41" */ - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_external_ip( - IpAddr::from_str("2.137.87.41").unwrap(), - )) - .await; + let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( + IpAddr::from_str("2.137.87.41").unwrap(), + )) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); @@ -767,11 +754,10 @@ mod warp_test_env { ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" */ - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_external_ip( - IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), - )) - .await; + let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( + IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), + )) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); @@ -804,9 +790,7 @@ mod 
warp_test_env { 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 */ - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -846,6 +830,7 @@ mod warp_test_env { use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; use crate::http::asserts::{assert_internal_server_error_response, assert_scrape_response}; @@ -858,8 +843,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_request_is_empty() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let response = Client::new(test_env.bind_address().clone()).get("scrape").await; assert_internal_server_error_response(response).await; @@ -869,8 +853,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let mut params = QueryBuilder::default().query().params(); @@ -890,8 +873,7 @@ mod warp_test_env { #[tokio::test] async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let 
info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -931,8 +913,7 @@ mod warp_test_env { #[tokio::test] async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -972,8 +953,7 @@ mod warp_test_env { #[tokio::test] async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -992,8 +972,7 @@ mod warp_test_env { #[tokio::test] async fn should_accept_multiple_infohashes() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); @@ -1019,8 +998,7 @@ mod warp_test_env { #[tokio::test] async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1043,8 +1021,7 @@ mod warp_test_env { #[tokio::test] async 
fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1073,6 +1050,7 @@ mod warp_test_env { use std::str::FromStr; use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; use crate::http::client::Client; @@ -1082,9 +1060,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1100,9 +1076,7 @@ mod warp_test_env { #[tokio::test] async fn should_allow_announcing_a_whitelisted_torrent() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1127,6 +1101,7 @@ mod warp_test_env { use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::PeerBuilder; use crate::http::asserts::assert_scrape_response; @@ -1138,9 +1113,7 @@ mod warp_test_env { #[tokio::test] async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let test_env = - 
running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1171,9 +1144,7 @@ mod warp_test_env { #[tokio::test] async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1239,8 +1210,7 @@ mod warp_test_env { #[tokio::test] async fn should_respond_to_authenticated_peers() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); @@ -1255,8 +1225,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1269,8 +1238,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_if_the_peer_authentication_key_is_not_valid() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; // The tracker does not have this key let 
unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); @@ -1293,6 +1261,7 @@ mod warp_test_env { use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::Key; use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::PeerBuilder; use crate::http::asserts::assert_scrape_response; @@ -1304,8 +1273,7 @@ mod warp_test_env { #[tokio::test] async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1336,8 +1304,7 @@ mod warp_test_env { #[tokio::test] async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1381,8 +1348,7 @@ mod warp_test_env { async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { // There is not authentication error - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1430,6 +1396,7 @@ mod axum_test_env { mod for_all_config_modes { mod and_running_on_reverse_proxy { + use torrust_tracker_test_helpers::configuration; use 
crate::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; use crate::http::client::Client; @@ -1442,9 +1409,7 @@ mod axum_test_env { // If the tracker is running behind a reverse proxy, the peer IP is the // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; let params = QueryBuilder::default().query().params(); @@ -1459,9 +1424,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; let params = QueryBuilder::default().query().params(); @@ -1495,6 +1458,7 @@ mod axum_test_env { use reqwest::Response; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; use crate::http::asserts::{ @@ -1512,7 +1476,7 @@ mod axum_test_env { #[tokio::test] async fn should_respond_if_only_the_mandatory_fields_are_provided() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1529,7 +1493,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_url_query_component_is_empty() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = 
running_test_environment::(configuration::ephemeral()).await; let response = Client::new(test_env.bind_address().clone()).get("announce").await; @@ -1540,7 +1504,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_url_query_parameters_are_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let invalid_query_param = "a=b=c"; @@ -1555,7 +1519,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_a_mandatory_field_is_missing() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; // Without `info_hash` param @@ -1598,7 +1562,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1622,7 +1586,7 @@ mod axum_test_env { // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP. // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request HTTP header. 
- let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1639,7 +1603,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_downloaded_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1660,7 +1624,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_uploaded_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1681,7 +1645,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_peer_id_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1709,7 +1673,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_port_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1730,7 +1694,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_left_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = 
running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1751,7 +1715,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_event_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1780,7 +1744,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_compact_param_is_invalid() { - let test_env = running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral()).await; + let test_env = running_test_environment::(configuration::ephemeral()).await; let mut params = QueryBuilder::default().query().params(); @@ -1801,8 +1765,7 @@ mod axum_test_env { #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let response = Client::new(test_env.bind_address().clone()) .announce( @@ -1829,8 +1792,7 @@ mod axum_test_env { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1870,8 +1832,7 @@ mod axum_test_env { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = 
running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1921,8 +1882,7 @@ mod axum_test_env { #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let peer = PeerBuilder::default().build(); @@ -1949,8 +1909,7 @@ mod axum_test_env { // Tracker Returns Compact Peer Lists // https://www.bittorrent.org/beps/bep_0023.html - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -1991,8 +1950,7 @@ mod axum_test_env { // code-review: the HTTP tracker does not return the compact response by default if the "compact" // param is not provided in the announce URL. The BEP 23 suggest to do so. 
- let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2030,8 +1988,7 @@ mod axum_test_env { #[tokio::test] async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().query()) @@ -2048,8 +2005,7 @@ mod axum_test_env { #[tokio::test] async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) @@ -2068,8 +2024,7 @@ mod axum_test_env { async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; Client::new(test_env.bind_address().clone()) .announce( @@ -2090,15 +2045,12 @@ mod axum_test_env { #[tokio::test] async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let res = Client::new(test_env.bind_address().clone()) + Client::new(test_env.bind_address().clone()) .announce(&QueryBuilder::default().query()) .await; - println!("{:?}", res.text().await.unwrap()); - let stats = test_env.tracker.get_stats().await; assert_eq!(stats.tcp4_announces_handled, 1); @@ -2110,8 +2062,7 @@ mod axum_test_env { #[tokio::test] async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) @@ -2130,8 +2081,7 @@ mod axum_test_env { async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; Client::new(test_env.bind_address().clone()) .announce( @@ -2152,8 +2102,7 @@ mod axum_test_env { #[tokio::test] async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let client_ip = local_ip().unwrap(); @@ -2185,11 +2134,10 @@ mod axum_test_env { 127.0.0.1 external_ip = "2.137.87.41" */ - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_external_ip( - IpAddr::from_str("2.137.87.41").unwrap(), - )) - .await; + let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( + IpAddr::from_str("2.137.87.41").unwrap(), + )) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); @@ -2222,11 +2170,10 @@ mod axum_test_env { ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" */ - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_external_ip( - IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), - )) - .await; + let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( + IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), + )) + .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); @@ -2259,9 +2206,7 @@ mod 
axum_test_env { 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 */ - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_with_reverse_proxy()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2301,6 +2246,7 @@ mod axum_test_env { use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; use crate::http::asserts::{ @@ -2317,8 +2263,7 @@ mod axum_test_env { //#[tokio::test] #[allow(dead_code)] async fn should_fail_when_the_request_is_empty() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let response = Client::new(test_env.bind_address().clone()).get("scrape").await; assert_missing_query_params_for_scrape_request_error_response(response).await; @@ -2328,8 +2273,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let mut params = QueryBuilder::default().query().params(); @@ -2348,8 +2292,7 @@ mod axum_test_env { #[tokio::test] async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2389,8 +2332,7 @@ mod axum_test_env { #[tokio::test] async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2430,8 +2372,7 @@ mod axum_test_env { #[tokio::test] async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2450,8 +2391,7 @@ mod axum_test_env { #[tokio::test] async fn should_accept_multiple_infohashes() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); @@ -2477,8 +2417,7 @@ mod axum_test_env { #[tokio::test] async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2501,8 +2440,7 @@ mod axum_test_env { #[tokio::test] async fn 
should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_ipv6()).await; + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2531,6 +2469,7 @@ mod axum_test_env { use std::str::FromStr; use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; use crate::http::client::Client; @@ -2540,9 +2479,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2557,9 +2494,7 @@ mod axum_test_env { #[tokio::test] async fn should_allow_announcing_a_whitelisted_torrent() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2584,6 +2519,7 @@ mod axum_test_env { use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::PeerBuilder; use crate::http::asserts::assert_scrape_response; @@ -2595,9 +2531,7 @@ mod axum_test_env { #[tokio::test] async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let test_env = - 
running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2628,9 +2562,7 @@ mod axum_test_env { #[tokio::test] async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_whitelisted()) - .await; + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2693,8 +2625,7 @@ mod axum_test_env { #[tokio::test] async fn should_respond_to_authenticated_peers() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); @@ -2709,8 +2640,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2747,7 +2677,7 @@ mod axum_test_env { // The tracker does not have this key let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let response = Client::authenticated(test_env.bind_address(), unregistered_key) + let response = Client::authenticated(test_env.bind_address().clone(), unregistered_key) .announce(&QueryBuilder::default().query()) .await; @@ -2765,6 
+2695,7 @@ mod axum_test_env { use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::Key; use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::PeerBuilder; use crate::http::asserts::{assert_authentication_error_response, assert_scrape_response}; @@ -2792,8 +2723,7 @@ mod axum_test_env { #[tokio::test] async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2824,8 +2754,7 @@ mod axum_test_env { #[tokio::test] async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -2870,8 +2799,7 @@ mod axum_test_env { // There is not authentication error // code-review: should this really be this way? 
- let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); diff --git a/tests/udp/test_environment.rs b/tests/udp/test_environment.rs index f805d9a05..02d51c4bf 100644 --- a/tests/udp/test_environment.rs +++ b/tests/udp/test_environment.rs @@ -5,6 +5,7 @@ use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; use torrust_tracker::udp::server::{RunningUdpServer, StoppedUdpServer, UdpServer}; +use torrust_tracker_test_helpers::configuration; use crate::common::tracker::new_tracker; @@ -88,7 +89,7 @@ pub async fn running_test_environment() -> RunningTestEnvironment { } pub fn udp_server() -> StoppedUdpServer { - let config = Arc::new(torrust_tracker_test_helpers::configuration::ephemeral()); + let config = Arc::new(configuration::ephemeral()); let tracker = new_tracker(config.clone()); From d020c5a514d4f09669f99c956ee8d47521752872 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Tue, 7 Mar 2023 18:05:35 +0100 Subject: [PATCH 395/435] refactor: `tracker_api` launching and testing --- src/apis/routes.rs | 2 +- src/apis/server.rs | 212 ++++++++++++++++++---------------- src/jobs/tracker_apis.rs | 4 +- src/jobs/udp_tracker.rs | 4 +- src/udp/handlers.rs | 83 ++++++------- src/udp/server.rs | 31 ++--- tests/api/test_environment.rs | 55 ++++----- tests/tracker_api.rs | 111 +++++++++++------- tests/udp/test_environment.rs | 49 ++++---- tests/udp_tracker.rs | 12 +- 10 files changed, 292 insertions(+), 271 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 281979aa5..6e3218605 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -10,7 +10,7 @@ use super::handlers::{ use super::middlewares::auth::auth; use crate::tracker::Tracker; -pub 
fn router(tracker: &Arc) -> Router { +pub fn router(tracker: Arc) -> Router { Router::new() // Stats .route("/api/stats", get(get_stats_handler).with_state(tracker.clone())) diff --git a/src/apis/server.rs b/src/apis/server.rs index 4c8fbaada..0a5013161 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -1,15 +1,16 @@ -use std::net::{SocketAddr, TcpListener}; +use std::net::SocketAddr; +use std::str::FromStr; use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; +use futures::future::BoxFuture; use futures::Future; use log::info; -use tokio::task::JoinHandle; use warp::hyper; use super::routes::router; -use crate::signals::shutdown_signal_with_message; +use crate::signals::shutdown_signal; use crate::tracker::Tracker; #[derive(Debug)] @@ -25,133 +26,150 @@ pub type RunningApiServer = ApiServer; #[allow(clippy::module_name_repetitions)] pub struct ApiServer { pub cfg: torrust_tracker_configuration::HttpApi, - pub tracker: Arc, pub state: S, } pub struct Stopped; pub struct Running { - pub bind_address: SocketAddr, - stop_job_sender: tokio::sync::oneshot::Sender, - job: JoinHandle<()>, + pub bind_addr: SocketAddr, + task_killer: tokio::sync::oneshot::Sender, + task: tokio::task::JoinHandle<()>, } impl ApiServer { - pub fn new(cfg: torrust_tracker_configuration::HttpApi, tracker: Arc) -> Self { - Self { - cfg, - tracker, - state: Stopped {}, - } + pub fn new(cfg: torrust_tracker_configuration::HttpApi) -> Self { + Self { cfg, state: Stopped {} } } - /// # Errors - /// - /// Will return `Err` if `TcpListener` can not bind to `bind_address`. 
- pub fn start(self) -> Result, Error> { - let listener = TcpListener::bind(&self.cfg.bind_address).map_err(|e| Error::Error(e.to_string()))?; - - let bind_address = listener.local_addr().map_err(|e| Error::Error(e.to_string()))?; - - let cfg = self.cfg.clone(); - let tracker = self.tracker.clone(); - - let (sender, receiver) = tokio::sync::oneshot::channel::(); - - let job = tokio::spawn(async move { - if let (true, Some(ssl_cert_path), Some(ssl_key_path)) = (cfg.ssl_enabled, cfg.ssl_cert_path, cfg.ssl_key_path) { - let tls_config = RustlsConfig::from_pem_file(ssl_cert_path, ssl_key_path) - .await - .expect("Could not read ssl cert and/or key."); - - start_tls_from_tcp_listener_with_graceful_shutdown(listener, tls_config, &tracker, receiver) - .await - .expect("Could not start from tcp listener with tls."); - } else { - start_from_tcp_listener_with_graceful_shutdown(listener, &tracker, receiver) - .await - .expect("Could not start from tcp listener."); - } + pub async fn start(self, tracker: Arc) -> Result, Error> { + let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::(); + let (addr_sender, addr_receiver) = tokio::sync::oneshot::channel::(); + + let configuration = self.cfg.clone(); + + let task = tokio::spawn(async move { + let (bind_addr, server) = Launcher::start(&configuration, tracker, shutdown_signal(shutdown_receiver)); + + addr_sender.send(bind_addr).unwrap(); + + server.await; }); - let running_api_server: ApiServer = ApiServer { + let bind_address = addr_receiver.await.expect("Could not receive bind_address."); + + Ok(ApiServer { cfg: self.cfg, - tracker: self.tracker, state: Running { - bind_address, - stop_job_sender: sender, - job, + bind_addr: bind_address, + task_killer: shutdown_sender, + task, }, - }; - - Ok(running_api_server) + }) } } impl ApiServer { - /// # Errors - /// - /// Will return `Err` if the oneshot channel to send the stop signal - /// has already been called once. 
pub async fn stop(self) -> Result, Error> { - self.state.stop_job_sender.send(1).map_err(|e| Error::Error(e.to_string()))?; + self.state.task_killer.send(0).unwrap(); - let _ = self.state.job.await; + let _ = self.state.task.await; - let stopped_api_server: ApiServer = ApiServer { + Ok(ApiServer { cfg: self.cfg, - tracker: self.tracker, state: Stopped {}, - }; - - Ok(stopped_api_server) + }) } } -pub fn start_from_tcp_listener_with_graceful_shutdown( - tcp_listener: TcpListener, - tracker: &Arc, - shutdown_signal: tokio::sync::oneshot::Receiver, -) -> impl Future> { - let app = router(tracker); - - let context = tcp_listener.local_addr().expect("Could not get context."); - - axum::Server::from_tcp(tcp_listener) - .expect("Could not bind to tcp listener.") - .serve(app.into_make_service()) - .with_graceful_shutdown(shutdown_signal_with_message( - shutdown_signal, - format!("Shutting down {context}.."), - )) -} +struct Launcher; + +impl Launcher { + pub fn start( + cfg: &torrust_tracker_configuration::HttpApi, + tracker: Arc, + shutdown_signal: F, + ) -> (SocketAddr, BoxFuture<'static, ()>) + where + F: Future + Send + 'static, + { + let addr = SocketAddr::from_str(&cfg.bind_address).expect("bind_address is not a valid SocketAddr."); + let tcp_listener = std::net::TcpListener::bind(addr).expect("Could not bind tcp_listener to address."); + let bind_addr = tcp_listener + .local_addr() + .expect("Could not get local_addr from tcp_listener."); + + if let (true, Some(ssl_cert_path), Some(ssl_key_path)) = (&cfg.ssl_enabled, &cfg.ssl_cert_path, &cfg.ssl_key_path) { + let server = Self::start_tls_with_graceful_shutdown( + tcp_listener, + (ssl_cert_path.to_string(), ssl_key_path.to_string()), + tracker, + shutdown_signal, + ); + + (bind_addr, server) + } else { + let server = Self::start_with_graceful_shutdown(tcp_listener, tracker, shutdown_signal); + + (bind_addr, server) + } + } -pub fn start_tls_from_tcp_listener_with_graceful_shutdown( - tcp_listener: TcpListener, - 
tls_config: RustlsConfig, - tracker: &Arc, - shutdown_signal: tokio::sync::oneshot::Receiver, -) -> impl Future> { - let app = router(tracker); + pub fn start_with_graceful_shutdown( + tcp_listener: std::net::TcpListener, + tracker: Arc, + shutdown_signal: F, + ) -> BoxFuture<'static, ()> + where + F: Future + Send + 'static, + { + let app = router(tracker); + + Box::pin(async { + axum::Server::from_tcp(tcp_listener) + .expect("Could not bind to tcp listener.") + .serve(app.into_make_service_with_connect_info::()) + .with_graceful_shutdown(shutdown_signal) + .await + .expect("Axum server crashed."); + }) + } - let context = tcp_listener.local_addr().expect("Could not get context."); + pub fn start_tls_with_graceful_shutdown( + tcp_listener: std::net::TcpListener, + (ssl_cert_path, ssl_key_path): (String, String), + tracker: Arc, + shutdown_signal: F, + ) -> BoxFuture<'static, ()> + where + F: Future + Send + 'static, + { + let app = router(tracker); - let handle = Handle::new(); + let handle = Handle::new(); - let cloned_handle = handle.clone(); + let cloned_handle = handle.clone(); - tokio::spawn(async move { - shutdown_signal_with_message(shutdown_signal, format!("Shutting down {context}..")).await; - cloned_handle.shutdown(); - }); + tokio::task::spawn_local(async move { + shutdown_signal.await; + cloned_handle.shutdown(); + }); - axum_server::from_tcp_rustls(tcp_listener, tls_config) - .handle(handle) - .serve(app.into_make_service()) + Box::pin(async { + let tls_config = RustlsConfig::from_pem_file(ssl_cert_path, ssl_key_path) + .await + .expect("Could not read tls cert."); + + axum_server::from_tcp_rustls(tcp_listener, tls_config) + .handle(handle) + .serve(app.into_make_service_with_connect_info::()) + .await + .expect("Axum server crashed."); + }) + } } -pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future> { +pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl Future> { let app = router(tracker); let server = 
axum::Server::bind(&socket_addr).serve(app.into_make_service()); @@ -165,7 +183,7 @@ pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl Future, + tracker: Arc, ) -> impl Future> { let app = router(tracker); @@ -204,9 +222,9 @@ mod tests { let tracker = Arc::new(tracker::Tracker::new(cfg.clone(), None, statistics::Repo::new()).unwrap()); - let stopped_api_server = ApiServer::new(cfg.http_api.clone(), tracker); + let stopped_api_server = ApiServer::new(cfg.http_api.clone()); - let running_api_server_result = stopped_api_server.start(); + let running_api_server_result = stopped_api_server.start(tracker).await; assert!(running_api_server_result.is_ok()); diff --git a/src/jobs/tracker_apis.rs b/src/jobs/tracker_apis.rs index 85bb1b59f..939b58638 100644 --- a/src/jobs/tracker_apis.rs +++ b/src/jobs/tracker_apis.rs @@ -31,7 +31,7 @@ pub async fn start_job(config: &HttpApi, tracker: Arc) -> Join if !ssl_enabled { info!("Starting Torrust APIs server on: http://{}", bind_addr); - let handle = server::start(bind_addr, &tracker); + let handle = server::start(bind_addr, tracker); tx.send(ApiServerJobStarted()).expect("the API server should not be dropped"); @@ -45,7 +45,7 @@ pub async fn start_job(config: &HttpApi, tracker: Arc) -> Join .await .unwrap(); - let handle = server::start_tls(bind_addr, ssl_config, &tracker); + let handle = server::start_tls(bind_addr, ssl_config, tracker); tx.send(ApiServerJobStarted()).expect("the API server should not be dropped"); diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index 468f6dbbd..57232855b 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -12,10 +12,10 @@ pub fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHan let bind_addr = config.bind_address.clone(); tokio::spawn(async move { - match Udp::new(tracker, &bind_addr).await { + match Udp::new(&bind_addr).await { Ok(udp_server) => { info!("Starting UDP server on: udp://{}", bind_addr); - udp_server.start().await; + 
udp_server.start(tracker).await; } Err(e) => { warn!("Could not start UDP tracker on: udp://{}", bind_addr); diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 211a0d1ba..e47a89dd4 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -11,12 +11,12 @@ use log::debug; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; -use crate::tracker::{self, statistics}; +use crate::tracker::{statistics, Tracker}; use crate::udp::error::Error; use crate::udp::peer_builder; use crate::udp::request::AnnounceWrapper; -pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: Arc) -> Response { +pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: &Tracker) -> Response { match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|e| Error::InternalServer { message: format!("{e:?}"), location: Location::caller(), @@ -46,11 +46,7 @@ pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: A /// # Errors /// /// If a error happens in the `handle_request` function, it will just return the `ServerError`. -pub async fn handle_request( - request: Request, - remote_addr: SocketAddr, - tracker: Arc, -) -> Result { +pub async fn handle_request(request: Request, remote_addr: SocketAddr, tracker: &Tracker) -> Result { match request { Request::Connect(connect_request) => handle_connect(remote_addr, &connect_request, tracker).await, Request::Announce(announce_request) => handle_announce(remote_addr, &announce_request, tracker).await, @@ -61,11 +57,7 @@ pub async fn handle_request( /// # Errors /// /// This function dose not ever return an error. 
-pub async fn handle_connect( - remote_addr: SocketAddr, - request: &ConnectRequest, - tracker: Arc, -) -> Result { +pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, tracker: &Tracker) -> Result { let connection_cookie = make(&remote_addr); let connection_id = into_connection_id(&connection_cookie); @@ -90,7 +82,7 @@ pub async fn handle_connect( /// # Errors /// /// Will return `Error` if unable to `authenticate_request`. -pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), Error> { +pub async fn authenticate(info_hash: &InfoHash, tracker: &Tracker) -> Result<(), Error> { tracker .authenticate_request(info_hash, &None) .await @@ -105,7 +97,7 @@ pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) pub async fn handle_announce( remote_addr: SocketAddr, announce_request: &AnnounceRequest, - tracker: Arc, + tracker: &Tracker, ) -> Result { debug!("udp announce request: {:#?}", announce_request); @@ -116,7 +108,7 @@ pub async fn handle_announce( let info_hash = wrapped_announce_request.info_hash; let remote_client_ip = remote_addr.ip(); - authenticate(&info_hash, tracker.clone()).await?; + authenticate(&info_hash, tracker).await?; let mut peer = peer_builder::from_request(&wrapped_announce_request, &remote_client_ip); @@ -182,11 +174,7 @@ pub async fn handle_announce( /// # Errors /// /// This function dose not ever return an error. 
-pub async fn handle_scrape( - remote_addr: SocketAddr, - request: &ScrapeRequest, - tracker: Arc, -) -> Result { +pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tracker: &Tracker) -> Result { // Convert from aquatic infohashes let mut info_hashes = vec![]; for info_hash in &request.info_hashes { @@ -392,7 +380,7 @@ mod tests { transaction_id: TransactionId(0i32), }; - let response = handle_connect(sample_ipv4_remote_addr(), &request, initialized_public_tracker()) + let response = handle_connect(sample_ipv4_remote_addr(), &request, &initialized_public_tracker()) .await .unwrap(); @@ -411,7 +399,7 @@ mod tests { transaction_id: TransactionId(0i32), }; - let response = handle_connect(sample_ipv4_remote_addr(), &request, initialized_public_tracker()) + let response = handle_connect(sample_ipv4_remote_addr(), &request, &initialized_public_tracker()) .await .unwrap(); @@ -439,7 +427,7 @@ mod tests { let torrent_tracker = Arc::new( tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); - handle_connect(client_socket_address, &sample_connect_request(), torrent_tracker) + handle_connect(client_socket_address, &sample_connect_request(), &torrent_tracker) .await .unwrap(); } @@ -457,7 +445,7 @@ mod tests { let torrent_tracker = Arc::new( tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); - handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), torrent_tracker) + handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), &torrent_tracker) .await .unwrap(); } @@ -573,7 +561,7 @@ mod tests { .with_port(client_port) .into(); - handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + handle_announce(remote_addr, &request, &tracker).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -593,11 +581,11 @@ mod tests { 
.with_connection_id(into_connection_id(&make(&remote_addr))) .into(); - let response = handle_announce(remote_addr, &request, initialized_public_tracker()) + let response = handle_announce(remote_addr, &request, &initialized_public_tracker()) .await .unwrap(); - let empty_peer_vector: Vec> = vec![]; + let empty_peer_vector: Vec> = vec![]; assert_eq!( response, Response::from(AnnounceResponse { @@ -636,7 +624,7 @@ mod tests { .with_port(client_port) .into(); - handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + handle_announce(remote_addr, &request, &tracker).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -667,7 +655,7 @@ mod tests { .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); - handle_announce(remote_addr, &request, tracker.clone()).await.unwrap() + handle_announce(remote_addr, &request, &tracker).await.unwrap() } #[tokio::test] @@ -704,7 +692,7 @@ mod tests { handle_announce( sample_ipv4_socket_address(), &AnnounceRequestBuilder::default().into(), - tracker.clone(), + &tracker, ) .await .unwrap(); @@ -740,7 +728,7 @@ mod tests { .with_port(client_port) .into(); - handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + handle_announce(remote_addr, &request, &tracker).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -797,7 +785,7 @@ mod tests { .with_port(client_port) .into(); - handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + handle_announce(remote_addr, &request, &tracker).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -820,11 +808,11 @@ mod tests { .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); - let response = handle_announce(remote_addr, &request, initialized_public_tracker()) + let response = handle_announce(remote_addr, &request, &initialized_public_tracker()) .await .unwrap(); - let empty_peer_vector: Vec> = vec![]; + let 
empty_peer_vector: Vec> = vec![]; assert_eq!( response, Response::from(AnnounceResponse { @@ -863,7 +851,7 @@ mod tests { .with_port(client_port) .into(); - handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + handle_announce(remote_addr, &request, &tracker).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -897,7 +885,7 @@ mod tests { .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); - handle_announce(remote_addr, &request, tracker.clone()).await.unwrap() + handle_announce(remote_addr, &request, &tracker).await.unwrap() } #[tokio::test] @@ -937,9 +925,7 @@ mod tests { .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); - handle_announce(remote_addr, &announce_request, tracker.clone()) - .await - .unwrap(); + handle_announce(remote_addr, &announce_request, &tracker).await.unwrap(); } mod from_a_loopback_ip { @@ -982,7 +968,7 @@ mod tests { .with_port(client_port) .into(); - handle_announce(remote_addr, &request, tracker.clone()).await.unwrap(); + handle_announce(remote_addr, &request, &tracker).await.unwrap(); let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; @@ -1036,7 +1022,7 @@ mod tests { info_hashes, }; - let response = handle_scrape(remote_addr, &request, initialized_public_tracker()) + let response = handle_scrape(remote_addr, &request, &initialized_public_tracker()) .await .unwrap(); @@ -1083,7 +1069,7 @@ mod tests { let request = build_scrape_request(&remote_addr, &info_hash); - handle_scrape(remote_addr, &request, tracker.clone()).await.unwrap() + handle_scrape(remote_addr, &request, &tracker).await.unwrap() } fn match_scrape_response(response: Response) -> Option { @@ -1134,8 +1120,7 @@ mod tests { let request = build_scrape_request(&remote_addr, &non_existing_info_hash); - let torrent_stats = - match_scrape_response(handle_scrape(remote_addr, &request, tracker.clone()).await.unwrap()).unwrap(); + let torrent_stats = 
match_scrape_response(handle_scrape(remote_addr, &request, &tracker).await.unwrap()).unwrap(); let expected_torrent_stats = vec![zeroed_torrent_statistics()]; @@ -1177,8 +1162,7 @@ mod tests { let request = build_scrape_request(&remote_addr, &info_hash); - let torrent_stats = - match_scrape_response(handle_scrape(remote_addr, &request, tracker.clone()).await.unwrap()).unwrap(); + let torrent_stats = match_scrape_response(handle_scrape(remote_addr, &request, &tracker).await.unwrap()).unwrap(); let expected_torrent_stats = vec![TorrentScrapeStatistics { seeders: NumberOfPeers(1), @@ -1200,8 +1184,7 @@ mod tests { let request = build_scrape_request(&remote_addr, &info_hash); - let torrent_stats = - match_scrape_response(handle_scrape(remote_addr, &request, tracker.clone()).await.unwrap()).unwrap(); + let torrent_stats = match_scrape_response(handle_scrape(remote_addr, &request, &tracker).await.unwrap()).unwrap(); let expected_torrent_stats = vec![zeroed_torrent_statistics()]; @@ -1246,7 +1229,7 @@ mod tests { tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); - handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) + handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), &tracker) .await .unwrap(); } @@ -1278,7 +1261,7 @@ mod tests { tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); - handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) + handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), &tracker) .await .unwrap(); } diff --git a/src/udp/server.rs b/src/udp/server.rs index f74468189..f3f90362d 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -27,7 +27,6 @@ pub type RunningUdpServer = UdpServer; #[allow(clippy::module_name_repetitions)] pub struct UdpServer { pub cfg: torrust_tracker_configuration::UdpTracker, - pub tracker: Arc, pub state: S, } @@ -40,19 +39,15 
@@ pub struct Running { } impl UdpServer { - pub fn new(cfg: torrust_tracker_configuration::UdpTracker, tracker: Arc) -> Self { - Self { - cfg, - tracker, - state: Stopped {}, - } + pub fn new(cfg: torrust_tracker_configuration::UdpTracker) -> Self { + Self { cfg, state: Stopped {} } } /// # Errors /// /// Will return `Err` if UDP can't bind to given bind address. - pub async fn start(self) -> Result, Error> { - let udp = Udp::new(self.tracker.clone(), &self.cfg.bind_address) + pub async fn start(self, tracker: Arc) -> Result, Error> { + let udp = Udp::new(&self.cfg.bind_address) .await .map_err(|e| Error::Error(e.to_string()))?; @@ -61,12 +56,11 @@ impl UdpServer { let (sender, receiver) = tokio::sync::oneshot::channel::(); let job = tokio::spawn(async move { - udp.start_with_graceful_shutdown(shutdown_signal(receiver)).await; + udp.start_with_graceful_shutdown(tracker, shutdown_signal(receiver)).await; }); let running_udp_server: UdpServer = UdpServer { cfg: self.cfg, - tracker: self.tracker, state: Running { bind_address, stop_job_sender: sender, @@ -90,7 +84,6 @@ impl UdpServer { let stopped_api_server: UdpServer = UdpServer { cfg: self.cfg, - tracker: self.tracker, state: Stopped {}, }; @@ -100,30 +93,27 @@ impl UdpServer { pub struct Udp { socket: Arc, - tracker: Arc, } impl Udp { /// # Errors /// /// Will return `Err` unable to bind to the supplied `bind_address`. - pub async fn new(tracker: Arc, bind_address: &str) -> tokio::io::Result { + pub async fn new(bind_address: &str) -> tokio::io::Result { let socket = UdpSocket::bind(bind_address).await?; Ok(Udp { socket: Arc::new(socket), - tracker, }) } /// # Panics /// /// It would panic if unable to resolve the `local_addr` from the supplied “socket“. - pub async fn start(&self) { + pub async fn start(&self, tracker: Arc) { loop { let mut data = [0; MAX_PACKET_SIZE]; let socket = self.socket.clone(); - let tracker = self.tracker.clone(); tokio::select! 
{ _ = tokio::signal::ctrl_c() => { @@ -137,7 +127,7 @@ impl Udp { debug!("From: {}", &remote_addr); debug!("Payload: {:?}", payload); - let response = handle_packet(remote_addr, payload, tracker).await; + let response = handle_packet(remote_addr, payload, &tracker).await; Udp::send_response(socket, remote_addr, response).await; } @@ -148,7 +138,7 @@ impl Udp { /// # Panics /// /// It would panic if unable to resolve the `local_addr` from the supplied “socket“. - async fn start_with_graceful_shutdown(&self, shutdown_signal: F) + async fn start_with_graceful_shutdown(&self, tracker: Arc, shutdown_signal: F) where F: Future, { @@ -158,7 +148,6 @@ impl Udp { loop { let mut data = [0; MAX_PACKET_SIZE]; let socket = self.socket.clone(); - let tracker = self.tracker.clone(); tokio::select! { _ = &mut shutdown_signal => { @@ -172,7 +161,7 @@ impl Udp { debug!("From: {}", &remote_addr); debug!("Payload: {:?}", payload); - let response = handle_packet(remote_addr, payload, tracker).await; + let response = handle_packet(remote_addr, payload, &tracker).await; Udp::send_response(socket, remote_addr, response).await; } diff --git a/tests/api/test_environment.rs b/tests/api/test_environment.rs index 1f8708650..b6f5ca990 100644 --- a/tests/api/test_environment.rs +++ b/tests/api/test_environment.rs @@ -4,7 +4,6 @@ use torrust_tracker::apis::server::{ApiServer, RunningApiServer, StoppedApiServe use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; -use torrust_tracker_test_helpers::configuration; use super::connection_info::ConnectionInfo; use crate::common::tracker::new_tracker; @@ -15,6 +14,7 @@ pub type StoppedTestEnvironment = TestEnvironment; pub type RunningTestEnvironment = TestEnvironment; pub struct TestEnvironment { + pub cfg: Arc, pub tracker: Arc, pub state: S, } @@ -36,39 +36,45 @@ impl TestEnvironment { } impl TestEnvironment { - #[allow(dead_code)] - pub fn new_stopped() -> Self { - let 
api_server = api_server(); + pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { + let cfg = Arc::new(cfg); + + let tracker = new_tracker(cfg.clone()); + + let api_server = api_server(cfg.http_api.clone()); Self { - tracker: api_server.tracker.clone(), + cfg, + tracker, state: Stopped { api_server }, } } - #[allow(dead_code)] - pub fn start(self) -> TestEnvironment { + pub async fn start(self) -> TestEnvironment { TestEnvironment { - tracker: self.tracker, + cfg: self.cfg, + tracker: self.tracker.clone(), state: Running { - api_server: self.state.api_server.start().unwrap(), + api_server: self.state.api_server.start(self.tracker).await.unwrap(), }, } } + + pub fn config_mut(&mut self) -> &mut torrust_tracker_configuration::HttpApi { + &mut self.state.api_server.cfg + } } impl TestEnvironment { - pub fn new_running() -> Self { - let api_server = running_api_server(); + pub async fn new_running(cfg: torrust_tracker_configuration::Configuration) -> Self { + let test_env = StoppedTestEnvironment::new_stopped(cfg); - Self { - tracker: api_server.tracker.clone(), - state: Running { api_server }, - } + test_env.start().await } pub async fn stop(self) -> TestEnvironment { TestEnvironment { + cfg: self.cfg, tracker: self.tracker, state: Stopped { api_server: self.state.api_server.stop().await.unwrap(), @@ -78,25 +84,22 @@ impl TestEnvironment { pub fn get_connection_info(&self) -> ConnectionInfo { ConnectionInfo { - bind_address: self.state.api_server.state.bind_address.to_string(), + bind_address: self.state.api_server.state.bind_addr.to_string(), api_token: self.state.api_server.cfg.access_tokens.get("admin").cloned(), } } } #[allow(clippy::module_name_repetitions)] -pub fn running_test_environment() -> RunningTestEnvironment { - TestEnvironment::new_running() +pub fn stopped_test_environment(cfg: torrust_tracker_configuration::Configuration) -> StoppedTestEnvironment { + TestEnvironment::new_stopped(cfg) } -pub fn api_server() -> 
StoppedApiServer { - let config = Arc::new(configuration::ephemeral()); - - let tracker = new_tracker(config.clone()); - - ApiServer::new(config.http_api.clone(), tracker) +#[allow(clippy::module_name_repetitions)] +pub async fn running_test_environment(cfg: torrust_tracker_configuration::Configuration) -> RunningTestEnvironment { + TestEnvironment::new_running(cfg).await } -pub fn running_api_server() -> RunningApiServer { - api_server().start().unwrap() +pub fn api_server(cfg: torrust_tracker_configuration::HttpApi) -> StoppedApiServer { + ApiServer::new(cfg) } diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index ccdcded5e..d00c7d68c 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -9,7 +9,6 @@ mod api; mod common; mod tracker_apis { - use crate::common::fixtures::invalid_info_hashes; // When these infohashes are used in URL path params @@ -24,7 +23,29 @@ mod tracker_apis { [String::new(), " ".to_string()].to_vec() } + mod configuration { + use torrust_tracker_test_helpers::configuration; + + use crate::api::test_environment::stopped_test_environment; + + #[tokio::test] + #[should_panic] + async fn should_fail_with_ssl_enabled_and_bad_ssl_config() { + let mut test_env = stopped_test_environment(configuration::ephemeral()); + + let cfg = test_env.config_mut(); + + cfg.ssl_enabled = true; + cfg.ssl_key_path = Some("bad key path".to_string()); + cfg.ssl_cert_path = Some("bad cert path".to_string()); + + test_env.start().await; + } + } + mod authentication { + use torrust_tracker_test_helpers::configuration; + use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; use crate::api::client::Client; use crate::api::test_environment::running_test_environment; @@ -32,7 +53,7 @@ mod tracker_apis { #[tokio::test] async fn should_authenticate_requests_by_using_a_token_query_param() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let token = 
test_env.get_connection_info().api_token.unwrap(); @@ -47,7 +68,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_missing() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let response = Client::new(test_env.get_connection_info()) .get_request_with_query("stats", Query::default()) @@ -60,7 +81,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_empty() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let response = Client::new(test_env.get_connection_info()) .get_request_with_query("stats", Query::params([QueryParam::new("token", "")].to_vec())) @@ -73,7 +94,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_invalid() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let response = Client::new(test_env.get_connection_info()) .get_request_with_query("stats", Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec())) @@ -86,7 +107,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_query() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let token = test_env.get_connection_info().api_token.unwrap(); @@ -113,6 +134,7 @@ mod tracker_apis { use torrust_tracker::apis::resources::stats::Stats; use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; use crate::api::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; use crate::api::client::Client; @@ -122,7 +144,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_getting_tracker_statistics() { - let test_env = running_test_environment(); + 
let test_env = running_test_environment(configuration::ephemeral()).await; test_env .add_torrent_peer( @@ -161,7 +183,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let response = Client::new(connection_with_invalid_token( test_env.get_connection_info().bind_address.as_str(), @@ -187,6 +209,7 @@ mod tracker_apis { use torrust_tracker::apis::resources::torrent::Torrent; use torrust_tracker::apis::resources::{self, torrent}; use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; use super::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; use crate::api::asserts::{ @@ -201,7 +224,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_getting_torrents() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -226,7 +249,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_limiting_the_torrents_in_the_result() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -256,7 +279,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_the_torrents_result_pagination() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -286,7 +309,7 @@ mod tracker_apis { #[tokio::test] async fn 
should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let invalid_offsets = [" ", "-1", "1.1", "INVALID OFFSET"]; @@ -303,7 +326,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_parsed() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let invalid_limits = [" ", "-1", "1.1", "INVALID LIMIT"]; @@ -320,7 +343,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_getting_torrents_for_unauthenticated_users() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let response = Client::new(connection_with_invalid_token( test_env.get_connection_info().bind_address.as_str(), @@ -341,7 +364,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_getting_a_torrent_info() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -370,7 +393,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -385,7 +408,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invalid() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { let response = 
Client::new(test_env.get_connection_info()) @@ -408,7 +431,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); @@ -436,6 +459,7 @@ mod tracker_apis { use std::str::FromStr; use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; use super::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; use crate::api::asserts::{ @@ -450,7 +474,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_whitelisting_a_torrent() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -471,7 +495,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -488,7 +512,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -511,7 +535,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_whitelisted() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -528,7 +552,7 @@ mod tracker_apis { #[tokio::test] async 
fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invalid() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { let response = Client::new(test_env.get_connection_info()) @@ -551,7 +575,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_removing_a_torrent_from_the_whitelist() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -569,7 +593,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -584,7 +608,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_infohash_is_invalid() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { let response = Client::new(test_env.get_connection_info()) @@ -607,7 +631,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -626,7 +650,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { - let test_env = 
running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -652,7 +676,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_reload_the_whitelist_from_the_database() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -677,7 +701,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -697,6 +721,7 @@ mod tracker_apis { use std::time::Duration; use torrust_tracker::tracker::auth::Key; + use torrust_tracker_test_helpers::configuration; use crate::api::asserts::{ assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, @@ -710,7 +735,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_generating_a_new_auth_key() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let seconds_valid = 60; @@ -732,7 +757,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let seconds_valid = 60; @@ -755,7 +780,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { - let test_env = running_test_environment(); + let test_env = 
running_test_environment(configuration::ephemeral()).await; let invalid_key_durations = [ // "", it returns 404 @@ -763,9 +788,9 @@ mod tracker_apis { "-1", "text", ]; - for invalid_key_duration in &invalid_key_durations { + for invalid_key_duration in invalid_key_durations { let response = Client::new(test_env.get_connection_info()) - .post(&format!("key/{}", &invalid_key_duration)) + .post(&format!("key/{}", invalid_key_duration)) .await; assert_invalid_key_duration_param(response, invalid_key_duration).await; @@ -776,7 +801,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_generated() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; force_database_error(&test_env.tracker); @@ -792,7 +817,7 @@ mod tracker_apis { #[tokio::test] async fn should_allow_deleting_an_auth_key() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let seconds_valid = 60; let auth_key = test_env @@ -812,7 +837,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let invalid_auth_keys = [ // "", it returns a 404 @@ -837,7 +862,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_deleted() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let seconds_valid = 60; let auth_key = test_env @@ -859,7 +884,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let seconds_valid = 60; @@ -896,7 +921,7 @@ mod tracker_apis { #[tokio::test] async fn 
should_allow_reloading_keys() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let seconds_valid = 60; test_env @@ -914,7 +939,7 @@ mod tracker_apis { #[tokio::test] async fn should_fail_when_keys_cannot_be_reloaded() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let seconds_valid = 60; test_env @@ -934,7 +959,7 @@ mod tracker_apis { #[tokio::test] async fn should_not_allow_reloading_keys_for_unauthenticated_users() { - let test_env = running_test_environment(); + let test_env = running_test_environment(configuration::ephemeral()).await; let seconds_valid = 60; test_env diff --git a/tests/udp/test_environment.rs b/tests/udp/test_environment.rs index 02d51c4bf..f729777a1 100644 --- a/tests/udp/test_environment.rs +++ b/tests/udp/test_environment.rs @@ -5,7 +5,6 @@ use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; use torrust_tracker::udp::server::{RunningUdpServer, StoppedUdpServer, UdpServer}; -use torrust_tracker_test_helpers::configuration; use crate::common::tracker::new_tracker; @@ -15,6 +14,7 @@ pub type StoppedTestEnvironment = TestEnvironment; pub type RunningTestEnvironment = TestEnvironment; pub struct TestEnvironment { + pub cfg: Arc, pub tracker: Arc, pub state: S, } @@ -38,39 +38,41 @@ impl TestEnvironment { impl TestEnvironment { #[allow(dead_code)] - pub fn new_stopped() -> Self { - let udp_server = udp_server(); + pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { + let cfg = Arc::new(cfg); + + let tracker = new_tracker(cfg.clone()); + + let udp_server = udp_server(cfg.udp_trackers[0].clone()); Self { - tracker: udp_server.tracker.clone(), - state: Stopped { udp_server: udp_server }, + cfg, + tracker, + state: Stopped { udp_server }, } } #[allow(dead_code)] pub async fn start(self) -> 
TestEnvironment { TestEnvironment { - tracker: self.tracker, + cfg: self.cfg, + tracker: self.tracker.clone(), state: Running { - udp_server: self.state.udp_server.start().await.unwrap(), + udp_server: self.state.udp_server.start(self.tracker).await.unwrap(), }, } } } impl TestEnvironment { - pub async fn new_running() -> Self { - let udp_server = running_udp_server().await; - - Self { - tracker: udp_server.tracker.clone(), - state: Running { udp_server: udp_server }, - } + pub async fn new_running(cfg: torrust_tracker_configuration::Configuration) -> Self { + StoppedTestEnvironment::new_stopped(cfg).start().await } #[allow(dead_code)] pub async fn stop(self) -> TestEnvironment { TestEnvironment { + cfg: self.cfg, tracker: self.tracker, state: Stopped { udp_server: self.state.udp_server.stop().await.unwrap(), @@ -83,19 +85,16 @@ impl TestEnvironment { } } -#[allow(clippy::module_name_repetitions)] -pub async fn running_test_environment() -> RunningTestEnvironment { - TestEnvironment::new_running().await +#[allow(clippy::module_name_repetitions, dead_code)] +pub fn stopped_test_environment(cfg: torrust_tracker_configuration::Configuration) -> StoppedTestEnvironment { + TestEnvironment::new_stopped(cfg) } -pub fn udp_server() -> StoppedUdpServer { - let config = Arc::new(configuration::ephemeral()); - - let tracker = new_tracker(config.clone()); - - UdpServer::new(config.udp_trackers[0].clone(), tracker) +#[allow(clippy::module_name_repetitions)] +pub async fn running_test_environment(cfg: torrust_tracker_configuration::Configuration) -> RunningTestEnvironment { + TestEnvironment::new_running(cfg).await } -pub async fn running_udp_server() -> RunningUdpServer { - udp_server().start().await.unwrap() +pub fn udp_server(cfg: torrust_tracker_configuration::UdpTracker) -> StoppedUdpServer { + UdpServer::new(cfg) } diff --git a/tests/udp_tracker.rs b/tests/udp_tracker.rs index b7cc3bd6f..0f9283a8b 100644 --- a/tests/udp_tracker.rs +++ b/tests/udp_tracker.rs @@ -17,6 +17,7 
@@ mod udp_tracker_server { use aquatic_udp_protocol::{ConnectRequest, ConnectionId, Response, TransactionId}; use torrust_tracker::udp::MAX_PACKET_SIZE; + use torrust_tracker_test_helpers::configuration; use crate::udp::asserts::is_error_response; use crate::udp::client::{new_udp_client_connected, UdpTrackerClient}; @@ -45,7 +46,7 @@ mod udp_tracker_server { #[tokio::test] async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { - let test_env = running_test_environment().await; + let test_env = running_test_environment(configuration::ephemeral()).await; let client = new_udp_client_connected(&test_env.bind_address().to_string()).await; @@ -60,6 +61,7 @@ mod udp_tracker_server { mod receiving_a_connection_request { use aquatic_udp_protocol::{ConnectRequest, TransactionId}; + use torrust_tracker_test_helpers::configuration; use crate::udp::asserts::is_connect_response; use crate::udp::client::new_udp_tracker_client_connected; @@ -67,7 +69,7 @@ mod udp_tracker_server { #[tokio::test] async fn should_return_a_connect_response() { - let test_env = running_test_environment().await; + let test_env = running_test_environment(configuration::ephemeral()).await; let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; @@ -90,6 +92,7 @@ mod udp_tracker_server { AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, TransactionId, }; + use torrust_tracker_test_helpers::configuration; use crate::udp::asserts::is_ipv4_announce_response; use crate::udp::client::new_udp_tracker_client_connected; @@ -98,7 +101,7 @@ mod udp_tracker_server { #[tokio::test] async fn should_return_an_announce_response() { - let test_env = running_test_environment().await; + let test_env = running_test_environment(configuration::ephemeral()).await; let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; @@ -131,6 +134,7 @@ mod udp_tracker_server { mod 
receiving_an_scrape_request { use aquatic_udp_protocol::{ConnectionId, InfoHash, ScrapeRequest, TransactionId}; + use torrust_tracker_test_helpers::configuration; use crate::udp::asserts::is_scrape_response; use crate::udp::client::new_udp_tracker_client_connected; @@ -139,7 +143,7 @@ mod udp_tracker_server { #[tokio::test] async fn should_return_a_scrape_response() { - let test_env = running_test_environment().await; + let test_env = running_test_environment(configuration::ephemeral()).await; let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; From a611fbd26acc13d67f24fa51767646207044de4a Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 9 Mar 2023 11:31:17 +0100 Subject: [PATCH 396/435] chore: fix clippy errors --- src/apis/routes.rs | 1 + src/apis/server.rs | 18 +++++++++++++++--- src/http/axum_implementation/routes.rs | 1 + src/http/server.rs | 17 ++++++++++++++--- src/udp/server.rs | 1 + tests/common/tracker.rs | 1 + tests/tracker_api.rs | 2 +- 7 files changed, 34 insertions(+), 7 deletions(-) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 6e3218605..ecc51090c 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -10,6 +10,7 @@ use super::handlers::{ use super::middlewares::auth::auth; use crate::tracker::Tracker; +#[allow(clippy::needless_pass_by_value)] pub fn router(tracker: Arc) -> Router { Router::new() // Stats diff --git a/src/apis/server.rs b/src/apis/server.rs index 0a5013161..a283bbc54 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -38,10 +38,14 @@ pub struct Running { } impl ApiServer { + #[must_use] pub fn new(cfg: torrust_tracker_configuration::HttpApi) -> Self { Self { cfg, state: Stopped {} } } + /// # Errors + /// + /// It would return an error if no `SocketAddr` is returned after launching the server. 
pub async fn start(self, tracker: Arc) -> Result, Error> { let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::(); let (addr_sender, addr_receiver) = tokio::sync::oneshot::channel::(); @@ -51,12 +55,14 @@ impl ApiServer { let task = tokio::spawn(async move { let (bind_addr, server) = Launcher::start(&configuration, tracker, shutdown_signal(shutdown_receiver)); - addr_sender.send(bind_addr).unwrap(); + addr_sender.send(bind_addr).expect("Could not return SocketAddr."); server.await; }); - let bind_address = addr_receiver.await.expect("Could not receive bind_address."); + let bind_address = addr_receiver + .await + .map_err(|_| Error::Error("Could not receive bind_address.".to_string()))?; Ok(ApiServer { cfg: self.cfg, @@ -70,8 +76,14 @@ impl ApiServer { } impl ApiServer { + /// # Errors + /// + /// It would return an error if the channel for the task killer signal was closed. pub async fn stop(self) -> Result, Error> { - self.state.task_killer.send(0).unwrap(); + self.state + .task_killer + .send(0) + .map_err(|_| Error::Error("Task killer channel was closed.".to_string()))?; let _ = self.state.task.await; diff --git a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs index b0f30453d..acde5f662 100644 --- a/src/http/axum_implementation/routes.rs +++ b/src/http/axum_implementation/routes.rs @@ -7,6 +7,7 @@ use axum_client_ip::SecureClientIpSource; use super::handlers::{announce, scrape}; use crate::tracker::Tracker; +#[allow(clippy::needless_pass_by_value)] pub fn router(tracker: Arc) -> Router { Router::new() // Announce request diff --git a/src/http/server.rs b/src/http/server.rs index e7b6c1888..98160777c 100644 --- a/src/http/server.rs +++ b/src/http/server.rs @@ -56,6 +56,9 @@ impl HttpServer> { } } + /// # Errors + /// + /// It would return an error if no `SocketAddr` is returned after launching the server. 
pub async fn start(self, tracker: Arc) -> Result>, Error> { let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::(); let (addr_sender, addr_receiver) = tokio::sync::oneshot::channel::(); @@ -67,14 +70,16 @@ impl HttpServer> { let (bind_addr, server) = launcher.start_with_graceful_shutdown(configuration, tracker, shutdown_signal(shutdown_receiver)); - addr_sender.send(bind_addr).unwrap(); + addr_sender.send(bind_addr).expect("Could not return SocketAddr."); server.await; launcher }); - let bind_address = addr_receiver.await.expect("Could not receive bind_address."); + let bind_address = addr_receiver + .await + .map_err(|_| Error::Error("Could not receive bind_address.".to_string()))?; Ok(HttpServer { cfg: self.cfg, @@ -88,8 +93,14 @@ impl HttpServer> { } impl HttpServer> { + /// # Errors + /// + /// It would return an error if the channel for the task killer signal was closed. pub async fn stop(self) -> Result>, Error> { - self.state.task_killer.send(0).unwrap(); + self.state + .task_killer + .send(0) + .map_err(|_| Error::Error("Task killer channel was closed.".to_string()))?; let launcher = self.state.task.await.map_err(|e| Error::Error(e.to_string()))?; diff --git a/src/udp/server.rs b/src/udp/server.rs index f3f90362d..e52b8fd52 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -39,6 +39,7 @@ pub struct Running { } impl UdpServer { + #[must_use] pub fn new(cfg: torrust_tracker_configuration::UdpTracker) -> Self { Self { cfg, state: Stopped {} } } diff --git a/tests/common/tracker.rs b/tests/common/tracker.rs index ed2d8392b..127cfefc4 100644 --- a/tests/common/tracker.rs +++ b/tests/common/tracker.rs @@ -5,6 +5,7 @@ use torrust_tracker::tracker::Tracker; use torrust_tracker::{ephemeral_instance_keys, logging, static_time}; // TODO: Move to test-helpers crate once `Tracker` is isolated. 
+#[allow(clippy::module_name_repetitions)] pub fn new_tracker(configuration: Arc) -> Arc { // Set the time of Torrust app starting lazy_static::initialize(&static_time::TIME_AT_APP_START); diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index d00c7d68c..dac5907c2 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -790,7 +790,7 @@ mod tracker_apis { for invalid_key_duration in invalid_key_durations { let response = Client::new(test_env.get_connection_info()) - .post(&format!("key/{}", invalid_key_duration)) + .post(&format!("key/{invalid_key_duration}")) .await; assert_invalid_key_duration_param(response, invalid_key_duration).await; From cf9e9a9be268606b4b466127c583f3023167c10a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 9 Mar 2023 17:30:59 +0000 Subject: [PATCH 397/435] fix: merge conflicts --- src/http/axum_implementation/handlers/announce.rs | 2 +- src/http/axum_implementation/handlers/scrape.rs | 2 +- src/http/axum_implementation/routes.rs | 2 +- src/http/axum_implementation/services/announce.rs | 8 ++++---- src/tracker/mod.rs | 9 +++++---- tests/http_tracker.rs | 2 ++ 6 files changed, 14 insertions(+), 11 deletions(-) diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 6458e2c2f..1f1d7e176 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -182,7 +182,7 @@ mod tests { let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); // Initialize Torrust tracker - match Tracker::new(&Arc::new(configuration), Some(stats_event_sender), stats_repository) { + match Tracker::new(Arc::new(configuration), Some(stats_event_sender), stats_repository) { Ok(tracker) => tracker, Err(error) => { panic!("{}", error) diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 43bf6c99f..99bde0087 100644 --- 
a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -140,7 +140,7 @@ mod tests { let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); // Initialize Torrust tracker - match Tracker::new(&Arc::new(configuration), Some(stats_event_sender), stats_repository) { + match Tracker::new(Arc::new(configuration), Some(stats_event_sender), stats_repository) { Ok(tracker) => tracker, Err(error) => { panic!("{}", error) diff --git a/src/http/axum_implementation/routes.rs b/src/http/axum_implementation/routes.rs index acde5f662..a8e740f69 100644 --- a/src/http/axum_implementation/routes.rs +++ b/src/http/axum_implementation/routes.rs @@ -15,7 +15,7 @@ pub fn router(tracker: Arc) -> Router { .route("/announce/:key", get(announce::handle_with_key).with_state(tracker.clone())) // Scrape request .route("/scrape", get(scrape::handle_without_key).with_state(tracker.clone())) - .route("/scrape/:key", get(scrape::handle_with_key).with_state(tracker.clone())) + .route("/scrape/:key", get(scrape::handle_with_key).with_state(tracker)) // Add extension to get the client IP from the connection info .layer(SecureClientIpSource::ConnectInfo.into_extension()) } diff --git a/src/http/axum_implementation/services/announce.rs b/src/http/axum_implementation/services/announce.rs index 255a73c8f..479fb9d2b 100644 --- a/src/http/axum_implementation/services/announce.rs +++ b/src/http/axum_implementation/services/announce.rs @@ -51,7 +51,7 @@ mod tests { let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); // Initialize Torrust tracker - match Tracker::new(&Arc::new(configuration), Some(stats_event_sender), stats_repository) { + match Tracker::new(Arc::new(configuration), Some(stats_event_sender), stats_repository) { Ok(tracker) => tracker, Err(error) => { panic!("{}", error) @@ -137,7 +137,7 @@ mod tests { let tracker = Arc::new( Tracker::new( - &Arc::new(configuration::ephemeral()), + 
Arc::new(configuration::ephemeral()), Some(stats_event_sender), statistics::Repo::new(), ) @@ -154,7 +154,7 @@ mod tests { configuration.external_ip = Some(IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)).to_string()); - Tracker::new(&Arc::new(configuration), Some(stats_event_sender), statistics::Repo::new()).unwrap() + Tracker::new(Arc::new(configuration), Some(stats_event_sender), statistics::Repo::new()).unwrap() } fn peer_with_the_ipv4_loopback_ip() -> Peer { @@ -201,7 +201,7 @@ mod tests { let tracker = Arc::new( Tracker::new( - &Arc::new(configuration::ephemeral()), + Arc::new(configuration::ephemeral()), Some(stats_event_sender), statistics::Repo::new(), ) diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 874233d91..aae22f9b7 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -552,6 +552,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; + use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_primitives::TrackerMode; @@ -566,25 +567,25 @@ mod tests { pub fn public_tracker() -> Tracker { let mut configuration = configuration::ephemeral(); configuration.mode = TrackerMode::Public; - tracker_factory(configuration) + tracker_factory(Arc::new(configuration)) } pub fn private_tracker() -> Tracker { let mut configuration = configuration::ephemeral(); configuration.mode = TrackerMode::Private; - tracker_factory(configuration) + tracker_factory(Arc::new(configuration)) } pub fn whitelisted_tracker() -> Tracker { let mut configuration = configuration::ephemeral(); configuration.mode = TrackerMode::Listed; - tracker_factory(configuration) + tracker_factory(Arc::new(configuration)) } pub fn tracker_persisting_torrents_in_database() -> Tracker { let mut configuration = configuration::ephemeral(); configuration.persistent_torrent_completed_stat = true; - tracker_factory(configuration) + tracker_factory(Arc::new(configuration)) } fn 
sample_info_hash() -> InfoHash { diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index a4e87115a..d83ccbd0c 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1198,6 +1198,7 @@ mod warp_test_env { use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::Key; + use torrust_tracker_test_helpers::configuration; use crate::http::asserts::assert_is_announce_response; use crate::http::asserts_warp::{ @@ -2616,6 +2617,7 @@ mod axum_test_env { use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::auth::Key; + use torrust_tracker_test_helpers::configuration; use crate::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; use crate::http::client::Client; From 0097c85c2f896d0686d7cfdcf21304b728bf4b61 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 9 Mar 2023 17:35:59 +0000 Subject: [PATCH 398/435] fix: clippy warnings --- tests/http_tracker.rs | 264 ++++++++++++++++-------------------------- 1 file changed, 102 insertions(+), 162 deletions(-) diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index d83ccbd0c..3a5e84525 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -61,9 +61,7 @@ mod warp_test_env { let params = QueryBuilder::default().query().params(); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_could_not_find_remote_address_on_xff_header_error_response(response).await; @@ -76,7 +74,7 @@ mod warp_test_env { let params = QueryBuilder::default().query().params(); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") .await; @@ -131,9 +129,7 @@ mod warp_test_env { params.remove_optional_params(); - let response = 
Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_is_announce_response(response).await; @@ -144,7 +140,7 @@ mod warp_test_env { async fn should_fail_when_the_url_query_component_is_empty() { let test_env = running_test_environment::(configuration::ephemeral()).await; - let response = Client::new(test_env.bind_address().clone()).get("announce").await; + let response = Client::new(*test_env.bind_address()).get("announce").await; assert_internal_server_error_response(response).await; } @@ -159,9 +155,7 @@ mod warp_test_env { params.info_hash = None; - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_invalid_info_hash_error_response(response).await; @@ -171,9 +165,7 @@ mod warp_test_env { params.peer_id = None; - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_invalid_peer_id_error_response(response).await; @@ -183,9 +175,7 @@ mod warp_test_env { params.port = None; - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_internal_server_error_response(response).await; @@ -201,9 +191,7 @@ mod warp_test_env { for invalid_value in &invalid_info_hashes() { params.set("info_hash", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; 
assert_invalid_info_hash_error_response(response).await; } @@ -224,9 +212,7 @@ mod warp_test_env { params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_is_announce_response(response).await; @@ -244,9 +230,7 @@ mod warp_test_env { for invalid_value in invalid_values { params.set("downloaded", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_internal_server_error_response(response).await; } @@ -265,9 +249,7 @@ mod warp_test_env { for invalid_value in invalid_values { params.set("uploaded", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_internal_server_error_response(response).await; } @@ -293,9 +275,7 @@ mod warp_test_env { for invalid_value in invalid_values { params.set("peer_id", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_invalid_peer_id_error_response(response).await; } @@ -314,9 +294,7 @@ mod warp_test_env { for invalid_value in invalid_values { params.set("port", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_internal_server_error_response(response).await; } @@ -335,9 +313,7 @@ mod warp_test_env { for invalid_value in 
invalid_values { params.set("left", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_internal_server_error_response(response).await; } @@ -366,9 +342,7 @@ mod warp_test_env { for invalid_value in invalid_values { params.set("event", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_is_announce_response(response).await; } @@ -387,9 +361,7 @@ mod warp_test_env { for invalid_value in invalid_values { params.set("compact", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_internal_server_error_response(response).await; } @@ -401,7 +373,7 @@ mod warp_test_env { async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) @@ -439,7 +411,7 @@ mod warp_test_env { test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2. 
This new peer is non included on the response peer list - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -481,7 +453,7 @@ mod warp_test_env { assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); - let response = Client::new(test_env.bind_address().clone()).announce(&announce_query).await; + let response = Client::new(*test_env.bind_address()).announce(&announce_query).await; assert_empty_announce_response(response).await; @@ -506,7 +478,7 @@ mod warp_test_env { test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 accepting compact responses - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -549,7 +521,7 @@ mod warp_test_env { // Announce the new Peer 2 without passing the "compact" param // By default it should respond with the compact peer list // https://www.bittorrent.org/beps/bep_0023.html - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -574,7 +546,7 @@ mod warp_test_env { async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::new(test_env.bind_address().clone()) + Client::new(*test_env.bind_address()) .announce(&QueryBuilder::default().query()) .await; @@ -591,7 +563,7 @@ mod warp_test_env { async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) + Client::bind(*test_env.bind_address(), 
IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) .await; @@ -610,7 +582,7 @@ mod warp_test_env { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::new(test_env.bind_address().clone()) + Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) @@ -631,7 +603,7 @@ mod warp_test_env { async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::new(test_env.bind_address().clone()) + Client::new(*test_env.bind_address()) .announce(&QueryBuilder::default().query()) .await; @@ -648,7 +620,7 @@ mod warp_test_env { async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) .await; @@ -667,7 +639,7 @@ mod warp_test_env { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::new(test_env.bind_address().clone()) + Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) @@ -691,7 +663,7 @@ mod warp_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let client_ip = local_ip().unwrap(); - let client = Client::bind(test_env.bind_address().clone(), client_ip); + let client = Client::bind(*test_env.bind_address(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -727,7 +699,7 @@ mod warp_test_env { let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = 
loopback_ip; - let client = Client::bind(test_env.bind_address().clone(), client_ip); + let client = Client::bind(*test_env.bind_address(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -763,7 +735,7 @@ mod warp_test_env { let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = loopback_ip; - let client = Client::bind(test_env.bind_address().clone(), client_ip); + let client = Client::bind(*test_env.bind_address(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -794,7 +766,7 @@ mod warp_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let client = Client::new(test_env.bind_address().clone()); + let client = Client::new(*test_env.bind_address()); let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); @@ -844,7 +816,7 @@ mod warp_test_env { #[tokio::test] async fn should_fail_when_the_request_is_empty() { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let response = Client::new(test_env.bind_address().clone()).get("scrape").await; + let response = Client::new(*test_env.bind_address()).get("scrape").await; assert_internal_server_error_response(response).await; @@ -860,9 +832,7 @@ mod warp_test_env { for invalid_value in &invalid_info_hashes() { params.set_one_info_hash_param(invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; // code-review: it's not returning the invalid info hash error assert_internal_server_error_response(response).await; @@ -887,7 +857,7 @@ mod warp_test_env { ) .await; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() 
.with_one_info_hash(&info_hash) @@ -927,7 +897,7 @@ mod warp_test_env { ) .await; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -957,7 +927,7 @@ mod warp_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -977,7 +947,7 @@ mod warp_test_env { let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .add_info_hash(&info_hash1) @@ -1002,7 +972,7 @@ mod warp_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::new(test_env.bind_address().clone()) + Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1025,7 +995,7 @@ mod warp_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1064,7 +1034,7 @@ mod warp_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) 
.announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -1086,7 +1056,7 @@ mod warp_test_env { .await .expect("should add the torrent to the whitelist"); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -1127,7 +1097,7 @@ mod warp_test_env { ) .await; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1164,7 +1134,7 @@ mod warp_test_env { .await .expect("should add the torrent to the whitelist"); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1215,7 +1185,7 @@ mod warp_test_env { let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(test_env.bind_address().clone(), key.id()) + let response = Client::authenticated(*test_env.bind_address(), key.id()) .announce(&QueryBuilder::default().query()) .await; @@ -1230,7 +1200,7 @@ mod warp_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -1244,7 +1214,7 @@ mod warp_test_env { // The tracker does not have this key let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let response = Client::authenticated(test_env.bind_address().clone(), unregistered_key) + let response = Client::authenticated(*test_env.bind_address(), unregistered_key) .announce(&QueryBuilder::default().query()) .await; @@ -1288,7 
+1258,7 @@ mod warp_test_env { ) .await; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1321,7 +1291,7 @@ mod warp_test_env { let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(test_env.bind_address().clone(), key.id()) + let response = Client::authenticated(*test_env.bind_address(), key.id()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1365,7 +1335,7 @@ mod warp_test_env { let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); - let response = Client::authenticated(test_env.bind_address().clone(), false_key) + let response = Client::authenticated(*test_env.bind_address(), false_key) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1414,9 +1384,7 @@ mod axum_test_env { let params = QueryBuilder::default().query().params(); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; @@ -1429,7 +1397,7 @@ mod axum_test_env { let params = QueryBuilder::default().query().params(); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") .await; @@ -1483,9 +1451,7 @@ mod axum_test_env { params.remove_optional_params(); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_is_announce_response(response).await; 
@@ -1496,7 +1462,7 @@ mod axum_test_env { async fn should_fail_when_the_url_query_component_is_empty() { let test_env = running_test_environment::(configuration::ephemeral()).await; - let response = Client::new(test_env.bind_address().clone()).get("announce").await; + let response = Client::new(*test_env.bind_address()).get("announce").await; assert_missing_query_params_for_announce_request_error_response(response).await; @@ -1509,7 +1475,7 @@ mod axum_test_env { let invalid_query_param = "a=b=c"; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .get(&format!("announce?{invalid_query_param}")) .await; @@ -1528,9 +1494,7 @@ mod axum_test_env { params.info_hash = None; - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "missing param info_hash").await; @@ -1540,9 +1504,7 @@ mod axum_test_env { params.peer_id = None; - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "missing param peer_id").await; @@ -1552,9 +1514,7 @@ mod axum_test_env { params.port = None; - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "missing param port").await; @@ -1570,9 +1530,7 @@ mod axum_test_env { for invalid_value in &invalid_info_hashes() { params.set("info_hash", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response 
= Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_cannot_parse_query_params_error_response(response, "").await; } @@ -1593,9 +1551,7 @@ mod axum_test_env { params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_is_announce_response(response).await; @@ -1613,9 +1569,7 @@ mod axum_test_env { for invalid_value in invalid_values { params.set("downloaded", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } @@ -1634,9 +1588,7 @@ mod axum_test_env { for invalid_value in invalid_values { params.set("uploaded", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } @@ -1662,9 +1614,7 @@ mod axum_test_env { for invalid_value in invalid_values { params.set("peer_id", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } @@ -1683,9 +1633,7 @@ mod axum_test_env { for invalid_value in invalid_values { params.set("port", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = 
Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } @@ -1704,9 +1652,7 @@ mod axum_test_env { for invalid_value in invalid_values { params.set("left", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } @@ -1733,9 +1679,7 @@ mod axum_test_env { for invalid_value in invalid_values { params.set("event", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } @@ -1754,9 +1698,7 @@ mod axum_test_env { for invalid_value in invalid_values { params.set("compact", invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } @@ -1768,7 +1710,7 @@ mod axum_test_env { async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) @@ -1806,7 +1748,7 @@ mod axum_test_env { test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2. 
This new peer is non included on the response peer list - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -1855,7 +1797,7 @@ mod axum_test_env { test_env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; // Announce the new Peer. - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -1898,7 +1840,7 @@ mod axum_test_env { assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); - let response = Client::new(test_env.bind_address().clone()).announce(&announce_query).await; + let response = Client::new(*test_env.bind_address()).announce(&announce_query).await; assert_empty_announce_response(response).await; @@ -1923,7 +1865,7 @@ mod axum_test_env { test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 accepting compact responses - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -1966,7 +1908,7 @@ mod axum_test_env { // Announce the new Peer 2 without passing the "compact" param // By default it should respond with the compact peer list // https://www.bittorrent.org/beps/bep_0023.html - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -1991,7 +1933,7 @@ mod axum_test_env { async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::new(test_env.bind_address().clone()) + Client::new(*test_env.bind_address()) .announce(&QueryBuilder::default().query()) .await; @@ -2008,7 +1950,7 @@ 
mod axum_test_env { async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) .await; @@ -2027,7 +1969,7 @@ mod axum_test_env { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::new(test_env.bind_address().clone()) + Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) @@ -2048,7 +1990,7 @@ mod axum_test_env { async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::new(test_env.bind_address().clone()) + Client::new(*test_env.bind_address()) .announce(&QueryBuilder::default().query()) .await; @@ -2065,7 +2007,7 @@ mod axum_test_env { async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) .await; @@ -2084,7 +2026,7 @@ mod axum_test_env { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::new(test_env.bind_address().clone()) + Client::new(*test_env.bind_address()) .announce( &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) @@ -2108,7 +2050,7 @@ mod axum_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let client_ip = local_ip().unwrap(); - let client = 
Client::bind(test_env.bind_address().clone(), client_ip); + let client = Client::bind(*test_env.bind_address(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -2144,7 +2086,7 @@ mod axum_test_env { let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = loopback_ip; - let client = Client::bind(test_env.bind_address().clone(), client_ip); + let client = Client::bind(*test_env.bind_address(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -2180,7 +2122,7 @@ mod axum_test_env { let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = loopback_ip; - let client = Client::bind(test_env.bind_address().clone(), client_ip); + let client = Client::bind(*test_env.bind_address(), client_ip); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -2211,7 +2153,7 @@ mod axum_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let client = Client::new(test_env.bind_address().clone()); + let client = Client::new(*test_env.bind_address()); let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); @@ -2265,7 +2207,7 @@ mod axum_test_env { #[allow(dead_code)] async fn should_fail_when_the_request_is_empty() { let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let response = Client::new(test_env.bind_address().clone()).get("scrape").await; + let response = Client::new(*test_env.bind_address()).get("scrape").await; assert_missing_query_params_for_scrape_request_error_response(response).await; @@ -2281,9 +2223,7 @@ mod axum_test_env { for invalid_value in &invalid_info_hashes() { params.set_one_info_hash_param(invalid_value); - let response = Client::new(test_env.bind_address().clone()) - .get(&format!("announce?{params}")) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; 
assert_cannot_parse_query_params_error_response(response, "").await; } @@ -2307,7 +2247,7 @@ mod axum_test_env { ) .await; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2347,7 +2287,7 @@ mod axum_test_env { ) .await; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2377,7 +2317,7 @@ mod axum_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2397,7 +2337,7 @@ mod axum_test_env { let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .add_info_hash(&info_hash1) @@ -2422,7 +2362,7 @@ mod axum_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::new(test_env.bind_address().clone()) + Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2445,7 +2385,7 @@ mod axum_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::bind(test_env.bind_address().clone(), IpAddr::from_str("::1").unwrap()) + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) .scrape( &requests::scrape::QueryBuilder::default() 
.with_one_info_hash(&info_hash) @@ -2484,7 +2424,7 @@ mod axum_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -2505,7 +2445,7 @@ mod axum_test_env { .await .expect("should add the torrent to the whitelist"); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -2546,7 +2486,7 @@ mod axum_test_env { ) .await; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2583,7 +2523,7 @@ mod axum_test_env { .await .expect("should add the torrent to the whitelist"); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2631,7 +2571,7 @@ mod axum_test_env { let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(test_env.bind_address().clone(), key.id()) + let response = Client::authenticated(*test_env.bind_address(), key.id()) .announce(&QueryBuilder::default().query()) .await; @@ -2646,7 +2586,7 @@ mod axum_test_env { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; @@ -2662,7 +2602,7 @@ mod axum_test_env { let invalid_key = "INVALID_KEY"; - let response = 
Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .get(&format!( "announce/{invalid_key}?info_hash=%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0" )) @@ -2679,7 +2619,7 @@ mod axum_test_env { // The tracker does not have this key let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let response = Client::authenticated(test_env.bind_address().clone(), unregistered_key) + let response = Client::authenticated(*test_env.bind_address(), unregistered_key) .announce(&QueryBuilder::default().query()) .await; @@ -2714,7 +2654,7 @@ mod axum_test_env { let invalid_key = "INVALID_KEY"; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .get(&format!( "scrape/{invalid_key}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" )) @@ -2739,7 +2679,7 @@ mod axum_test_env { ) .await; - let response = Client::new(test_env.bind_address().clone()) + let response = Client::new(*test_env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2772,7 +2712,7 @@ mod axum_test_env { let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(test_env.bind_address().clone(), key.id()) + let response = Client::authenticated(*test_env.bind_address(), key.id()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -2817,7 +2757,7 @@ mod axum_test_env { let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); - let response = Client::authenticated(test_env.bind_address().clone(), false_key) + let response = Client::authenticated(*test_env.bind_address(), false_key) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) 
From 4ad981574ac319ccd9fcd86c425caf0a0ae82168 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 9 Mar 2023 18:26:10 +0000 Subject: [PATCH 399/435] refactor: remove duplicate code --- packages/test-helpers/src/configuration.rs | 9 ++ .../axum_implementation/handlers/announce.rs | 42 ++-------- .../axum_implementation/handlers/scrape.rs | 42 ++-------- .../axum_implementation/services/announce.rs | 24 +----- src/tracker/mod.rs | 19 ++--- src/udp/handlers.rs | 83 ++++++++----------- tests/http_tracker.rs | 9 +- 7 files changed, 70 insertions(+), 158 deletions(-) diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index ec29fdbe1..0b7a269ff 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -55,6 +55,15 @@ pub fn ephemeral_with_reverse_proxy() -> Configuration { cfg } +#[must_use] +pub fn ephemeral_without_reverse_proxy() -> Configuration { + let mut cfg = ephemeral(); + + cfg.on_reverse_proxy = false; + + cfg +} + #[must_use] pub fn ephemeral_mode_public() -> Configuration { let mut cfg = ephemeral(); diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/axum_implementation/handlers/announce.rs index 1f1d7e176..ebb8c8586 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/axum_implementation/handlers/announce.rs @@ -138,56 +138,30 @@ fn map_to_aquatic_event(event: &Option) -> AnnounceEvent { #[cfg(test)] mod tests { - use std::sync::Arc; - use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::TrackerMode; use torrust_tracker_test_helpers::configuration; use crate::http::axum_implementation::requests::announce::Announce; use crate::http::axum_implementation::responses; use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; use crate::protocol::info_hash::InfoHash; - use crate::tracker::statistics::Keeper; + use 
crate::tracker::services::common::tracker_factory; use crate::tracker::{peer, Tracker}; fn private_tracker() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.mode = TrackerMode::Private; - tracker_factory(configuration) + tracker_factory(configuration::ephemeral_mode_private().into()) } - fn listed_tracker() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.mode = TrackerMode::Listed; - tracker_factory(configuration) + fn whitelisted_tracker() -> Tracker { + tracker_factory(configuration::ephemeral_mode_whitelisted().into()) } fn tracker_on_reverse_proxy() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.on_reverse_proxy = true; - tracker_factory(configuration) + tracker_factory(configuration::ephemeral_with_reverse_proxy().into()) } fn tracker_not_on_reverse_proxy() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.on_reverse_proxy = false; - tracker_factory(configuration) - } - - fn tracker_factory(configuration: Configuration) -> Tracker { - // code-review: the tracker initialization is duplicated in many places. Consider make this function public. 
- - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - match Tracker::new(Arc::new(configuration), Some(stats_event_sender), stats_repository) { - Ok(tracker) => tracker, - Err(error) => { - panic!("{}", error) - } - } + tracker_factory(configuration::ephemeral_without_reverse_proxy().into()) } fn sample_announce_request() -> Announce { @@ -263,13 +237,13 @@ mod tests { use std::sync::Arc; - use super::{listed_tracker, sample_announce_request, sample_client_ip_sources}; + use super::{sample_announce_request, sample_client_ip_sources, whitelisted_tracker}; use crate::http::axum_implementation::handlers::announce::handle_announce; use crate::http::axum_implementation::handlers::announce::tests::assert_error_response; #[tokio::test] async fn it_should_fail_when_the_announced_torrent_is_not_whitelisted() { - let tracker = Arc::new(listed_tracker()); + let tracker = Arc::new(whitelisted_tracker()); let announce_request = sample_announce_request(); diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/axum_implementation/handlers/scrape.rs index 99bde0087..fd316882d 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/axum_implementation/handlers/scrape.rs @@ -96,56 +96,30 @@ fn build_response(scrape_data: ScrapeData) -> Response { mod tests { use std::net::IpAddr; use std::str::FromStr; - use std::sync::Arc; - use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::TrackerMode; use torrust_tracker_test_helpers::configuration; use crate::http::axum_implementation::requests::scrape::Scrape; use crate::http::axum_implementation::responses; use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; use crate::protocol::info_hash::InfoHash; - use crate::tracker::statistics::Keeper; + use crate::tracker::services::common::tracker_factory; use crate::tracker::Tracker; fn private_tracker() -> 
Tracker { - let mut configuration = configuration::ephemeral(); - configuration.mode = TrackerMode::Private; - tracker_factory(configuration) + tracker_factory(configuration::ephemeral_mode_private().into()) } - fn listed_tracker() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.mode = TrackerMode::Listed; - tracker_factory(configuration) + fn whitelisted_tracker() -> Tracker { + tracker_factory(configuration::ephemeral_mode_whitelisted().into()) } fn tracker_on_reverse_proxy() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.on_reverse_proxy = true; - tracker_factory(configuration) + tracker_factory(configuration::ephemeral_with_reverse_proxy().into()) } fn tracker_not_on_reverse_proxy() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.on_reverse_proxy = false; - tracker_factory(configuration) - } - - fn tracker_factory(configuration: Configuration) -> Tracker { - // code-review: the tracker initialization is duplicated in many places. Consider make this function public. 
- - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - match Tracker::new(Arc::new(configuration), Some(stats_event_sender), stats_repository) { - Ok(tracker) => tracker, - Err(error) => { - panic!("{}", error) - } - } + tracker_factory(configuration::ephemeral_without_reverse_proxy().into()) } fn sample_scrape_request() -> Scrape { @@ -214,13 +188,13 @@ mod tests { use std::sync::Arc; - use super::{listed_tracker, sample_client_ip_sources, sample_scrape_request}; + use super::{sample_client_ip_sources, sample_scrape_request, whitelisted_tracker}; use crate::http::axum_implementation::handlers::scrape::handle_scrape; use crate::tracker::ScrapeData; #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_torrent_is_not_whitelisted() { - let tracker = Arc::new(listed_tracker()); + let tracker = Arc::new(whitelisted_tracker()); let scrape_request = sample_scrape_request(); diff --git a/src/http/axum_implementation/services/announce.rs b/src/http/axum_implementation/services/announce.rs index 479fb9d2b..73d6ed468 100644 --- a/src/http/axum_implementation/services/announce.rs +++ b/src/http/axum_implementation/services/announce.rs @@ -26,37 +26,17 @@ pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::TrackerMode; use torrust_tracker_test_helpers::configuration; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; - use crate::tracker::statistics::Keeper; + use crate::tracker::services::common::tracker_factory; use crate::tracker::{peer, Tracker}; fn public_tracker() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.mode = 
TrackerMode::Public; - tracker_factory(configuration) - } - - fn tracker_factory(configuration: Configuration) -> Tracker { - // code-review: the tracker initialization is duplicated in many places. Consider make this function public. - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - match Tracker::new(Arc::new(configuration), Some(stats_event_sender), stats_repository) { - Ok(tracker) => tracker, - Err(error) => { - panic!("{}", error) - } - } + tracker_factory(configuration::ephemeral_mode_public().into()) } fn sample_info_hash() -> InfoHash { diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index aae22f9b7..326afbf00 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -555,7 +555,6 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use torrust_tracker_primitives::TrackerMode; use torrust_tracker_test_helpers::configuration; use crate::protocol::clock::DurationSinceUnixEpoch; @@ -564,22 +563,16 @@ mod tests { use crate::tracker::services::common::tracker_factory; use crate::tracker::{TorrentsMetrics, Tracker}; - pub fn public_tracker() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.mode = TrackerMode::Public; - tracker_factory(Arc::new(configuration)) + fn public_tracker() -> Tracker { + tracker_factory(configuration::ephemeral_mode_public().into()) } - pub fn private_tracker() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.mode = TrackerMode::Private; - tracker_factory(Arc::new(configuration)) + fn private_tracker() -> Tracker { + tracker_factory(configuration::ephemeral_mode_private().into()) } - pub fn whitelisted_tracker() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.mode = TrackerMode::Listed; - tracker_factory(Arc::new(configuration)) + fn whitelisted_tracker() -> Tracker { + 
tracker_factory(configuration::ephemeral_mode_whitelisted().into()) } pub fn tracker_persisting_torrents_in_database() -> Tracker { diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index e47a89dd4..41b1184dc 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -239,11 +239,11 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::TrackerMode; use torrust_tracker_test_helpers::configuration; use crate::protocol::clock::{Current, Time}; - use crate::tracker::{self, peer, statistics}; + use crate::tracker::services::common::tracker_factory; + use crate::tracker::{self, peer}; fn tracker_configuration() -> Arc { Arc::new(default_testing_tracker_configuration()) @@ -253,24 +253,20 @@ mod tests { configuration::ephemeral() } - fn initialized_public_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Public).into()); - initialized_tracker(configuration) + fn public_tracker() -> Arc { + initialized_tracker(configuration::ephemeral_mode_public().into()) } - fn initialized_private_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Private).into()); - initialized_tracker(configuration) + fn private_tracker() -> Arc { + initialized_tracker(configuration::ephemeral_mode_private().into()) } - fn initialized_whitelisted_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Listed).into()); - initialized_tracker(configuration) + fn whitelisted_tracker() -> Arc { + initialized_tracker(configuration::ephemeral_mode_whitelisted().into()) } fn initialized_tracker(configuration: Arc) -> Arc { - let (stats_event_sender, stats_repository) = statistics::Keeper::new_active_instance(); - Arc::new(tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()) + 
tracker_factory(configuration).into() } fn sample_ipv4_remote_addr() -> SocketAddr { @@ -344,11 +340,6 @@ mod tests { self } - pub fn with_mode(mut self, mode: TrackerMode) -> Self { - self.configuration.mode = mode; - self - } - pub fn into(self) -> Configuration { self.configuration } @@ -366,7 +357,7 @@ mod tests { use crate::tracker::{self, statistics}; use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_connect; - use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; + use crate::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; fn sample_connect_request() -> ConnectRequest { ConnectRequest { @@ -380,7 +371,7 @@ mod tests { transaction_id: TransactionId(0i32), }; - let response = handle_connect(sample_ipv4_remote_addr(), &request, &initialized_public_tracker()) + let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker()) .await .unwrap(); @@ -399,7 +390,7 @@ mod tests { transaction_id: TransactionId(0i32), }; - let response = handle_connect(sample_ipv4_remote_addr(), &request, &initialized_public_tracker()) + let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker()) .await .unwrap(); @@ -539,12 +530,12 @@ mod tests { use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ - initialized_public_tracker, sample_ipv4_socket_address, tracker_configuration, TorrentPeerBuilder, + public_tracker, sample_ipv4_socket_address, tracker_configuration, TorrentPeerBuilder, }; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { - let tracker = initialized_public_tracker(); + let tracker = public_tracker(); let client_ip = Ipv4Addr::new(126, 0, 0, 1); let client_port = 8080; @@ -581,9 +572,7 @@ mod tests { .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); - let response = 
handle_announce(remote_addr, &request, &initialized_public_tracker()) - .await - .unwrap(); + let response = handle_announce(remote_addr, &request, &public_tracker()).await.unwrap(); let empty_peer_vector: Vec> = vec![]; assert_eq!( @@ -604,7 +593,7 @@ mod tests { // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): // "Do note that most trackers will only honor the IP address field under limited circumstances." - let tracker = initialized_public_tracker(); + let tracker = public_tracker(); let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); @@ -660,7 +649,7 @@ mod tests { #[tokio::test] async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { - let tracker = initialized_public_tracker(); + let tracker = public_tracker(); add_a_torrent_peer_using_ipv6(tracker.clone()).await; @@ -707,11 +696,11 @@ mod tests { use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::udp::handlers::tests::{initialized_public_tracker, TorrentPeerBuilder}; + use crate::udp::handlers::tests::{public_tracker, TorrentPeerBuilder}; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { - let tracker = initialized_public_tracker(); + let tracker = public_tracker(); let client_ip = Ipv4Addr::new(127, 0, 0, 1); let client_port = 8080; @@ -762,12 +751,12 @@ mod tests { use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ - initialized_public_tracker, sample_ipv6_remote_addr, tracker_configuration, TorrentPeerBuilder, + public_tracker, sample_ipv6_remote_addr, tracker_configuration, TorrentPeerBuilder, }; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { - let 
tracker = initialized_public_tracker(); + let tracker = public_tracker(); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); @@ -808,9 +797,7 @@ mod tests { .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); - let response = handle_announce(remote_addr, &request, &initialized_public_tracker()) - .await - .unwrap(); + let response = handle_announce(remote_addr, &request, &public_tracker()).await.unwrap(); let empty_peer_vector: Vec> = vec![]; assert_eq!( @@ -831,7 +818,7 @@ mod tests { // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): // "Do note that most trackers will only honor the IP address field under limited circumstances." - let tracker = initialized_public_tracker(); + let tracker = public_tracker(); let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); @@ -890,7 +877,7 @@ mod tests { #[tokio::test] async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { - let tracker = initialized_public_tracker(); + let tracker = public_tracker(); add_a_torrent_peer_using_ipv4(tracker.clone()).await; @@ -999,7 +986,7 @@ mod tests { use crate::tracker::{self, peer}; use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; + use crate::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { TorrentScrapeStatistics { @@ -1022,9 +1009,7 @@ mod tests { info_hashes, }; - let response = handle_scrape(remote_addr, &request, &initialized_public_tracker()) - .await - .unwrap(); + let response = handle_scrape(remote_addr, &request, &public_tracker()).await.unwrap(); let expected_torrent_stats = vec![zeroed_torrent_statistics()]; @@ -1082,12 +1067,12 @@ mod tests { mod with_a_public_tracker { use 
aquatic_udp_protocol::{NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; - use crate::udp::handlers::tests::initialized_public_tracker; + use crate::udp::handlers::tests::public_tracker; use crate::udp::handlers::tests::scrape_request::{add_a_sample_seeder_and_scrape, match_scrape_response}; #[tokio::test] async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { - let tracker = initialized_public_tracker(); + let tracker = public_tracker(); let torrent_stats = match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone()).await); @@ -1109,11 +1094,11 @@ mod tests { use crate::udp::handlers::tests::scrape_request::{ add_a_sample_seeder_and_scrape, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; - use crate::udp::handlers::tests::{initialized_private_tracker, sample_ipv4_remote_addr}; + use crate::udp::handlers::tests::{private_tracker, sample_ipv4_remote_addr}; #[tokio::test] async fn should_return_zeroed_statistics_when_the_tracker_does_not_have_the_requested_torrent() { - let tracker = initialized_private_tracker(); + let tracker = private_tracker(); let remote_addr = sample_ipv4_remote_addr(); let non_existing_info_hash = InfoHash([0u8; 20]); @@ -1130,7 +1115,7 @@ mod tests { #[tokio::test] async fn should_return_zeroed_statistics_when_the_tracker_has_the_requested_torrent_because_authenticated_requests_are_not_supported_in_udp_tracker( ) { - let tracker = initialized_private_tracker(); + let tracker = private_tracker(); let torrent_stats = match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone()).await).unwrap(); @@ -1147,11 +1132,11 @@ mod tests { use crate::udp::handlers::tests::scrape_request::{ add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; - use crate::udp::handlers::tests::{initialized_whitelisted_tracker, sample_ipv4_remote_addr}; + use crate::udp::handlers::tests::{sample_ipv4_remote_addr, whitelisted_tracker}; #[tokio::test] 
async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { - let tracker = initialized_whitelisted_tracker(); + let tracker = whitelisted_tracker(); let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); @@ -1175,7 +1160,7 @@ mod tests { #[tokio::test] async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { - let tracker = initialized_whitelisted_tracker(); + let tracker = whitelisted_tracker(); let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 3a5e84525..aea8fac37 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -2597,8 +2597,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let invalid_key = "INVALID_KEY"; @@ -2613,8 +2612,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; // The tracker does not have this key let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); @@ -2649,8 +2647,7 @@ mod axum_test_env { #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let test_env = - running_test_environment::(torrust_tracker_test_helpers::configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; let invalid_key = "INVALID_KEY"; From e548f686a435b7a024df7d3eab66a6c4be2a83b9 Mon Sep 
17 00:00:00 2001 From: Jose Celano Date: Thu, 9 Mar 2023 14:11:50 +0000 Subject: [PATCH 400/435] test(http): [#224] unit tests for scrape service --- .../axum_implementation/services/scrape.rs | 213 ++++++++++++++++++ src/tracker/peer.rs | 4 + 2 files changed, 217 insertions(+) diff --git a/src/http/axum_implementation/services/scrape.rs b/src/http/axum_implementation/services/scrape.rs index cfcba09f9..b48bab642 100644 --- a/src/http/axum_implementation/services/scrape.rs +++ b/src/http/axum_implementation/services/scrape.rs @@ -30,3 +30,216 @@ async fn send_scrape_event(original_peer_ip: &IpAddr, tracker: &Arc) { } } } + +#[cfg(test)] +mod tests { + + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_test_helpers::configuration; + + use crate::protocol::clock::DurationSinceUnixEpoch; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::services::common::tracker_factory; + use crate::tracker::{peer, Tracker}; + + fn public_tracker() -> Tracker { + tracker_factory(configuration::ephemeral_mode_public().into()) + } + + fn sample_info_hashes() -> Vec { + vec![sample_info_hash()] + } + + fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() + } + + fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + } + } + + mod with_real_data { + + use std::future; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::sync::Arc; + + use mockall::predicate::eq; + use torrust_tracker_test_helpers::configuration; + + use crate::http::axum_implementation::services::scrape::invoke; + use crate::http::axum_implementation::services::scrape::tests::{ + 
public_tracker, sample_info_hash, sample_info_hashes, sample_peer, + }; + use crate::tracker::torrent::SwarmMetadata; + use crate::tracker::{statistics, ScrapeData, Tracker}; + + #[tokio::test] + async fn it_should_return_the_scrape_data_for_a_torrent() { + let tracker = Arc::new(public_tracker()); + + let info_hash = sample_info_hash(); + let info_hashes = vec![info_hash]; + + // Announce a new peer to force scrape data to contain not zeroed data + let mut peer = sample_peer(); + let original_peer_ip = peer.ip(); + tracker.announce(&info_hash, &mut peer, &original_peer_ip).await; + + let scrape_data = invoke(&tracker, &info_hashes, &original_peer_ip).await; + + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file( + &info_hash, + SwarmMetadata { + complete: 1, + downloaded: 0, + incomplete: 0, + }, + ); + + assert_eq!(scrape_data, expected_scrape_data); + } + + #[tokio::test] + async fn it_should_send_the_tcp_4_scrape_event_when_the_peer_uses_ipv4() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp4Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + Arc::new(configuration::ephemeral()), + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); + + invoke(&tracker, &sample_info_hashes(), &peer_ip).await; + } + + #[tokio::test] + async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp6Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + 
Tracker::new( + Arc::new(configuration::ephemeral()), + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); + + invoke(&tracker, &sample_info_hashes(), &peer_ip).await; + } + } + + mod with_zeroed_data { + + use std::future; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::sync::Arc; + + use mockall::predicate::eq; + use torrust_tracker_test_helpers::configuration; + + use crate::http::axum_implementation::services::scrape::fake; + use crate::http::axum_implementation::services::scrape::tests::{ + public_tracker, sample_info_hash, sample_info_hashes, sample_peer, + }; + use crate::tracker::{statistics, ScrapeData, Tracker}; + + #[tokio::test] + async fn it_should_always_return_the_zeroed_scrape_data_for_a_torrent() { + let tracker = Arc::new(public_tracker()); + + let info_hash = sample_info_hash(); + let info_hashes = vec![info_hash]; + + // Announce a new peer to force scrape data to contain not zeroed data + let mut peer = sample_peer(); + let original_peer_ip = peer.ip(); + tracker.announce(&info_hash, &mut peer, &original_peer_ip).await; + + let scrape_data = fake(&tracker, &info_hashes, &original_peer_ip).await; + + let expected_scrape_data = ScrapeData::zeroed(&info_hashes); + + assert_eq!(scrape_data, expected_scrape_data); + } + + #[tokio::test] + async fn it_should_send_the_tcp_4_scrape_event_when_the_peer_uses_ipv4() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp4Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + Arc::new(configuration::ephemeral()), + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 
0, 1)); + + fake(&tracker, &sample_info_hashes(), &peer_ip).await; + } + + #[tokio::test] + async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp6Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + Arc::new(configuration::ephemeral()), + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); + + fake(&tracker, &sample_info_hashes(), &peer_ip).await; + } + } +} diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index c6d87f036..015af12a3 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -40,6 +40,10 @@ impl Peer { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } + pub fn ip(&mut self) -> IpAddr { + self.peer_addr.ip() + } + pub fn change_ip(&mut self, new_ip: &IpAddr) { self.peer_addr = SocketAddr::new(*new_ip, self.peer_addr.port()); } From 4355c2a8a62a568814cdaa3f4576d22d70b7ba56 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 9 Mar 2023 19:28:10 +0000 Subject: [PATCH 401/435] feat: [#227] enable Axum HTTP tracker --- src/setup.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/setup.rs b/src/setup.rs index 5b51632a7..ee32f5a81 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -51,7 +51,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Ve if !http_tracker_config.enabled { continue; } - jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone(), Version::Warp).await); + jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone(), Version::Axum).await); } // Start HTTP API From 467d43f607afb1f4821adb85a2f60c8f114314ed Mon Sep 17 00:00:00 2001 
From: Jose Celano Date: Fri, 10 Mar 2023 08:48:57 +0000 Subject: [PATCH 402/435] refactor: [#229] remove Warp HTTP Tracker --- src/apis/responses.rs | 2 +- src/http/mod.rs | 2 - src/http/warp_implementation/error.rs | 39 - .../warp_implementation/filter_helpers.rs | 85 -- src/http/warp_implementation/filters.rs | 205 --- src/http/warp_implementation/handlers.rs | 207 --- src/http/warp_implementation/launcher.rs | 116 -- src/http/warp_implementation/mod.rs | 14 - src/http/warp_implementation/peer_builder.rs | 32 - src/http/warp_implementation/request.rs | 36 - src/http/warp_implementation/response.rs | 132 -- src/http/warp_implementation/routes.rs | 36 - src/jobs/http_tracker.rs | 62 +- tests/http/asserts.rs | 46 - tests/http/asserts_warp.rs | 34 - tests/http/mod.rs | 1 - tests/http/responses/announce_warp.rs | 30 - tests/http/responses/mod.rs | 1 - tests/http_tracker.rs | 1350 +---------------- tests/udp_tracker.rs | 2 + 20 files changed, 9 insertions(+), 2423 deletions(-) delete mode 100644 src/http/warp_implementation/error.rs delete mode 100644 src/http/warp_implementation/filter_helpers.rs delete mode 100644 src/http/warp_implementation/filters.rs delete mode 100644 src/http/warp_implementation/handlers.rs delete mode 100644 src/http/warp_implementation/launcher.rs delete mode 100644 src/http/warp_implementation/mod.rs delete mode 100644 src/http/warp_implementation/peer_builder.rs delete mode 100644 src/http/warp_implementation/request.rs delete mode 100644 src/http/warp_implementation/response.rs delete mode 100644 src/http/warp_implementation/routes.rs delete mode 100644 tests/http/asserts_warp.rs delete mode 100644 tests/http/responses/announce_warp.rs diff --git a/src/apis/responses.rs b/src/apis/responses.rs index 3b0946396..c0a6cbcf8 100644 --- a/src/apis/responses.rs +++ b/src/apis/responses.rs @@ -141,7 +141,7 @@ pub fn failed_to_reload_keys_response(e: E) -> Response { unhandled_rejection_response(format!("failed to reload keys: {e}")) } -/// This 
error response is to keep backward compatibility with the old Warp API. +/// This error response is to keep backward compatibility with the old API. /// It should be a plain text or json. #[must_use] pub fn unhandled_rejection_response(reason: String) -> Response { diff --git a/src/http/mod.rs b/src/http/mod.rs index b4841c0af..2309ee146 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -15,10 +15,8 @@ use serde::{Deserialize, Serialize}; pub mod axum_implementation; pub mod percent_encoding; pub mod server; -pub mod warp_implementation; #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] pub enum Version { - Warp, Axum, } diff --git a/src/http/warp_implementation/error.rs b/src/http/warp_implementation/error.rs deleted file mode 100644 index 55b22c27a..000000000 --- a/src/http/warp_implementation/error.rs +++ /dev/null @@ -1,39 +0,0 @@ -use std::panic::Location; - -use thiserror::Error; -use torrust_tracker_located_error::LocatedError; -use warp::reject::Reject; - -#[derive(Error, Debug)] -pub enum Error { - #[error("tracker server error: {source}")] - TrackerError { - source: LocatedError<'static, dyn std::error::Error + Send + Sync>, - }, - - #[error("internal server error: {message}, {location}")] - InternalServer { - location: &'static Location<'static>, - message: String, - }, - - #[error("no valid infohashes found, {location}")] - EmptyInfoHash { location: &'static Location<'static> }, - - #[error("peer_id is either missing or invalid, {location}")] - InvalidPeerId { location: &'static Location<'static> }, - - #[error("could not find remote address: {message}, {location}")] - AddressNotFound { - location: &'static Location<'static>, - message: String, - }, - - #[error("too many infohashes: {message}, {location}")] - TwoManyInfoHashes { - location: &'static Location<'static>, - message: String, - }, -} - -impl Reject for Error {} diff --git a/src/http/warp_implementation/filter_helpers.rs b/src/http/warp_implementation/filter_helpers.rs 
deleted file mode 100644 index 583d38352..000000000 --- a/src/http/warp_implementation/filter_helpers.rs +++ /dev/null @@ -1,85 +0,0 @@ -use std::net::{AddrParseError, IpAddr}; -use std::panic::Location; -use std::str::FromStr; - -use thiserror::Error; -use torrust_tracker_located_error::{Located, LocatedError}; - -#[derive(Error, Debug)] -pub enum XForwardedForParseError { - #[error("Empty X-Forwarded-For header value, {location}")] - EmptyValue { location: &'static Location<'static> }, - - #[error("Invalid IP in X-Forwarded-For header: {source}")] - InvalidIp { source: LocatedError<'static, AddrParseError> }, -} - -impl From for XForwardedForParseError { - #[track_caller] - fn from(err: AddrParseError) -> Self { - Self::InvalidIp { - source: Located(err).into(), - } - } -} - -/// It extracts the last IP address from the `X-Forwarded-For` http header value. -/// -/// # Errors -/// -/// Will return and error if the last IP in the `X-Forwarded-For` header is not a valid IP -pub fn maybe_rightmost_forwarded_ip(x_forwarded_for_value: &str) -> Result { - let mut x_forwarded_for_raw = x_forwarded_for_value.to_string(); - - // Remove whitespace chars - x_forwarded_for_raw.retain(|c| !c.is_whitespace()); - - // Get all forwarded IP's in a vec - let x_forwarded_ips: Vec<&str> = x_forwarded_for_raw.split(',').collect(); - - match x_forwarded_ips.last() { - Some(last_ip) => match IpAddr::from_str(last_ip) { - Ok(ip) => Ok(ip), - Err(err) => Err(err.into()), - }, - None => Err(XForwardedForParseError::EmptyValue { - location: Location::caller(), - }), - } -} - -#[cfg(test)] -mod tests { - - use std::net::IpAddr; - use std::str::FromStr; - - use super::maybe_rightmost_forwarded_ip; - - #[test] - fn the_last_forwarded_ip_can_be_parsed_from_the_the_corresponding_http_header() { - assert!(maybe_rightmost_forwarded_ip("").is_err()); - - assert!(maybe_rightmost_forwarded_ip("INVALID IP").is_err()); - - assert_eq!( - 
maybe_rightmost_forwarded_ip("2001:db8:85a3:8d3:1319:8a2e:370:7348").unwrap(), - IpAddr::from_str("2001:db8:85a3:8d3:1319:8a2e:370:7348").unwrap() - ); - - assert_eq!( - maybe_rightmost_forwarded_ip("203.0.113.195").unwrap(), - IpAddr::from_str("203.0.113.195").unwrap() - ); - - assert_eq!( - maybe_rightmost_forwarded_ip("203.0.113.195, 2001:db8:85a3:8d3:1319:8a2e:370:7348").unwrap(), - IpAddr::from_str("2001:db8:85a3:8d3:1319:8a2e:370:7348").unwrap() - ); - - assert_eq!( - maybe_rightmost_forwarded_ip("203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178").unwrap(), - IpAddr::from_str("150.172.238.178").unwrap() - ); - } -} diff --git a/src/http/warp_implementation/filters.rs b/src/http/warp_implementation/filters.rs deleted file mode 100644 index 06168d82a..000000000 --- a/src/http/warp_implementation/filters.rs +++ /dev/null @@ -1,205 +0,0 @@ -use std::convert::Infallible; -use std::net::{IpAddr, SocketAddr}; -use std::panic::Location; -use std::str::FromStr; -use std::sync::Arc; - -use warp::{reject, Filter, Rejection}; - -use super::error::Error; -use super::filter_helpers::maybe_rightmost_forwarded_ip; -use super::{request, WebResult}; -use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; -use crate::protocol::common::MAX_SCRAPE_TORRENTS; -use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth::Key; -use crate::tracker::{self, peer}; - -/// Pass Arc along -#[must_use] -pub fn with_tracker( - tracker: Arc, -) -> impl Filter,), Error = Infallible> + Clone { - warp::any().map(move || tracker.clone()) -} - -/// Check for infoHash -#[must_use] -pub fn with_info_hash() -> impl Filter,), Error = Rejection> + Clone { - warp::filters::query::raw().and_then(|q| async move { info_hashes(&q) }) -} - -/// Check for `PeerId` -#[must_use] -pub fn with_peer_id() -> impl Filter + Clone { - warp::filters::query::raw().and_then(|q| async move { peer_id(&q) }) -} - -/// Pass Arc along -#[must_use] -pub fn 
with_auth_key() -> impl Filter,), Error = Infallible> + Clone { - warp::path::param::() - .map(|key: String| { - let key = Key::from_str(&key); - match key { - Ok(id) => Some(id), - Err(_) => None, - } - }) - .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) -} - -/// Check for `PeerAddress` -#[must_use] -pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter + Clone { - warp::addr::remote() - .and(warp::header::optional::("X-Forwarded-For")) - .map(move |remote_addr: Option, x_forwarded_for: Option| { - (on_reverse_proxy, remote_addr, x_forwarded_for) - }) - .and_then(|q| async move { peer_addr(q) }) -} - -/// Check for `request::Announce` -#[must_use] -pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter + Clone { - warp::filters::query::query::() - .and(with_info_hash()) - .and(with_peer_id()) - .and(with_peer_addr(on_reverse_proxy)) - .and_then(|q, r, s, t| async move { announce_request(q, &r, s, t) }) -} - -/// Check for `ScrapeRequest` -#[must_use] -pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter + Clone { - warp::any() - .and(with_info_hash()) - .and(with_peer_addr(on_reverse_proxy)) - .and_then(|q, r| async move { scrape_request(q, r) }) -} - -/// Parse `InfoHash` from raw query string -#[allow(clippy::ptr_arg)] -fn info_hashes(raw_query: &String) -> WebResult> { - let split_raw_query: Vec<&str> = raw_query.split('&').collect(); - let mut info_hashes: Vec = Vec::new(); - - for v in split_raw_query { - if v.contains("info_hash") { - // get raw percent encoded infohash - let raw_info_hash = v.split('=').collect::>()[1]; - - let info_hash = percent_decode_info_hash(raw_info_hash); - - if let Ok(ih) = info_hash { - info_hashes.push(ih); - } - } - } - - if info_hashes.len() > MAX_SCRAPE_TORRENTS as usize { - Err(reject::custom(Error::TwoManyInfoHashes { - location: Location::caller(), - message: format! 
{"found: {}, but limit is: {}",info_hashes.len(), MAX_SCRAPE_TORRENTS}, - })) - } else if info_hashes.is_empty() { - Err(reject::custom(Error::EmptyInfoHash { - location: Location::caller(), - })) - } else { - Ok(info_hashes) - } -} - -/// Parse `PeerId` from raw query string -#[allow(clippy::ptr_arg)] -fn peer_id(raw_query: &String) -> WebResult { - // put all query params in a vec - let split_raw_query: Vec<&str> = raw_query.split('&').collect(); - - let mut peer_id: Option = None; - - for v in split_raw_query { - // look for the peer_id param - if v.contains("peer_id") { - // get raw percent encoded peer id - let raw_peer_id = v.split('=').collect::>()[1]; - - if let Ok(id) = percent_decode_peer_id(raw_peer_id) { - peer_id = Some(id); - } else { - return Err(reject::custom(Error::InvalidPeerId { - location: Location::caller(), - })); - } - - break; - } - } - - match peer_id { - Some(id) => Ok(id), - None => Err(reject::custom(Error::InvalidPeerId { - location: Location::caller(), - })), - } -} - -/// Get peer IP from HTTP client IP or X-Forwarded-For HTTP header -fn peer_addr( - (on_reverse_proxy, remote_client_ip, maybe_x_forwarded_for): (bool, Option, Option), -) -> WebResult { - if on_reverse_proxy { - if maybe_x_forwarded_for.is_none() { - return Err(reject::custom(Error::AddressNotFound { - location: Location::caller(), - message: "must have a x-forwarded-for when using a reverse proxy".to_string(), - })); - } - - let x_forwarded_for = maybe_x_forwarded_for.unwrap(); - - maybe_rightmost_forwarded_ip(&x_forwarded_for).map_err(|e| { - reject::custom(Error::AddressNotFound { - location: Location::caller(), - message: format!("on remote proxy and unable to parse the last x-forwarded-ip: `{e}`, from `{x_forwarded_for}`"), - }) - }) - } else if remote_client_ip.is_none() { - return Err(reject::custom(Error::AddressNotFound { - location: Location::caller(), - message: "neither on have remote address or on a reverse proxy".to_string(), - })); - } else { - return 
Ok(remote_client_ip.unwrap().ip()); - } -} - -/// Parse `AnnounceRequest` from raw `AnnounceRequestQuery`, `InfoHash` and Option -#[allow(clippy::unnecessary_wraps)] -#[allow(clippy::ptr_arg)] -fn announce_request( - announce_request_query: request::AnnounceQuery, - info_hashes: &Vec, - peer_id: peer::Id, - peer_addr: IpAddr, -) -> WebResult { - Ok(request::Announce { - info_hash: info_hashes[0], - peer_addr, - downloaded: announce_request_query.downloaded.unwrap_or(0), - uploaded: announce_request_query.uploaded.unwrap_or(0), - peer_id, - port: announce_request_query.port, - left: announce_request_query.left.unwrap_or(0), - event: announce_request_query.event, - compact: announce_request_query.compact, - }) -} - -/// Parse `ScrapeRequest` from `InfoHash` -#[allow(clippy::unnecessary_wraps)] -fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { - Ok(request::Scrape { info_hashes, peer_addr }) -} diff --git a/src/http/warp_implementation/handlers.rs b/src/http/warp_implementation/handlers.rs deleted file mode 100644 index f9aedeb8f..000000000 --- a/src/http/warp_implementation/handlers.rs +++ /dev/null @@ -1,207 +0,0 @@ -use std::collections::HashMap; -use std::convert::Infallible; -use std::net::IpAddr; -use std::panic::Location; -use std::sync::Arc; - -use log::debug; -use warp::http::Response; -use warp::{reject, Rejection, Reply}; - -use super::error::Error; -use super::{request, response, WebResult}; -use crate::http::warp_implementation::peer_builder; -use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth::Key; -use crate::tracker::{self, auth, peer, statistics, torrent}; - -/// Authenticate `InfoHash` using optional `auth::Key` -/// -/// # Errors -/// -/// Will return `ServerError` that wraps the `tracker::error::Error` if unable to `authenticate_request`. 
-pub async fn authenticate( - info_hash: &InfoHash, - auth_key: &Option, - tracker: Arc, -) -> Result<(), Error> { - tracker - .authenticate_request(info_hash, auth_key) - .await - .map_err(|e| Error::TrackerError { - source: (Arc::new(e) as Arc).into(), - }) -} - -/// # Errors -/// -/// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_announce_response`. -pub async fn handle_announce( - announce_request: request::Announce, - auth_key: Option, - tracker: Arc, -) -> WebResult { - debug!("http announce request: {:#?}", announce_request); - - let info_hash = announce_request.info_hash; - let remote_client_ip = announce_request.peer_addr; - - authenticate(&info_hash, &auth_key, tracker.clone()).await?; - - let mut peer = peer_builder::from_request(&announce_request, &remote_client_ip); - - // todo: we should be use the http::axum_implementation::services::announce::announce service, - // but this Warp implementation is going to be removed. - - let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip).await; - - match remote_client_ip { - IpAddr::V4(_) => { - tracker.send_stats_event(statistics::Event::Tcp4Announce).await; - } - IpAddr::V6(_) => { - tracker.send_stats_event(statistics::Event::Tcp6Announce).await; - } - } - - send_announce_response( - &announce_request, - &response.swarm_stats, - &response.peers, - tracker.config.announce_interval, - tracker.config.min_announce_interval, - ) -} - -/// # Errors -/// -/// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. 
-pub async fn handle_scrape( - scrape_request: request::Scrape, - auth_key: Option, - tracker: Arc, -) -> WebResult { - let mut files: HashMap = HashMap::new(); - let db = tracker.get_torrents().await; - - for info_hash in &scrape_request.info_hashes { - let scrape_entry = match db.get(info_hash) { - Some(torrent_info) => { - if authenticate(info_hash, &auth_key, tracker.clone()).await.is_ok() { - let (seeders, completed, leechers) = torrent_info.get_stats(); - response::ScrapeEntry { - complete: seeders, - downloaded: completed, - incomplete: leechers, - } - } else { - response::ScrapeEntry { - complete: 0, - downloaded: 0, - incomplete: 0, - } - } - } - None => response::ScrapeEntry { - complete: 0, - downloaded: 0, - incomplete: 0, - }, - }; - - files.insert(*info_hash, scrape_entry); - } - - // send stats event - match scrape_request.peer_addr { - IpAddr::V4(_) => { - tracker.send_stats_event(statistics::Event::Tcp4Scrape).await; - } - IpAddr::V6(_) => { - tracker.send_stats_event(statistics::Event::Tcp6Scrape).await; - } - } - - send_scrape_response(files) -} - -/// Send announce response -#[allow(clippy::ptr_arg)] -fn send_announce_response( - announce_request: &request::Announce, - torrent_stats: &torrent::SwarmStats, - peers: &Vec, - interval: u32, - interval_min: u32, -) -> WebResult { - let http_peers: Vec = peers - .iter() - .map(|peer| response::Peer { - peer_id: peer.peer_id.to_string(), - ip: peer.peer_addr.ip(), - port: peer.peer_addr.port(), - }) - .collect(); - - let res = response::Announce { - interval, - interval_min, - complete: torrent_stats.seeders, - incomplete: torrent_stats.leechers, - peers: http_peers, - }; - - // check for compact response request - if let Some(1) = announce_request.compact { - match res.write_compact() { - Ok(body) => Ok(Response::new(body)), - Err(e) => Err(reject::custom(Error::InternalServer { - message: e.to_string(), - location: Location::caller(), - })), - } - } else { - Ok(Response::new(res.write().into())) - } 
-} - -/// Send scrape response -fn send_scrape_response(files: HashMap) -> WebResult { - let res = response::Scrape { files }; - - match res.write() { - Ok(body) => Ok(Response::new(body)), - Err(e) => Err(reject::custom(Error::InternalServer { - message: e.to_string(), - location: Location::caller(), - })), - } -} - -/// Handle all server errors and send error reply -/// -/// # Errors -/// -/// Will not return a error, `Infallible`, but instead convert the `ServerError` into a `Response`. -pub fn send_error(r: &Rejection) -> std::result::Result { - let warp_reject_error = r.find::(); - - let body = if let Some(error) = warp_reject_error { - debug!("{:?}", error); - response::Error { - failure_reason: error.to_string(), - } - .write() - } else { - response::Error { - failure_reason: Error::InternalServer { - message: "Undefined".to_string(), - location: Location::caller(), - } - .to_string(), - } - .write() - }; - - Ok(Response::new(body)) -} diff --git a/src/http/warp_implementation/launcher.rs b/src/http/warp_implementation/launcher.rs deleted file mode 100644 index 46ec2bf3c..000000000 --- a/src/http/warp_implementation/launcher.rs +++ /dev/null @@ -1,116 +0,0 @@ -use std::future::Future; -use std::net::SocketAddr; -use std::str::FromStr; -use std::sync::Arc; - -use futures::future::BoxFuture; - -use super::routes; -use crate::http::server::HttpServerLauncher; -use crate::tracker; -use crate::tracker::Tracker; - -#[derive(Debug)] -pub enum Error { - Error(String), -} - -pub struct Launcher; - -impl Launcher { - pub fn start_with_graceful_shutdown( - addr: SocketAddr, - tracker: Arc, - shutdown_signal: F, - ) -> (SocketAddr, BoxFuture<'static, ()>) - where - F: Future + Send + 'static, - { - let (bind_addr, server) = warp::serve(routes::routes(tracker)).bind_with_graceful_shutdown(addr, shutdown_signal); - - (bind_addr, Box::pin(server)) - } - - pub fn start_tls_with_graceful_shutdown( - addr: SocketAddr, - (ssl_cert_path, ssl_key_path): (&str, &str), - tracker: 
Arc, - shutdown_signal: F, - ) -> (SocketAddr, BoxFuture<'static, ()>) - where - F: Future + Send + 'static, - { - let (bind_addr, server) = warp::serve(routes::routes(tracker)) - .tls() - .cert_path(ssl_cert_path) - .key_path(ssl_key_path) - .bind_with_graceful_shutdown(addr, shutdown_signal); - - (bind_addr, Box::pin(server)) - } -} - -impl HttpServerLauncher for Launcher { - fn new() -> Self { - Self {} - } - - fn start_with_graceful_shutdown( - &self, - cfg: torrust_tracker_configuration::HttpTracker, - tracker: Arc, - shutdown_signal: F, - ) -> (SocketAddr, BoxFuture<'static, ()>) - where - F: Future + Send + 'static, - { - let addr = SocketAddr::from_str(&cfg.bind_address).expect("bind_address is not a valid SocketAddr."); - - if let (true, Some(ssl_cert_path), Some(ssl_key_path)) = (cfg.ssl_enabled, &cfg.ssl_cert_path, &cfg.ssl_key_path) { - Self::start_tls_with_graceful_shutdown(addr, (ssl_cert_path, ssl_key_path), tracker, shutdown_signal) - } else { - Self::start_with_graceful_shutdown(addr, tracker, shutdown_signal) - } - } -} - -/// Server that listens on HTTP, needs a `tracker::TorrentTracker` -#[derive(Clone)] -pub struct Http { - tracker: Arc, -} - -impl Http { - #[must_use] - pub fn new(tracker: Arc) -> Http { - Http { tracker } - } - - /// Start the `HttpServer` - pub fn start(&self, socket_addr: SocketAddr) -> impl warp::Future { - let (_addr, server) = - warp::serve(routes::routes(self.tracker.clone())).bind_with_graceful_shutdown(socket_addr, async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - }); - - server - } - - /// Start the `HttpServer` in TLS mode - pub fn start_tls( - &self, - socket_addr: SocketAddr, - ssl_cert_path: String, - ssl_key_path: String, - ) -> impl warp::Future { - let (_addr, server) = warp::serve(routes::routes(self.tracker.clone())) - .tls() - .cert_path(ssl_cert_path) - .key_path(ssl_key_path) - .bind_with_graceful_shutdown(socket_addr, async move { - 
tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - }); - - server - } -} diff --git a/src/http/warp_implementation/mod.rs b/src/http/warp_implementation/mod.rs deleted file mode 100644 index c0e046f4f..000000000 --- a/src/http/warp_implementation/mod.rs +++ /dev/null @@ -1,14 +0,0 @@ -pub mod error; -pub mod filter_helpers; -pub mod filters; -pub mod handlers; -pub mod launcher; -pub mod peer_builder; -pub mod request; -pub mod response; -pub mod routes; - -use warp::Rejection; - -pub type Bytes = u64; -pub type WebResult = std::result::Result; diff --git a/src/http/warp_implementation/peer_builder.rs b/src/http/warp_implementation/peer_builder.rs deleted file mode 100644 index 70cf7b508..000000000 --- a/src/http/warp_implementation/peer_builder.rs +++ /dev/null @@ -1,32 +0,0 @@ -use std::net::{IpAddr, SocketAddr}; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - -use super::request::Announce; -use crate::protocol::clock::{Current, Time}; -use crate::tracker::peer::Peer; - -#[must_use] -pub fn from_request(announce_request: &Announce, peer_ip: &IpAddr) -> Peer { - let event: AnnounceEvent = if let Some(event) = &announce_request.event { - match event.as_ref() { - "started" => AnnounceEvent::Started, - "stopped" => AnnounceEvent::Stopped, - "completed" => AnnounceEvent::Completed, - _ => AnnounceEvent::None, - } - } else { - AnnounceEvent::None - }; - - #[allow(clippy::cast_possible_truncation)] - Peer { - peer_id: announce_request.peer_id, - peer_addr: SocketAddr::new(*peer_ip, announce_request.port), - updated: Current::now(), - uploaded: NumberOfBytes(i128::from(announce_request.uploaded) as i64), - downloaded: NumberOfBytes(i128::from(announce_request.downloaded) as i64), - left: NumberOfBytes(i128::from(announce_request.left) as i64), - event, - } -} diff --git a/src/http/warp_implementation/request.rs b/src/http/warp_implementation/request.rs deleted file mode 100644 index f666b48c5..000000000 --- 
a/src/http/warp_implementation/request.rs +++ /dev/null @@ -1,36 +0,0 @@ -use std::net::IpAddr; - -use serde::Deserialize; - -use crate::http::warp_implementation::Bytes; -use crate::protocol::info_hash::InfoHash; -use crate::tracker::peer; - -#[derive(Deserialize)] -pub struct AnnounceQuery { - pub downloaded: Option, - pub uploaded: Option, - pub key: Option, - pub port: u16, - pub left: Option, - pub event: Option, - pub compact: Option, -} - -#[derive(Debug)] -pub struct Announce { - pub info_hash: InfoHash, - pub peer_addr: IpAddr, - pub downloaded: Bytes, - pub uploaded: Bytes, - pub peer_id: peer::Id, - pub port: u16, - pub left: Bytes, - pub event: Option, - pub compact: Option, -} - -pub struct Scrape { - pub info_hashes: Vec, - pub peer_addr: IpAddr, -} diff --git a/src/http/warp_implementation/response.rs b/src/http/warp_implementation/response.rs deleted file mode 100644 index 1e9f7fa09..000000000 --- a/src/http/warp_implementation/response.rs +++ /dev/null @@ -1,132 +0,0 @@ -use std::collections::HashMap; -use std::io::Write; -use std::net::IpAddr; - -use serde::{self, Deserialize, Serialize}; - -use crate::protocol::info_hash::InfoHash; - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct Peer { - pub peer_id: String, - pub ip: IpAddr, - pub port: u16, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct Announce { - pub interval: u32, - #[serde(rename = "min interval")] - pub interval_min: u32, - //pub tracker_id: String, - pub complete: u32, - pub incomplete: u32, - pub peers: Vec, -} - -impl Announce { - /// # Panics - /// - /// It would panic if the `Announce` struct would contain an inappropriate type. - #[must_use] - pub fn write(&self) -> String { - serde_bencode::to_string(&self).unwrap() - } - - /// # Errors - /// - /// Will return `Err` if internally interrupted. 
- pub fn write_compact(&self) -> Result, Box> { - let mut peers_v4: Vec = Vec::new(); - let mut peers_v6: Vec = Vec::new(); - - for peer in &self.peers { - match peer.ip { - IpAddr::V4(ip) => { - peers_v4.write_all(&u32::from(ip).to_be_bytes())?; - peers_v4.write_all(&peer.port.to_be_bytes())?; - } - IpAddr::V6(ip) => { - peers_v6.write_all(&u128::from(ip).to_be_bytes())?; - peers_v6.write_all(&peer.port.to_be_bytes())?; - } - } - } - - let mut bytes: Vec = Vec::new(); - bytes.write_all(b"d8:intervali")?; - bytes.write_all(self.interval.to_string().as_bytes())?; - bytes.write_all(b"e12:min intervali")?; - bytes.write_all(self.interval_min.to_string().as_bytes())?; - bytes.write_all(b"e8:completei")?; - bytes.write_all(self.complete.to_string().as_bytes())?; - bytes.write_all(b"e10:incompletei")?; - bytes.write_all(self.incomplete.to_string().as_bytes())?; - bytes.write_all(b"e5:peers")?; - bytes.write_all(peers_v4.len().to_string().as_bytes())?; - bytes.write_all(b":")?; - bytes.write_all(peers_v4.as_slice())?; - bytes.write_all(b"e6:peers6")?; - bytes.write_all(peers_v6.len().to_string().as_bytes())?; - bytes.write_all(b":")?; - bytes.write_all(peers_v6.as_slice())?; - bytes.write_all(b"e")?; - - Ok(bytes) - } -} - -#[derive(Serialize)] -pub struct ScrapeEntry { - pub complete: u32, - pub downloaded: u32, - pub incomplete: u32, -} - -#[derive(Serialize)] -pub struct Scrape { - pub files: HashMap, -} - -impl Scrape { - /// # Errors - /// - /// Will return `Err` if internally interrupted. 
- pub fn write(&self) -> Result, Box> { - let mut bytes: Vec = Vec::new(); - - bytes.write_all(b"d5:filesd")?; - - for (info_hash, scrape_response_entry) in &self.files { - bytes.write_all(b"20:")?; - bytes.write_all(&info_hash.0)?; - bytes.write_all(b"d8:completei")?; - bytes.write_all(scrape_response_entry.complete.to_string().as_bytes())?; - bytes.write_all(b"e10:downloadedi")?; - bytes.write_all(scrape_response_entry.downloaded.to_string().as_bytes())?; - bytes.write_all(b"e10:incompletei")?; - bytes.write_all(scrape_response_entry.incomplete.to_string().as_bytes())?; - bytes.write_all(b"ee")?; - } - - bytes.write_all(b"ee")?; - - Ok(bytes) - } -} - -#[derive(Serialize)] -pub struct Error { - #[serde(rename = "failure reason")] - pub failure_reason: String, -} - -impl Error { - /// # Panics - /// - /// It would panic if the `Error` struct would contain an inappropriate type. - #[must_use] - pub fn write(&self) -> String { - serde_bencode::to_string(&self).unwrap() - } -} diff --git a/src/http/warp_implementation/routes.rs b/src/http/warp_implementation/routes.rs deleted file mode 100644 index c46c502e4..000000000 --- a/src/http/warp_implementation/routes.rs +++ /dev/null @@ -1,36 +0,0 @@ -use std::convert::Infallible; -use std::sync::Arc; - -use warp::{Filter, Rejection}; - -use super::filters::{with_announce_request, with_auth_key, with_scrape_request, with_tracker}; -use super::handlers::{handle_announce, handle_scrape, send_error}; -use crate::tracker; - -/// All routes -#[must_use] -pub fn routes(tracker: Arc) -> impl Filter + Clone { - announce(tracker.clone()) - .or(scrape(tracker)) - .recover(|q| async move { send_error(&q) }) -} - -/// GET /announce or /announce/ -fn announce(tracker: Arc) -> impl Filter + Clone { - warp::path::path("announce") - .and(warp::filters::method::get()) - .and(with_announce_request(tracker.config.on_reverse_proxy)) - .and(with_auth_key()) - .and(with_tracker(tracker)) - .and_then(handle_announce) -} - -/// GET /scrape/ -fn 
scrape(tracker: Arc) -> impl Filter + Clone { - warp::path::path("scrape") - .and(warp::filters::method::get()) - .and(with_scrape_request(tracker.config.on_reverse_proxy)) - .and(with_auth_key()) - .and(with_tracker(tracker)) - .and_then(handle_scrape) -} diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index 40caa8e88..70f512a39 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -1,14 +1,12 @@ -use std::net::SocketAddr; use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; -use log::{info, warn}; +use log::info; use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker_configuration::HttpTracker; use crate::http::axum_implementation::launcher; -use crate::http::warp_implementation::launcher::Http; use crate::http::Version; use crate::tracker; @@ -17,68 +15,10 @@ pub struct ServerJobStarted(); pub async fn start_job(config: &HttpTracker, tracker: Arc, version: Version) -> JoinHandle<()> { match version { - Version::Warp => start_warp(config, tracker.clone()).await, Version::Axum => start_axum(config, tracker.clone()).await, } } -/// # Panics -/// -/// It would panic if the `config::HttpTracker` struct would contain inappropriate values. 
-async fn start_warp(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { - let bind_addr = config - .bind_address - .parse::() - .expect("HTTP tracker server bind_address invalid."); - let ssl_enabled = config.ssl_enabled; - let ssl_cert_path = config.ssl_cert_path.clone(); - let ssl_key_path = config.ssl_key_path.clone(); - - let (tx, rx) = oneshot::channel::(); - - // Run the HTTP tracker server - let join_handle = tokio::spawn(async move { - let http_tracker = Http::new(tracker); - - if !ssl_enabled { - info!("Starting HTTP tracker server on: http://{}", bind_addr); - - let handle = http_tracker.start(bind_addr); - - tx.send(ServerJobStarted()) - .expect("HTTP tracker server should not be dropped"); - - handle.await; - - info!("HTTP tracker server on http://{} stopped", bind_addr); - } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { - info!("Starting HTTPS server on: https://{}", bind_addr); - - let handle = http_tracker.start_tls(bind_addr, ssl_cert_path.unwrap(), ssl_key_path.unwrap()); - - tx.send(ServerJobStarted()) - .expect("HTTP tracker server should not be dropped"); - - handle.await; - - info!("HTTP tracker server on https://{} stopped", bind_addr); - } else { - warn!( - "Could not start HTTPS tracker server on: {}, missing SSL Cert or Key!", - bind_addr - ); - } - }); - - // Wait until the HTTPS tracker server job is running - match rx.await { - Ok(_msg) => info!("HTTP tracker server started"), - Err(e) => panic!("HTTP tracker server was dropped: {e}"), - } - - join_handle -} - /// # Panics /// /// It would panic if the `config::HttpTracker` struct would contain inappropriate values. 
diff --git a/tests/http/asserts.rs b/tests/http/asserts.rs index 0d5441f89..932b48be4 100644 --- a/tests/http/asserts.rs +++ b/tests/http/asserts.rs @@ -108,48 +108,12 @@ pub async fn assert_missing_query_params_for_scrape_request_error_response(respo // Other errors -pub async fn assert_internal_server_error_response(response: Response) { - assert_eq!(response.status(), 200); - - assert_bencoded_error(&response.text().await.unwrap(), "internal server", Location::caller()); -} - -pub async fn assert_invalid_info_hash_error_response(response: Response) { - assert_eq!(response.status(), 200); - - assert_bencoded_error( - &response.text().await.unwrap(), - "no valid infohashes found", - Location::caller(), - ); -} - -pub async fn assert_invalid_peer_id_error_response(response: Response) { - assert_eq!(response.status(), 200); - - assert_bencoded_error( - &response.text().await.unwrap(), - "peer_id is either missing or invalid", - Location::caller(), - ); -} - pub async fn assert_torrent_not_in_whitelist_error_response(response: Response) { assert_eq!(response.status(), 200); assert_bencoded_error(&response.text().await.unwrap(), "is not whitelisted", Location::caller()); } -pub async fn assert_could_not_find_remote_address_on_xff_header_error_response(response: Response) { - assert_eq!(response.status(), 200); - - assert_bencoded_error( - &response.text().await.unwrap(), - "could not find remote address: must have a x-forwarded-for when using a reverse proxy", - Location::caller(), - ); -} - pub async fn assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response: Response) { assert_eq!(response.status(), 200); @@ -160,16 +124,6 @@ pub async fn assert_could_not_find_remote_address_on_x_forwarded_for_header_erro ); } -pub async fn assert_invalid_remote_address_on_xff_header_error_response(response: Response) { - assert_eq!(response.status(), 200); - - assert_bencoded_error( - &response.text().await.unwrap(), - "could not find remote address: 
on remote proxy and unable to parse the last x-forwarded-ip", - Location::caller(), - ); -} - pub async fn assert_cannot_parse_query_param_error_response(response: Response, failure: &str) { assert_cannot_parse_query_params_error_response(response, &format!(": {failure}")).await; } diff --git a/tests/http/asserts_warp.rs b/tests/http/asserts_warp.rs deleted file mode 100644 index d1a936efa..000000000 --- a/tests/http/asserts_warp.rs +++ /dev/null @@ -1,34 +0,0 @@ -use std::panic::Location; - -/// todo: this mod should be removed when we remove the Warp implementation for the HTTP tracker. -use reqwest::Response; - -use super::responses::announce_warp::WarpAnnounce; -use crate::http::asserts::assert_bencoded_error; - -pub async fn assert_warp_announce_response(response: Response, expected_announce_response: &WarpAnnounce) { - assert_eq!(response.status(), 200); - - let body = response.text().await.unwrap(); - - let announce_response: WarpAnnounce = serde_bencode::from_str(&body) - .unwrap_or_else(|_| panic!("response body should be a valid announce response, got \"{:#?}\"", &body)); - - assert_eq!(announce_response, *expected_announce_response); -} - -pub async fn assert_warp_peer_not_authenticated_error_response(response: Response) { - assert_eq!(response.status(), 200); - - assert_bencoded_error( - &response.text().await.unwrap(), - "The peer is not authenticated", - Location::caller(), - ); -} - -pub async fn assert_warp_invalid_authentication_key_error_response(response: Response) { - assert_eq!(response.status(), 200); - - assert_bencoded_error(&response.text().await.unwrap(), "is not valid", Location::caller()); -} diff --git a/tests/http/mod.rs b/tests/http/mod.rs index 771145f46..b0d896c99 100644 --- a/tests/http/mod.rs +++ b/tests/http/mod.rs @@ -1,5 +1,4 @@ pub mod asserts; -pub mod asserts_warp; pub mod client; pub mod requests; pub mod responses; diff --git a/tests/http/responses/announce_warp.rs b/tests/http/responses/announce_warp.rs deleted file mode 
100644 index 0fcf05eb8..000000000 --- a/tests/http/responses/announce_warp.rs +++ /dev/null @@ -1,30 +0,0 @@ -/// todo: this mod should be removed when we remove the Warp implementation for the HTTP tracker. -use serde::{self, Deserialize, Serialize}; -use torrust_tracker::tracker::peer::Peer; - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct WarpAnnounce { - pub complete: u32, - pub incomplete: u32, - pub interval: u32, - #[serde(rename = "min interval")] - pub min_interval: u32, - pub peers: Vec, // Peers using IPV4 -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct WarpDictionaryPeer { - pub ip: String, - pub peer_id: String, - pub port: u16, -} - -impl From for WarpDictionaryPeer { - fn from(peer: Peer) -> Self { - Self { - peer_id: peer.peer_id.to_string(), - ip: peer.peer_addr.ip().to_string(), - port: peer.peer_addr.port(), - } - } -} diff --git a/tests/http/responses/mod.rs b/tests/http/responses/mod.rs index aecb53fed..bdc689056 100644 --- a/tests/http/responses/mod.rs +++ b/tests/http/responses/mod.rs @@ -1,4 +1,3 @@ pub mod announce; -pub mod announce_warp; pub mod error; pub mod scrape; diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index aea8fac37..1b07b987a 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -1,1368 +1,28 @@ /// Integration tests for HTTP tracker server /// -/// Warp version: /// ```text -/// cargo test `warp_test_env` -- --nocapture -/// ``` -/// -/// Axum version (WIP): -/// ```text -/// cargo test `warp_test_env` -- --nocapture +/// cargo test `http_tracker_server` -- --nocapture /// ``` mod common; mod http; pub type Axum = torrust_tracker::http::axum_implementation::launcher::Launcher; -pub type Warp = torrust_tracker::http::warp_implementation::launcher::Launcher; -mod test_env_test_environment { +mod test_environment_for_http_tracker { use torrust_tracker_test_helpers::configuration; use crate::http::test_environment::running_test_environment; - use crate::{Axum, 
Warp}; + use crate::Axum; #[tokio::test] - async fn should_be_able_to_start_and_stop_a_test_environment_using_axum() { + async fn should_be_started_and_stopped() { let test_env = running_test_environment::(configuration::ephemeral()).await; test_env.stop().await; } - - #[tokio::test] - async fn should_be_able_to_start_and_stop_a_test_environment_using_warp() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - test_env.stop().await; - } } -mod warp_test_env { - - mod for_all_config_modes { - - mod running_on_reverse_proxy { - use torrust_tracker_test_helpers::configuration; - - use crate::http::asserts::{ - assert_could_not_find_remote_address_on_xff_header_error_response, - assert_invalid_remote_address_on_xff_header_error_response, - }; - use crate::http::client::Client; - use crate::http::requests::announce::QueryBuilder; - use crate::http::test_environment::running_test_environment; - use crate::Warp; - - #[tokio::test] - async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { - // If the tracker is running behind a reverse proxy, the peer IP is the - // last IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy client. 
- - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; - - let params = QueryBuilder::default().query().params(); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_could_not_find_remote_address_on_xff_header_error_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; - - let params = QueryBuilder::default().query().params(); - - let response = Client::new(*test_env.bind_address()) - .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") - .await; - - assert_invalid_remote_address_on_xff_header_error_response(response).await; - - test_env.stop().await; - } - } - - mod receiving_an_announce_request { - - // Announce request documentation: - // - // BEP 03. The BitTorrent Protocol Specification - // https://www.bittorrent.org/beps/bep_0003.html - // - // BEP 23. 
Tracker Returns Compact Peer Lists - // https://www.bittorrent.org/beps/bep_0023.html - // - // Vuze (bittorrent client) docs: - // https://wiki.vuze.com/w/Announce - - use std::net::{IpAddr, Ipv6Addr}; - use std::str::FromStr; - - use local_ip_address::local_ip; - use reqwest::Response; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; - use crate::http::asserts::{ - assert_announce_response, assert_compact_announce_response, assert_empty_announce_response, - assert_internal_server_error_response, assert_invalid_info_hash_error_response, - assert_invalid_peer_id_error_response, assert_is_announce_response, - }; - use crate::http::asserts_warp::assert_warp_announce_response; - use crate::http::client::Client; - use crate::http::requests::announce::{Compact, QueryBuilder}; - use crate::http::responses; - use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList}; - use crate::http::responses::announce_warp::{WarpAnnounce, WarpDictionaryPeer}; - use crate::http::test_environment::running_test_environment; - use crate::Warp; - - #[tokio::test] - async fn should_respond_if_only_the_mandatory_fields_are_provided() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - params.remove_optional_params(); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_is_announce_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_url_query_component_is_empty() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let response = Client::new(*test_env.bind_address()).get("announce").await; - - assert_internal_server_error_response(response).await; - } - - #[tokio::test] - 
async fn should_fail_when_a_mandatory_field_is_missing() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - // Without `info_hash` param - - let mut params = QueryBuilder::default().query().params(); - - params.info_hash = None; - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_invalid_info_hash_error_response(response).await; - - // Without `peer_id` param - - let mut params = QueryBuilder::default().query().params(); - - params.peer_id = None; - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_invalid_peer_id_error_response(response).await; - - // Without `port` param - - let mut params = QueryBuilder::default().query().params(); - - params.port = None; - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_internal_server_error_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - for invalid_value in &invalid_info_hashes() { - params.set("info_hash", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_invalid_info_hash_error_response(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_fail_when_the_peer_address_param_is_invalid() { - // AnnounceQuery does not even contain the `peer_addr` - // The peer IP is obtained in two ways: - // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP if there. - // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request header is tracker is running `on_reverse_proxy`. 
- - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_is_announce_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_downloaded_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = ["-1", "1.1", "a"]; - - for invalid_value in invalid_values { - params.set("downloaded", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_internal_server_error_response(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_uploaded_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = ["-1", "1.1", "a"]; - - for invalid_value in invalid_values { - params.set("uploaded", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_internal_server_error_response(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_peer_id_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = [ - "0", - "-1", - "1.1", - "a", - "-qB0000000000000000", // 19 bytes - "-qB000000000000000000", // 21 bytes - ]; - - for invalid_value in invalid_values { - params.set("peer_id", invalid_value); - - let response = 
Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_invalid_peer_id_error_response(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_port_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = ["-1", "1.1", "a"]; - - for invalid_value in invalid_values { - params.set("port", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_internal_server_error_response(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_left_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = ["-1", "1.1", "a"]; - - for invalid_value in invalid_values { - params.set("left", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_internal_server_error_response(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_fail_when_the_event_param_is_invalid() { - // All invalid values are ignored as if the `event` param were empty - - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = [ - "0", - "-1", - "1.1", - "a", - "Started", // It should be lowercase - "Stopped", // It should be lowercase - "Completed", // It should be lowercase - ]; - - for invalid_value in invalid_values { - params.set("event", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_is_announce_response(response).await; - } - - test_env.stop().await; - } - - 
#[tokio::test] - async fn should_not_fail_when_the_compact_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = ["-1", "1.1", "a"]; - - for invalid_value in invalid_values { - params.set("compact", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_internal_server_error_response(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) - .query(), - ) - .await; - - assert_announce_response( - response, - &Announce { - complete: 1, // the peer for this test - incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, - peers: vec![], - }, - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_list_of_previously_announced_peers() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - // Peer 1 - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); - - // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; - - // Announce the new Peer 2. 
This new peer is non included on the response peer list - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .query(), - ) - .await; - - // It should only contain the previously announced peer - assert_warp_announce_response( - response, - &WarpAnnounce { - complete: 2, - incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, - peers: vec![WarpDictionaryPeer::from(previously_announced_peer)], - }, - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let peer = PeerBuilder::default().build(); - - // Add a peer - test_env.add_torrent_peer(&info_hash, &peer).await; - - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer.peer_id) - .query(); - - assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); - - let response = Client::new(*test_env.bind_address()).announce(&announce_query).await; - - assert_empty_announce_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_compact_response() { - // Tracker Returns Compact Peer Lists - // https://www.bittorrent.org/beps/bep_0023.html - - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - // Peer 1 - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); - - // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, 
&previously_announced_peer).await; - - // Announce the new Peer 2 accepting compact responses - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .with_compact(Compact::Accepted) - .query(), - ) - .await; - - let expected_response = responses::announce::Compact { - complete: 2, - incomplete: 0, - interval: 120, - min_interval: 120, - peers: CompactPeerList::new([CompactPeer::new(&previously_announced_peer.peer_addr)].to_vec()), - }; - - assert_compact_announce_response(response, &expected_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_return_the_compact_response_by_default() { - // code-review: the HTTP tracker does not return the compact response by default if the "compact" - // param is not provided in the announce URL. The BEP 23 suggest to do so. - - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - // Peer 1 - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); - - // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; - - // Announce the new Peer 2 without passing the "compact" param - // By default it should respond with the compact peer list - // https://www.bittorrent.org/beps/bep_0023.html - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .without_compact() - .query(), - ) - .await; - - assert!(!is_a_compact_announce_response(response).await); - - test_env.stop().await; - } - - async fn is_a_compact_announce_response(response: Response) -> bool { - let bytes = response.bytes().await.unwrap(); - let compact_announce = 
serde_bencode::from_bytes::(&bytes); - compact_announce.is_ok() - } - - #[tokio::test] - async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().query()) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp4_connections_handled, 1); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) - .announce(&QueryBuilder::default().query()) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp6_connections_handled, 1); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { - // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) - .query(), - ) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp6_connections_handled, 0); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().query()) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp4_announces_handled, 1); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) - .announce(&QueryBuilder::default().query()) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp6_announces_handled, 1); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { - // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) - .query(), - ) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp6_announces_handled, 0); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let client_ip = local_ip().unwrap(); - - let client = Client::bind(*test_env.bind_address(), client_ip); - - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) - .query(); - - client.announce(&announce_query).await; - - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; - - assert_eq!(peer_addr.ip(), client_ip); - assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - - test_env.stop().await; - } - - #[tokio::test] - async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( - ) { - /* We assume that both the client and tracker share the same public IP. 
- - client <-> tracker <-> Internet - 127.0.0.1 external_ip = "2.137.87.41" - */ - - let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( - IpAddr::from_str("2.137.87.41").unwrap(), - )) - .await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); - let client_ip = loopback_ip; - - let client = Client::bind(*test_env.bind_address(), client_ip); - - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) - .query(); - - client.announce(&announce_query).await; - - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; - - assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); - assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - - test_env.stop().await; - } - - #[tokio::test] - async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( - ) { - /* We assume that both the client and tracker share the same public IP. 
- - client <-> tracker <-> Internet - ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" - */ - - let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( - IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), - )) - .await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); - let client_ip = loopback_ip; - - let client = Client::bind(*test_env.bind_address(), client_ip); - - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) - .query(); - - client.announce(&announce_query).await; - - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; - - assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); - assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - - test_env.stop().await; - } - - #[tokio::test] - async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( - ) { - /* - client <-> http proxy <-> tracker <-> Internet - ip: header: config: peer addr: - 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 - */ - - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - let client = Client::new(*test_env.bind_address()); - - let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); - - client - .announce_with_header( - &announce_query, - "X-Forwarded-For", - "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", - ) - .await; - - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; - - assert_eq!(peer_addr.ip(), 
IpAddr::from_str("150.172.238.178").unwrap()); - - test_env.stop().await; - } - } - - mod receiving_an_scrape_request { - - // Scrape documentation: - // - // BEP 48. Tracker Protocol Extension: Scrape - // https://www.bittorrent.org/beps/bep_0048.html - // - // Vuze (bittorrent client) docs: - // https://wiki.vuze.com/w/Scrape - - use std::net::IpAddr; - use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; - use crate::http::asserts::{assert_internal_server_error_response, assert_scrape_response}; - use crate::http::client::Client; - use crate::http::requests; - use crate::http::requests::scrape::QueryBuilder; - use crate::http::responses::scrape::{self, File, ResponseBuilder}; - use crate::http::test_environment::running_test_environment; - use crate::Warp; - - #[tokio::test] - async fn should_fail_when_the_request_is_empty() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let response = Client::new(*test_env.bind_address()).get("scrape").await; - - assert_internal_server_error_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let mut params = QueryBuilder::default().query().params(); - - for invalid_value in &invalid_info_hashes() { - params.set_one_info_hash_param(invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - // code-review: it's not returning the invalid info hash error - assert_internal_server_error_response(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn 
should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 0, - downloaded: 0, - incomplete: 1, - }, - ) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_no_bytes_pending_to_download() - .build(), - ) - .await; - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 1, - downloaded: 0, - incomplete: 0, - }, - ) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let 
test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_accept_multiple_infohashes() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .add_info_hash(&info_hash1) - .add_info_hash(&info_hash2) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file(info_hash1.bytes(), File::zeroed()) - .add_file(info_hash2.bytes(), File::zeroed()) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp4_scrapes_handled, 1); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn 
should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp6_scrapes_handled, 1); - - drop(stats); - - test_env.stop().await; - } - } - } - - mod configured_as_whitelisted { - - mod and_receiving_an_announce_request { - use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker_test_helpers::configuration; - - use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; - use crate::http::client::Client; - use crate::http::requests::announce::QueryBuilder; - use crate::http::test_environment::running_test_environment; - use crate::Warp; - - #[tokio::test] - async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - let response = Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) - .await; - - assert_torrent_not_in_whitelist_error_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - - async fn should_allow_announcing_a_whitelisted_torrent() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .tracker - .add_torrent_to_whitelist(&info_hash) - .await - .expect("should add the torrent to the 
whitelist"); - - let response = Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) - .await; - - assert_is_announce_response(response).await; - - test_env.stop().await; - } - } - - mod receiving_an_scrape_request { - use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::PeerBuilder; - use crate::http::asserts::assert_scrape_response; - use crate::http::client::Client; - use crate::http::requests; - use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::test_environment::running_test_environment; - use crate::Warp; - - #[tokio::test] - async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - 
.with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - test_env - .tracker - .add_torrent_to_whitelist(&info_hash) - .await - .expect("should add the torrent to the whitelist"); - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 0, - downloaded: 0, - incomplete: 1, - }, - ) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - } - } - - mod configured_as_private { - - mod and_receiving_an_announce_request { - use std::str::FromStr; - use std::time::Duration; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth::Key; - use torrust_tracker_test_helpers::configuration; - - use crate::http::asserts::assert_is_announce_response; - use crate::http::asserts_warp::{ - assert_warp_invalid_authentication_key_error_response, assert_warp_peer_not_authenticated_error_response, - }; - use crate::http::client::Client; - use crate::http::requests::announce::QueryBuilder; - use crate::http::test_environment::running_test_environment; - use crate::Warp; - - #[tokio::test] - async fn should_respond_to_authenticated_peers() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - - let response = Client::authenticated(*test_env.bind_address(), key.id()) - .announce(&QueryBuilder::default().query()) - .await; - - assert_is_announce_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let test_env = 
running_test_environment::(configuration::ephemeral_mode_private()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - let response = Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) - .await; - - assert_warp_peer_not_authenticated_error_response(response).await; - } - - #[tokio::test] - async fn should_fail_if_the_peer_authentication_key_is_not_valid() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - // The tracker does not have this key - let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - - let response = Client::authenticated(*test_env.bind_address(), unregistered_key) - .announce(&QueryBuilder::default().query()) - .await; - - assert_warp_invalid_authentication_key_error_response(response).await; - - test_env.stop().await; - } - } - - mod receiving_an_scrape_request { - - use std::str::FromStr; - use std::time::Duration; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth::Key; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::PeerBuilder; - use crate::http::asserts::assert_scrape_response; - use crate::http::client::Client; - use crate::http::requests; - use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::test_environment::running_test_environment; - use crate::Warp; - - #[tokio::test] - async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - 
.await; - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - - let response = Client::authenticated(*test_env.bind_address(), key.id()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 0, - downloaded: 0, - incomplete: 1, - }, - ) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { - // There is not authentication error - - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - let false_key: Key 
= "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); - - let response = Client::authenticated(*test_env.bind_address(), false_key) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - } - } - - mod configured_as_private_and_whitelisted { - - mod and_receiving_an_announce_request {} - - mod receiving_an_scrape_request {} - } -} - -mod axum_test_env { - - // WIP: migration HTTP from Warp to Axum +mod http_tracker { mod for_all_config_modes { diff --git a/tests/udp_tracker.rs b/tests/udp_tracker.rs index 0f9283a8b..3fe78c03d 100644 --- a/tests/udp_tracker.rs +++ b/tests/udp_tracker.rs @@ -1,6 +1,8 @@ /// Integration tests for UDP tracker server /// +/// ```text /// cargo test `udp_tracker_server` -- --nocapture +/// ``` extern crate rand; mod common; From 96d5333021f33938c7faf9e2c9f7d09f314aaaae Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Mar 2023 09:04:06 +0000 Subject: [PATCH 403/435] refactor: [#229] rename Axum HTTP tracker to v1 (version1) --- src/http/mod.rs | 4 +- .../extractors/announce_request.rs | 10 +- .../extractors/authentication_key.rs | 6 +- .../extractors/client_ip_sources.rs | 2 +- .../extractors/mod.rs | 0 .../extractors/scrape_request.rs | 10 +- .../handlers/announce.rs | 42 +- .../handlers/common/auth.rs | 2 +- .../handlers/common/mod.rs | 0 .../handlers/common/peer_ip.rs | 8 +- .../handlers/mod.rs | 0 .../handlers/scrape.rs | 34 +- .../{axum_implementation => v1}/launcher.rs | 0 src/http/{axum_implementation => v1}/mod.rs | 0 src/http/{axum_implementation => v1}/query.rs | 8 +- .../requests/announce.rs | 12 +- .../requests/mod.rs | 0 .../requests/scrape.rs | 12 +- .../responses/announce.rs | 4 +- .../responses/error.rs | 0 .../responses/mod.rs | 0 
.../responses/scrape.rs | 2 +- .../{axum_implementation => v1}/routes.rs | 0 .../services/announce.rs | 4 +- .../services/mod.rs | 0 .../services/peer_ip_resolver.rs | 4 +- .../services/scrape.rs | 12 +- src/jobs/http_tracker.rs | 6 +- src/setup.rs | 2 +- tests/http_tracker.rs | 2155 +++++++++-------- 30 files changed, 1169 insertions(+), 1170 deletions(-) rename src/http/{axum_implementation => v1}/extractors/announce_request.rs (90%) rename src/http/{axum_implementation => v1}/extractors/authentication_key.rs (94%) rename src/http/{axum_implementation => v1}/extractors/client_ip_sources.rs (93%) rename src/http/{axum_implementation => v1}/extractors/mod.rs (100%) rename src/http/{axum_implementation => v1}/extractors/scrape_request.rs (92%) rename src/http/{axum_implementation => v1}/handlers/announce.rs (85%) rename src/http/{axum_implementation => v1}/handlers/common/auth.rs (95%) rename src/http/{axum_implementation => v1}/handlers/common/mod.rs (100%) rename src/http/{axum_implementation => v1}/handlers/common/peer_ip.rs (75%) rename src/http/{axum_implementation => v1}/handlers/mod.rs (100%) rename src/http/{axum_implementation => v1}/handlers/scrape.rs (85%) rename src/http/{axum_implementation => v1}/launcher.rs (100%) rename src/http/{axum_implementation => v1}/mod.rs (100%) rename src/http/{axum_implementation => v1}/query.rs (97%) rename src/http/{axum_implementation => v1}/requests/announce.rs (97%) rename src/http/{axum_implementation => v1}/requests/mod.rs (100%) rename src/http/{axum_implementation => v1}/requests/scrape.rs (90%) rename src/http/{axum_implementation => v1}/responses/announce.rs (98%) rename src/http/{axum_implementation => v1}/responses/error.rs (100%) rename src/http/{axum_implementation => v1}/responses/mod.rs (100%) rename src/http/{axum_implementation => v1}/responses/scrape.rs (97%) rename src/http/{axum_implementation => v1}/routes.rs (100%) rename src/http/{axum_implementation => v1}/services/announce.rs (97%) rename 
src/http/{axum_implementation => v1}/services/mod.rs (100%) rename src/http/{axum_implementation => v1}/services/peer_ip_resolver.rs (95%) rename src/http/{axum_implementation => v1}/services/scrape.rs (94%) diff --git a/src/http/mod.rs b/src/http/mod.rs index 2309ee146..b8aa6b19f 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -12,11 +12,11 @@ use serde::{Deserialize, Serialize}; -pub mod axum_implementation; pub mod percent_encoding; pub mod server; +pub mod v1; #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] pub enum Version { - Axum, + V1, } diff --git a/src/http/axum_implementation/extractors/announce_request.rs b/src/http/v1/extractors/announce_request.rs similarity index 90% rename from src/http/axum_implementation/extractors/announce_request.rs rename to src/http/v1/extractors/announce_request.rs index 1680cd15c..c0b0451b3 100644 --- a/src/http/axum_implementation/extractors/announce_request.rs +++ b/src/http/v1/extractors/announce_request.rs @@ -5,9 +5,9 @@ use axum::extract::FromRequestParts; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; -use crate::http::axum_implementation::query::Query; -use crate::http::axum_implementation::requests::announce::{Announce, ParseAnnounceQueryError}; -use crate::http::axum_implementation::responses; +use crate::http::v1::query::Query; +use crate::http::v1::requests::announce::{Announce, ParseAnnounceQueryError}; +use crate::http::v1::responses; pub struct ExtractRequest(pub Announce); @@ -53,8 +53,8 @@ mod tests { use std::str::FromStr; use super::extract_announce_from; - use crate::http::axum_implementation::requests::announce::{Announce, Compact, Event}; - use crate::http::axum_implementation::responses::error::Error; + use crate::http::v1::requests::announce::{Announce, Compact, Event}; + use crate::http::v1::responses::error::Error; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; diff --git 
a/src/http/axum_implementation/extractors/authentication_key.rs b/src/http/v1/extractors/authentication_key.rs similarity index 94% rename from src/http/axum_implementation/extractors/authentication_key.rs rename to src/http/v1/extractors/authentication_key.rs index 8ffc4ff12..3b2680a5f 100644 --- a/src/http/axum_implementation/extractors/authentication_key.rs +++ b/src/http/v1/extractors/authentication_key.rs @@ -8,8 +8,8 @@ use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; use serde::Deserialize; -use crate::http::axum_implementation::handlers::common::auth; -use crate::http::axum_implementation::responses; +use crate::http::v1::handlers::common::auth; +use crate::http::v1::responses; use crate::tracker::auth::Key; pub struct Extract(pub Key); @@ -85,7 +85,7 @@ fn custom_error(rejection: &PathRejection) -> responses::error::Error { mod tests { use super::parse_key; - use crate::http::axum_implementation::responses::error::Error; + use crate::http::v1::responses::error::Error; fn assert_error_response(error: &Error, error_message: &str) { assert!( diff --git a/src/http/axum_implementation/extractors/client_ip_sources.rs b/src/http/v1/extractors/client_ip_sources.rs similarity index 93% rename from src/http/axum_implementation/extractors/client_ip_sources.rs rename to src/http/v1/extractors/client_ip_sources.rs index b41478c22..c8b3659f3 100644 --- a/src/http/axum_implementation/extractors/client_ip_sources.rs +++ b/src/http/v1/extractors/client_ip_sources.rs @@ -8,7 +8,7 @@ use axum::http::request::Parts; use axum::response::Response; use axum_client_ip::RightmostXForwardedFor; -use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; +use crate::http::v1::services::peer_ip_resolver::ClientIpSources; pub struct Extract(pub ClientIpSources); diff --git a/src/http/axum_implementation/extractors/mod.rs b/src/http/v1/extractors/mod.rs similarity index 100% rename from src/http/axum_implementation/extractors/mod.rs 
rename to src/http/v1/extractors/mod.rs diff --git a/src/http/axum_implementation/extractors/scrape_request.rs b/src/http/v1/extractors/scrape_request.rs similarity index 92% rename from src/http/axum_implementation/extractors/scrape_request.rs rename to src/http/v1/extractors/scrape_request.rs index 998728f59..d63470897 100644 --- a/src/http/axum_implementation/extractors/scrape_request.rs +++ b/src/http/v1/extractors/scrape_request.rs @@ -5,9 +5,9 @@ use axum::extract::FromRequestParts; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; -use crate::http::axum_implementation::query::Query; -use crate::http::axum_implementation::requests::scrape::{ParseScrapeQueryError, Scrape}; -use crate::http::axum_implementation::responses; +use crate::http::v1::query::Query; +use crate::http::v1::requests::scrape::{ParseScrapeQueryError, Scrape}; +use crate::http::v1::responses; pub struct ExtractRequest(pub Scrape); @@ -53,8 +53,8 @@ mod tests { use std::str::FromStr; use super::extract_scrape_from; - use crate::http::axum_implementation::requests::scrape::Scrape; - use crate::http::axum_implementation::responses::error::Error; + use crate::http::v1::requests::scrape::Scrape; + use crate::http::v1::responses::error::Error; use crate::protocol::info_hash::InfoHash; struct TestInfoHash { diff --git a/src/http/axum_implementation/handlers/announce.rs b/src/http/v1/handlers/announce.rs similarity index 85% rename from src/http/axum_implementation/handlers/announce.rs rename to src/http/v1/handlers/announce.rs index ebb8c8586..1f10c3fa4 100644 --- a/src/http/axum_implementation/handlers/announce.rs +++ b/src/http/v1/handlers/announce.rs @@ -7,14 +7,14 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; -use crate::http::axum_implementation::extractors::announce_request::ExtractRequest; -use crate::http::axum_implementation::extractors::authentication_key::Extract as ExtractKey; -use 
crate::http::axum_implementation::extractors::client_ip_sources::Extract as ExtractClientIpSources; -use crate::http::axum_implementation::handlers::common::auth; -use crate::http::axum_implementation::requests::announce::{Announce, Compact, Event}; -use crate::http::axum_implementation::responses::{self, announce}; -use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; -use crate::http::axum_implementation::services::{self, peer_ip_resolver}; +use crate::http::v1::extractors::announce_request::ExtractRequest; +use crate::http::v1::extractors::authentication_key::Extract as ExtractKey; +use crate::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; +use crate::http::v1::handlers::common::auth; +use crate::http::v1::requests::announce::{Announce, Compact, Event}; +use crate::http::v1::responses::{self, announce}; +use crate::http::v1::services::peer_ip_resolver::ClientIpSources; +use crate::http::v1::services::{self, peer_ip_resolver}; use crate::protocol::clock::{Current, Time}; use crate::tracker::auth::Key; use crate::tracker::peer::Peer; @@ -141,9 +141,9 @@ mod tests { use torrust_tracker_test_helpers::configuration; - use crate::http::axum_implementation::requests::announce::Announce; - use crate::http::axum_implementation::responses; - use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + use crate::http::v1::requests::announce::Announce; + use crate::http::v1::responses; + use crate::http::v1::services::peer_ip_resolver::ClientIpSources; use crate::protocol::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; use crate::tracker::{peer, Tracker}; @@ -197,8 +197,8 @@ mod tests { use std::sync::Arc; use super::{private_tracker, sample_announce_request, sample_client_ip_sources}; - use crate::http::axum_implementation::handlers::announce::handle_announce; - use crate::http::axum_implementation::handlers::announce::tests::assert_error_response; + use 
crate::http::v1::handlers::announce::handle_announce; + use crate::http::v1::handlers::announce::tests::assert_error_response; use crate::tracker::auth; #[tokio::test] @@ -238,8 +238,8 @@ mod tests { use std::sync::Arc; use super::{sample_announce_request, sample_client_ip_sources, whitelisted_tracker}; - use crate::http::axum_implementation::handlers::announce::handle_announce; - use crate::http::axum_implementation::handlers::announce::tests::assert_error_response; + use crate::http::v1::handlers::announce::handle_announce; + use crate::http::v1::handlers::announce::tests::assert_error_response; #[tokio::test] async fn it_should_fail_when_the_announced_torrent_is_not_whitelisted() { @@ -266,9 +266,9 @@ mod tests { use std::sync::Arc; use super::{sample_announce_request, tracker_on_reverse_proxy}; - use crate::http::axum_implementation::handlers::announce::handle_announce; - use crate::http::axum_implementation::handlers::announce::tests::assert_error_response; - use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + use crate::http::v1::handlers::announce::handle_announce; + use crate::http::v1::handlers::announce::tests::assert_error_response; + use crate::http::v1::services::peer_ip_resolver::ClientIpSources; #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { @@ -295,9 +295,9 @@ mod tests { use std::sync::Arc; use super::{sample_announce_request, tracker_not_on_reverse_proxy}; - use crate::http::axum_implementation::handlers::announce::handle_announce; - use crate::http::axum_implementation::handlers::announce::tests::assert_error_response; - use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + use crate::http::v1::handlers::announce::handle_announce; + use crate::http::v1::handlers::announce::tests::assert_error_response; + use crate::http::v1::services::peer_ip_resolver::ClientIpSources; #[tokio::test] async fn 
it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { diff --git a/src/http/axum_implementation/handlers/common/auth.rs b/src/http/v1/handlers/common/auth.rs similarity index 95% rename from src/http/axum_implementation/handlers/common/auth.rs rename to src/http/v1/handlers/common/auth.rs index 30971725a..938fc3f01 100644 --- a/src/http/axum_implementation/handlers/common/auth.rs +++ b/src/http/v1/handlers/common/auth.rs @@ -2,7 +2,7 @@ use std::panic::Location; use thiserror::Error; -use crate::http::axum_implementation::responses; +use crate::http::v1::responses; use crate::tracker::auth; #[derive(Debug, Error)] diff --git a/src/http/axum_implementation/handlers/common/mod.rs b/src/http/v1/handlers/common/mod.rs similarity index 100% rename from src/http/axum_implementation/handlers/common/mod.rs rename to src/http/v1/handlers/common/mod.rs diff --git a/src/http/axum_implementation/handlers/common/peer_ip.rs b/src/http/v1/handlers/common/peer_ip.rs similarity index 75% rename from src/http/axum_implementation/handlers/common/peer_ip.rs rename to src/http/v1/handlers/common/peer_ip.rs index df10e5eb1..e182c716b 100644 --- a/src/http/axum_implementation/handlers/common/peer_ip.rs +++ b/src/http/v1/handlers/common/peer_ip.rs @@ -1,5 +1,5 @@ -use crate::http::axum_implementation::responses; -use crate::http::axum_implementation::services::peer_ip_resolver::PeerIpResolutionError; +use crate::http::v1::responses; +use crate::http::v1::services::peer_ip_resolver::PeerIpResolutionError; impl From for responses::error::Error { fn from(err: PeerIpResolutionError) -> Self { @@ -13,8 +13,8 @@ impl From for responses::error::Error { mod tests { use std::panic::Location; - use crate::http::axum_implementation::responses; - use crate::http::axum_implementation::services::peer_ip_resolver::PeerIpResolutionError; + use crate::http::v1::responses; + use crate::http::v1::services::peer_ip_resolver::PeerIpResolutionError; fn assert_error_response(error: 
&responses::error::Error, error_message: &str) { assert!( diff --git a/src/http/axum_implementation/handlers/mod.rs b/src/http/v1/handlers/mod.rs similarity index 100% rename from src/http/axum_implementation/handlers/mod.rs rename to src/http/v1/handlers/mod.rs diff --git a/src/http/axum_implementation/handlers/scrape.rs b/src/http/v1/handlers/scrape.rs similarity index 85% rename from src/http/axum_implementation/handlers/scrape.rs rename to src/http/v1/handlers/scrape.rs index fd316882d..50f92cd36 100644 --- a/src/http/axum_implementation/handlers/scrape.rs +++ b/src/http/v1/handlers/scrape.rs @@ -4,12 +4,12 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; -use crate::http::axum_implementation::extractors::authentication_key::Extract as ExtractKey; -use crate::http::axum_implementation::extractors::client_ip_sources::Extract as ExtractClientIpSources; -use crate::http::axum_implementation::extractors::scrape_request::ExtractRequest; -use crate::http::axum_implementation::requests::scrape::Scrape; -use crate::http::axum_implementation::services::peer_ip_resolver::{self, ClientIpSources}; -use crate::http::axum_implementation::{responses, services}; +use crate::http::v1::extractors::authentication_key::Extract as ExtractKey; +use crate::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; +use crate::http::v1::extractors::scrape_request::ExtractRequest; +use crate::http::v1::requests::scrape::Scrape; +use crate::http::v1::services::peer_ip_resolver::{self, ClientIpSources}; +use crate::http::v1::{responses, services}; use crate::tracker::auth::Key; use crate::tracker::{ScrapeData, Tracker}; @@ -99,9 +99,9 @@ mod tests { use torrust_tracker_test_helpers::configuration; - use crate::http::axum_implementation::requests::scrape::Scrape; - use crate::http::axum_implementation::responses; - use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + use 
crate::http::v1::requests::scrape::Scrape; + use crate::http::v1::responses; + use crate::http::v1::services::peer_ip_resolver::ClientIpSources; use crate::protocol::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; use crate::tracker::Tracker; @@ -147,7 +147,7 @@ mod tests { use std::sync::Arc; use super::{private_tracker, sample_client_ip_sources, sample_scrape_request}; - use crate::http::axum_implementation::handlers::scrape::handle_scrape; + use crate::http::v1::handlers::scrape::handle_scrape; use crate::tracker::{auth, ScrapeData}; #[tokio::test] @@ -189,7 +189,7 @@ mod tests { use std::sync::Arc; use super::{sample_client_ip_sources, sample_scrape_request, whitelisted_tracker}; - use crate::http::axum_implementation::handlers::scrape::handle_scrape; + use crate::http::v1::handlers::scrape::handle_scrape; use crate::tracker::ScrapeData; #[tokio::test] @@ -212,9 +212,9 @@ mod tests { use std::sync::Arc; use super::{sample_scrape_request, tracker_on_reverse_proxy}; - use crate::http::axum_implementation::handlers::scrape::handle_scrape; - use crate::http::axum_implementation::handlers::scrape::tests::assert_error_response; - use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + use crate::http::v1::handlers::scrape::handle_scrape; + use crate::http::v1::handlers::scrape::tests::assert_error_response; + use crate::http::v1::services::peer_ip_resolver::ClientIpSources; #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { @@ -240,9 +240,9 @@ mod tests { use std::sync::Arc; use super::{sample_scrape_request, tracker_not_on_reverse_proxy}; - use crate::http::axum_implementation::handlers::scrape::handle_scrape; - use crate::http::axum_implementation::handlers::scrape::tests::assert_error_response; - use crate::http::axum_implementation::services::peer_ip_resolver::ClientIpSources; + use crate::http::v1::handlers::scrape::handle_scrape; + use 
crate::http::v1::handlers::scrape::tests::assert_error_response; + use crate::http::v1::services::peer_ip_resolver::ClientIpSources; #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { diff --git a/src/http/axum_implementation/launcher.rs b/src/http/v1/launcher.rs similarity index 100% rename from src/http/axum_implementation/launcher.rs rename to src/http/v1/launcher.rs diff --git a/src/http/axum_implementation/mod.rs b/src/http/v1/mod.rs similarity index 100% rename from src/http/axum_implementation/mod.rs rename to src/http/v1/mod.rs diff --git a/src/http/axum_implementation/query.rs b/src/http/v1/query.rs similarity index 97% rename from src/http/axum_implementation/query.rs rename to src/http/v1/query.rs index 8b01e9db7..45484ea38 100644 --- a/src/http/axum_implementation/query.rs +++ b/src/http/v1/query.rs @@ -174,7 +174,7 @@ impl std::fmt::Display for FieldValuePairSet { mod tests { mod url_query { - use crate::http::axum_implementation::query::Query; + use crate::http::v1::query::Query; #[test] fn should_parse_the_query_params_from_an_url_query_string() { @@ -227,7 +227,7 @@ mod tests { } mod should_allow_more_than_one_value_for_the_same_param { - use crate::http::axum_implementation::query::Query; + use crate::http::v1::query::Query; #[test] fn instantiated_from_a_vector() { @@ -249,7 +249,7 @@ mod tests { } mod should_be_displayed { - use crate::http::axum_implementation::query::Query; + use crate::http::v1::query::Query; #[test] fn with_one_param() { @@ -270,7 +270,7 @@ mod tests { } mod param_name_value_pair { - use crate::http::axum_implementation::query::NameValuePair; + use crate::http::v1::query::NameValuePair; #[test] fn should_parse_a_single_query_param() { diff --git a/src/http/axum_implementation/requests/announce.rs b/src/http/v1/requests/announce.rs similarity index 97% rename from src/http/axum_implementation/requests/announce.rs rename to src/http/v1/requests/announce.rs index 
6e357ea6d..eeab97d5f 100644 --- a/src/http/axum_implementation/requests/announce.rs +++ b/src/http/v1/requests/announce.rs @@ -5,9 +5,9 @@ use std::str::FromStr; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; -use crate::http::axum_implementation::query::{ParseQueryError, Query}; -use crate::http::axum_implementation::responses; use crate::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; +use crate::http::v1::query::{ParseQueryError, Query}; +use crate::http::v1::responses; use crate::protocol::info_hash::{ConversionError, InfoHash}; use crate::tracker::peer::{self, IdConversionError}; @@ -280,8 +280,8 @@ mod tests { mod announce_request { - use crate::http::axum_implementation::query::Query; - use crate::http::axum_implementation::requests::announce::{ + use crate::http::v1::query::Query; + use crate::http::v1::requests::announce::{ Announce, Compact, Event, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, }; use crate::protocol::info_hash::InfoHash; @@ -350,8 +350,8 @@ mod tests { mod when_it_is_instantiated_from_the_url_query_params { - use crate::http::axum_implementation::query::Query; - use crate::http::axum_implementation::requests::announce::{ + use crate::http::v1::query::Query; + use crate::http::v1::requests::announce::{ Announce, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, }; diff --git a/src/http/axum_implementation/requests/mod.rs b/src/http/v1/requests/mod.rs similarity index 100% rename from src/http/axum_implementation/requests/mod.rs rename to src/http/v1/requests/mod.rs diff --git a/src/http/axum_implementation/requests/scrape.rs b/src/http/v1/requests/scrape.rs similarity index 90% rename from src/http/axum_implementation/requests/scrape.rs rename to src/http/v1/requests/scrape.rs index 505be566e..6257f0733 100644 --- a/src/http/axum_implementation/requests/scrape.rs +++ b/src/http/v1/requests/scrape.rs @@ -3,9 +3,9 @@ use 
std::panic::Location; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; -use crate::http::axum_implementation::query::Query; -use crate::http::axum_implementation::responses; use crate::http::percent_encoding::percent_decode_info_hash; +use crate::http::v1::query::Query; +use crate::http::v1::responses; use crate::protocol::info_hash::{ConversionError, InfoHash}; pub type NumberOfBytes = i64; @@ -85,8 +85,8 @@ mod tests { mod scrape_request { - use crate::http::axum_implementation::query::Query; - use crate::http::axum_implementation::requests::scrape::{Scrape, INFO_HASH}; + use crate::http::v1::query::Query; + use crate::http::v1::requests::scrape::{Scrape, INFO_HASH}; use crate::protocol::info_hash::InfoHash; #[test] @@ -107,8 +107,8 @@ mod tests { mod when_it_is_instantiated_from_the_url_query_params { - use crate::http::axum_implementation::query::Query; - use crate::http::axum_implementation::requests::scrape::{Scrape, INFO_HASH}; + use crate::http::v1::query::Query; + use crate::http::v1::requests::scrape::{Scrape, INFO_HASH}; #[test] fn it_should_fail_if_the_query_does_not_include_the_info_hash_param() { diff --git a/src/http/axum_implementation/responses/announce.rs b/src/http/v1/responses/announce.rs similarity index 98% rename from src/http/axum_implementation/responses/announce.rs rename to src/http/v1/responses/announce.rs index 81651767b..8b178ff7e 100644 --- a/src/http/axum_implementation/responses/announce.rs +++ b/src/http/v1/responses/announce.rs @@ -8,7 +8,7 @@ use bip_bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut} use serde::{self, Deserialize, Serialize}; use thiserror::Error; -use crate::http::axum_implementation::responses; +use crate::http::v1::responses; use crate::tracker::{self, AnnounceData}; /// Normal (non compact) "announce" response @@ -250,7 +250,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use super::{NonCompact, Peer}; - use 
crate::http::axum_implementation::responses::announce::{Compact, CompactPeer}; + use crate::http::v1::responses::announce::{Compact, CompactPeer}; // Some ascii values used in tests: // diff --git a/src/http/axum_implementation/responses/error.rs b/src/http/v1/responses/error.rs similarity index 100% rename from src/http/axum_implementation/responses/error.rs rename to src/http/v1/responses/error.rs diff --git a/src/http/axum_implementation/responses/mod.rs b/src/http/v1/responses/mod.rs similarity index 100% rename from src/http/axum_implementation/responses/mod.rs rename to src/http/v1/responses/mod.rs diff --git a/src/http/axum_implementation/responses/scrape.rs b/src/http/v1/responses/scrape.rs similarity index 97% rename from src/http/axum_implementation/responses/scrape.rs rename to src/http/v1/responses/scrape.rs index 3fc34a0e5..5cbe6502e 100644 --- a/src/http/axum_implementation/responses/scrape.rs +++ b/src/http/v1/responses/scrape.rs @@ -55,7 +55,7 @@ impl IntoResponse for Bencoded { mod tests { mod scrape_response { - use crate::http::axum_implementation::responses::scrape::Bencoded; + use crate::http::v1::responses::scrape::Bencoded; use crate::protocol::info_hash::InfoHash; use crate::tracker::torrent::SwarmMetadata; use crate::tracker::ScrapeData; diff --git a/src/http/axum_implementation/routes.rs b/src/http/v1/routes.rs similarity index 100% rename from src/http/axum_implementation/routes.rs rename to src/http/v1/routes.rs diff --git a/src/http/axum_implementation/services/announce.rs b/src/http/v1/services/announce.rs similarity index 97% rename from src/http/axum_implementation/services/announce.rs rename to src/http/v1/services/announce.rs index 73d6ed468..a8b9f0d06 100644 --- a/src/http/axum_implementation/services/announce.rs +++ b/src/http/v1/services/announce.rs @@ -77,8 +77,8 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; - use 
crate::http::axum_implementation::services::announce::invoke; - use crate::http::axum_implementation::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; + use crate::http::v1::services::announce::invoke; + use crate::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; use crate::tracker::peer::Peer; use crate::tracker::torrent::SwarmStats; use crate::tracker::{statistics, AnnounceData, Tracker}; diff --git a/src/http/axum_implementation/services/mod.rs b/src/http/v1/services/mod.rs similarity index 100% rename from src/http/axum_implementation/services/mod.rs rename to src/http/v1/services/mod.rs diff --git a/src/http/axum_implementation/services/peer_ip_resolver.rs b/src/http/v1/services/peer_ip_resolver.rs similarity index 95% rename from src/http/axum_implementation/services/peer_ip_resolver.rs rename to src/http/v1/services/peer_ip_resolver.rs index fae1e4ec0..c7bc183b4 100644 --- a/src/http/axum_implementation/services/peer_ip_resolver.rs +++ b/src/http/v1/services/peer_ip_resolver.rs @@ -73,7 +73,7 @@ mod tests { use std::str::FromStr; use super::invoke; - use crate::http::axum_implementation::services::peer_ip_resolver::{ClientIpSources, PeerIpResolutionError}; + use crate::http::v1::services::peer_ip_resolver::{ClientIpSources, PeerIpResolutionError}; #[test] fn it_should_get_the_peer_ip_from_the_connection_info() { @@ -112,7 +112,7 @@ mod tests { use std::net::IpAddr; use std::str::FromStr; - use crate::http::axum_implementation::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; + use crate::http::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; #[test] fn it_should_get_the_peer_ip_from_the_right_most_ip_in_the_x_forwarded_for_header() { diff --git a/src/http/axum_implementation/services/scrape.rs b/src/http/v1/services/scrape.rs similarity index 94% rename from src/http/axum_implementation/services/scrape.rs rename to 
src/http/v1/services/scrape.rs index b48bab642..b6f319375 100644 --- a/src/http/axum_implementation/services/scrape.rs +++ b/src/http/v1/services/scrape.rs @@ -77,10 +77,8 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_test_helpers::configuration; - use crate::http::axum_implementation::services::scrape::invoke; - use crate::http::axum_implementation::services::scrape::tests::{ - public_tracker, sample_info_hash, sample_info_hashes, sample_peer, - }; + use crate::http::v1::services::scrape::invoke; + use crate::http::v1::services::scrape::tests::{public_tracker, sample_info_hash, sample_info_hashes, sample_peer}; use crate::tracker::torrent::SwarmMetadata; use crate::tracker::{statistics, ScrapeData, Tracker}; @@ -169,10 +167,8 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_test_helpers::configuration; - use crate::http::axum_implementation::services::scrape::fake; - use crate::http::axum_implementation::services::scrape::tests::{ - public_tracker, sample_info_hash, sample_info_hashes, sample_peer, - }; + use crate::http::v1::services::scrape::fake; + use crate::http::v1::services::scrape::tests::{public_tracker, sample_info_hash, sample_info_hashes, sample_peer}; use crate::tracker::{statistics, ScrapeData, Tracker}; #[tokio::test] diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index 70f512a39..e0091958b 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -6,7 +6,7 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker_configuration::HttpTracker; -use crate::http::axum_implementation::launcher; +use crate::http::v1::launcher; use crate::http::Version; use crate::tracker; @@ -15,14 +15,14 @@ pub struct ServerJobStarted(); pub async fn start_job(config: &HttpTracker, tracker: Arc, version: Version) -> JoinHandle<()> { match version { - Version::Axum => start_axum(config, tracker.clone()).await, + Version::V1 => start_v1(config, tracker.clone()).await, } } /// # Panics /// 
/// It would panic if the `config::HttpTracker` struct would contain inappropriate values. -async fn start_axum(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { +async fn start_v1(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { let bind_addr = config .bind_address .parse::() diff --git a/src/setup.rs b/src/setup.rs index ee32f5a81..86de0723c 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -51,7 +51,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Ve if !http_tracker_config.enabled { continue; } - jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone(), Version::Axum).await); + jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone(), Version::V1).await); } // Start HTTP API diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs index 1b07b987a..730da93d5 100644 --- a/tests/http_tracker.rs +++ b/tests/http_tracker.rs @@ -6,1435 +6,1438 @@ mod common; mod http; -pub type Axum = torrust_tracker::http::axum_implementation::launcher::Launcher; +pub type V1 = torrust_tracker::http::v1::launcher::Launcher; -mod test_environment_for_http_tracker { - use torrust_tracker_test_helpers::configuration; +mod http_tracker { - use crate::http::test_environment::running_test_environment; - use crate::Axum; + mod v1 { - #[tokio::test] - async fn should_be_started_and_stopped() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + use torrust_tracker_test_helpers::configuration; - test_env.stop().await; - } -} + use crate::http::test_environment::running_test_environment; + use crate::V1; -mod http_tracker { + #[tokio::test] + async fn test_environment_should_be_started_and_stopped() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - mod for_all_config_modes { + test_env.stop().await; + } - mod and_running_on_reverse_proxy { - use torrust_tracker_test_helpers::configuration; + mod for_all_config_modes { - use 
crate::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; - use crate::http::client::Client; - use crate::http::requests::announce::QueryBuilder; - use crate::http::test_environment::running_test_environment; - use crate::Axum; + mod and_running_on_reverse_proxy { + use torrust_tracker_test_helpers::configuration; - #[tokio::test] - async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { - // If the tracker is running behind a reverse proxy, the peer IP is the - // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. + use crate::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; + use crate::http::client::Client; + use crate::http::requests::announce::QueryBuilder; + use crate::http::test_environment::running_test_environment; + use crate::V1; - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + #[tokio::test] + async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { + // If the tracker is running behind a reverse proxy, the peer IP is the + // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. 
- let params = QueryBuilder::default().query().params(); + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let params = QueryBuilder::default().query().params(); - assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - test_env.stop().await; - } + assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; - #[tokio::test] - async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + test_env.stop().await; + } - let params = QueryBuilder::default().query().params(); + #[tokio::test] + async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; - let response = Client::new(*test_env.bind_address()) - .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") - .await; + let params = QueryBuilder::default().query().params(); - assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; + let response = Client::new(*test_env.bind_address()) + .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") + .await; - test_env.stop().await; - } - } + assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; - mod receiving_an_announce_request { - - // Announce request documentation: - // - // BEP 03. The BitTorrent Protocol Specification - // https://www.bittorrent.org/beps/bep_0003.html - // - // BEP 23. 
Tracker Returns Compact Peer Lists - // https://www.bittorrent.org/beps/bep_0023.html - // - // Vuze (bittorrent client) docs: - // https://wiki.vuze.com/w/Announce - - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use std::str::FromStr; - - use local_ip_address::local_ip; - use reqwest::Response; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; - use crate::http::asserts::{ - assert_announce_response, assert_bad_announce_request_error_response, - assert_cannot_parse_query_param_error_response, assert_cannot_parse_query_params_error_response, - assert_compact_announce_response, assert_empty_announce_response, assert_is_announce_response, - assert_missing_query_params_for_announce_request_error_response, - }; - use crate::http::client::Client; - use crate::http::requests::announce::{Compact, QueryBuilder}; - use crate::http::responses; - use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; - use crate::http::test_environment::running_test_environment; - use crate::Axum; - - #[tokio::test] - async fn should_respond_if_only_the_mandatory_fields_are_provided() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - params.remove_optional_params(); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_is_announce_response(response).await; - - test_env.stop().await; + test_env.stop().await; + } } - #[tokio::test] - async fn should_fail_when_the_url_query_component_is_empty() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + mod receiving_an_announce_request { - let response = Client::new(*test_env.bind_address()).get("announce").await; + // Announce request documentation: + // + 
// BEP 03. The BitTorrent Protocol Specification + // https://www.bittorrent.org/beps/bep_0003.html + // + // BEP 23. Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Announce + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::str::FromStr; + + use local_ip_address::local_ip; + use reqwest::Response; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; + + use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; + use crate::http::asserts::{ + assert_announce_response, assert_bad_announce_request_error_response, + assert_cannot_parse_query_param_error_response, assert_cannot_parse_query_params_error_response, + assert_compact_announce_response, assert_empty_announce_response, assert_is_announce_response, + assert_missing_query_params_for_announce_request_error_response, + }; + use crate::http::client::Client; + use crate::http::requests::announce::{Compact, QueryBuilder}; + use crate::http::responses; + use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; + use crate::http::test_environment::running_test_environment; + use crate::V1; - assert_missing_query_params_for_announce_request_error_response(response).await; + #[tokio::test] + async fn should_respond_if_only_the_mandatory_fields_are_provided() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - test_env.stop().await; - } + let mut params = QueryBuilder::default().query().params(); - #[tokio::test] - async fn should_fail_when_url_query_parameters_are_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + params.remove_optional_params(); - let invalid_query_param = "a=b=c"; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - let 
response = Client::new(*test_env.bind_address()) - .get(&format!("announce?{invalid_query_param}")) - .await; + assert_is_announce_response(response).await; - assert_cannot_parse_query_param_error_response(response, "invalid param a=b=c").await; + test_env.stop().await; + } - test_env.stop().await; - } + #[tokio::test] + async fn should_fail_when_the_url_query_component_is_empty() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - #[tokio::test] - async fn should_fail_when_a_mandatory_field_is_missing() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let response = Client::new(*test_env.bind_address()).get("announce").await; - // Without `info_hash` param + assert_missing_query_params_for_announce_request_error_response(response).await; - let mut params = QueryBuilder::default().query().params(); + test_env.stop().await; + } - params.info_hash = None; + #[tokio::test] + async fn should_fail_when_url_query_parameters_are_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let invalid_query_param = "a=b=c"; - assert_bad_announce_request_error_response(response, "missing param info_hash").await; + let response = Client::new(*test_env.bind_address()) + .get(&format!("announce?{invalid_query_param}")) + .await; - // Without `peer_id` param + assert_cannot_parse_query_param_error_response(response, "invalid param a=b=c").await; - let mut params = QueryBuilder::default().query().params(); + test_env.stop().await; + } - params.peer_id = None; + #[tokio::test] + async fn should_fail_when_a_mandatory_field_is_missing() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + // Without `info_hash` param - 
assert_bad_announce_request_error_response(response, "missing param peer_id").await; + let mut params = QueryBuilder::default().query().params(); - // Without `port` param + params.info_hash = None; - let mut params = QueryBuilder::default().query().params(); + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - params.port = None; + assert_bad_announce_request_error_response(response, "missing param info_hash").await; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + // Without `peer_id` param - assert_bad_announce_request_error_response(response, "missing param port").await; + let mut params = QueryBuilder::default().query().params(); - test_env.stop().await; - } + params.peer_id = None; - #[tokio::test] - async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - let mut params = QueryBuilder::default().query().params(); + assert_bad_announce_request_error_response(response, "missing param peer_id").await; - for invalid_value in &invalid_info_hashes() { - params.set("info_hash", invalid_value); + // Without `port` param - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let mut params = QueryBuilder::default().query().params(); - assert_cannot_parse_query_params_error_response(response, "").await; - } + params.port = None; - test_env.stop().await; - } + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - #[tokio::test] - async fn should_not_fail_when_the_peer_address_param_is_invalid() { - // AnnounceQuery does not even contain the `peer_addr` - // The peer IP is obtained in two ways: - // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP. - // 2. 
If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request HTTP header. + assert_bad_announce_request_error_response(response, "missing param port").await; - let test_env = running_test_environment::(configuration::ephemeral()).await; + test_env.stop().await; + } - let mut params = QueryBuilder::default().query().params(); + #[tokio::test] + async fn should_fail_when_the_info_hash_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); + let mut params = QueryBuilder::default().query().params(); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + for invalid_value in &invalid_info_hashes() { + params.set("info_hash", invalid_value); - assert_is_announce_response(response).await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - test_env.stop().await; - } + assert_cannot_parse_query_params_error_response(response, "").await; + } - #[tokio::test] - async fn should_fail_when_the_downloaded_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + test_env.stop().await; + } + + #[tokio::test] + async fn should_not_fail_when_the_peer_address_param_is_invalid() { + // AnnounceQuery does not even contain the `peer_addr` + // The peer IP is obtained in two ways: + // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP. + // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request HTTP header. 
- let mut params = QueryBuilder::default().query().params(); + let test_env = running_test_environment::(configuration::ephemeral()).await; - let invalid_values = ["-1", "1.1", "a"]; + let mut params = QueryBuilder::default().query().params(); - for invalid_value in invalid_values { - params.set("downloaded", invalid_value); + params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - assert_bad_announce_request_error_response(response, "invalid param value").await; + assert_is_announce_response(response).await; + + test_env.stop().await; } - test_env.stop().await; - } + #[tokio::test] + async fn should_fail_when_the_downloaded_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - #[tokio::test] - async fn should_fail_when_the_uploaded_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let mut params = QueryBuilder::default().query().params(); - let mut params = QueryBuilder::default().query().params(); + let invalid_values = ["-1", "1.1", "a"]; - let invalid_values = ["-1", "1.1", "a"]; + for invalid_value in invalid_values { + params.set("downloaded", invalid_value); - for invalid_value in invalid_values { - params.set("uploaded", invalid_value); + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + assert_bad_announce_request_error_response(response, "invalid param value").await; + } - assert_bad_announce_request_error_response(response, "invalid param value").await; + test_env.stop().await; } - test_env.stop().await; - } + #[tokio::test] + async fn should_fail_when_the_uploaded_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - #[tokio::test] - async fn 
should_fail_when_the_peer_id_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let mut params = QueryBuilder::default().query().params(); - let mut params = QueryBuilder::default().query().params(); + let invalid_values = ["-1", "1.1", "a"]; - let invalid_values = [ - "0", - "-1", - "1.1", - "a", - "-qB0000000000000000", // 19 bytes - "-qB000000000000000000", // 21 bytes - ]; + for invalid_value in invalid_values { + params.set("uploaded", invalid_value); - for invalid_value in invalid_values { - params.set("peer_id", invalid_value); + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + assert_bad_announce_request_error_response(response, "invalid param value").await; + } - assert_bad_announce_request_error_response(response, "invalid param value").await; + test_env.stop().await; } - test_env.stop().await; - } + #[tokio::test] + async fn should_fail_when_the_peer_id_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - #[tokio::test] - async fn should_fail_when_the_port_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let mut params = QueryBuilder::default().query().params(); - let mut params = QueryBuilder::default().query().params(); + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "-qB0000000000000000", // 19 bytes + "-qB000000000000000000", // 21 bytes + ]; - let invalid_values = ["-1", "1.1", "a"]; + for invalid_value in invalid_values { + params.set("peer_id", invalid_value); - for invalid_value in invalid_values { - params.set("port", invalid_value); + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + 
assert_bad_announce_request_error_response(response, "invalid param value").await; + } - assert_bad_announce_request_error_response(response, "invalid param value").await; + test_env.stop().await; } - test_env.stop().await; - } + #[tokio::test] + async fn should_fail_when_the_port_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - #[tokio::test] - async fn should_fail_when_the_left_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let mut params = QueryBuilder::default().query().params(); - let mut params = QueryBuilder::default().query().params(); + let invalid_values = ["-1", "1.1", "a"]; - let invalid_values = ["-1", "1.1", "a"]; + for invalid_value in invalid_values { + params.set("port", invalid_value); - for invalid_value in invalid_values { - params.set("left", invalid_value); + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + assert_bad_announce_request_error_response(response, "invalid param value").await; + } - assert_bad_announce_request_error_response(response, "invalid param value").await; + test_env.stop().await; } - test_env.stop().await; - } + #[tokio::test] + async fn should_fail_when_the_left_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - #[tokio::test] - async fn should_fail_when_the_event_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let mut params = QueryBuilder::default().query().params(); - let mut params = QueryBuilder::default().query().params(); + let invalid_values = ["-1", "1.1", "a"]; - let invalid_values = [ - "0", - "-1", - "1.1", - "a", - "Started", // It should be lowercase to be valid: `started` - "Stopped", // It should be lowercase to be valid: `stopped` - "Completed", // It 
should be lowercase to be valid: `completed` - ]; + for invalid_value in invalid_values { + params.set("left", invalid_value); - for invalid_value in invalid_values { - params.set("event", invalid_value); + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + assert_bad_announce_request_error_response(response, "invalid param value").await; + } - assert_bad_announce_request_error_response(response, "invalid param value").await; + test_env.stop().await; } - test_env.stop().await; - } + #[tokio::test] + async fn should_fail_when_the_event_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - #[tokio::test] - async fn should_fail_when_the_compact_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let mut params = QueryBuilder::default().query().params(); - let mut params = QueryBuilder::default().query().params(); + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "Started", // It should be lowercase to be valid: `started` + "Stopped", // It should be lowercase to be valid: `stopped` + "Completed", // It should be lowercase to be valid: `completed` + ]; - let invalid_values = ["-1", "1.1", "a"]; + for invalid_value in invalid_values { + params.set("event", invalid_value); - for invalid_value in invalid_values { - params.set("compact", invalid_value); + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + assert_bad_announce_request_error_response(response, "invalid param value").await; + } - assert_bad_announce_request_error_response(response, "invalid param value").await; + test_env.stop().await; } - test_env.stop().await; - } - - #[tokio::test] - async fn 
should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + #[tokio::test] + async fn should_fail_when_the_compact_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) - .query(), - ) - .await; + let mut params = QueryBuilder::default().query().params(); - assert_announce_response( - response, - &Announce { - complete: 1, // the peer for this test - incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, - peers: vec![], - }, - ) - .await; - - test_env.stop().await; - } + let invalid_values = ["-1", "1.1", "a"]; - #[tokio::test] - async fn should_return_the_list_of_previously_announced_peers() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + for invalid_value in invalid_values { + params.set("compact", invalid_value); - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - // Peer 1 - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); + assert_bad_announce_request_error_response(response, "invalid param value").await; + } - // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + test_env.stop().await; + } - // Announce the new Peer 2. 
This new peer is non included on the response peer list - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .query(), + #[tokio::test] + async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) + .query(), + ) + .await; + + assert_announce_response( + response, + &Announce { + complete: 1, // the peer for this test + incomplete: 0, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, + peers: vec![], + }, ) .await; - // It should only contain the previously announced peer - assert_announce_response( - response, - &Announce { - complete: 2, - incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, - peers: vec![DictionaryPeer::from(previously_announced_peer)], - }, - ) - .await; - - test_env.stop().await; - } + test_env.stop().await; + } - #[tokio::test] - async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - // Announce a peer using IPV4 - let peer_using_ipv4 = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) - .build(); - test_env.add_torrent_peer(&info_hash, &peer_using_ipv4).await; - - // Announce a peer using IPV6 - let peer_using_ipv6 = 
PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .with_peer_addr(&SocketAddr::new( - IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), - 8080, - )) - .build(); - test_env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; - - // Announce the new Peer. - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000003")) - .query(), + #[tokio::test] + async fn should_return_the_list_of_previously_announced_peers() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + + // Add the Peer 1 + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2. This new peer is non included on the response peer list + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .query(), + ) + .await; + + // It should only contain the previously announced peer + assert_announce_response( + response, + &Announce { + complete: 2, + incomplete: 0, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, + peers: vec![DictionaryPeer::from(previously_announced_peer)], + }, ) .await; - // The newly announced peer is not included on the response peer list, - // but all the previously announced peers should be included regardless the IP version they are using. 
- assert_announce_response( - response, - &Announce { - complete: 3, - incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, - peers: vec![DictionaryPeer::from(peer_using_ipv4), DictionaryPeer::from(peer_using_ipv6)], - }, - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let peer = PeerBuilder::default().build(); - - // Add a peer - test_env.add_torrent_peer(&info_hash, &peer).await; - - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer.peer_id) - .query(); + test_env.stop().await; + } - assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); + #[tokio::test] + async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Announce a peer using IPV4 + let peer_using_ipv4 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) + .build(); + test_env.add_torrent_peer(&info_hash, &peer_using_ipv4).await; + + // Announce a peer using IPV6 + let peer_using_ipv6 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + 8080, + )) + .build(); + test_env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; + + // Announce the new Peer. 
+ let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000003")) + .query(), + ) + .await; + + // The newly announced peer is not included on the response peer list, + // but all the previously announced peers should be included regardless the IP version they are using. + assert_announce_response( + response, + &Announce { + complete: 3, + incomplete: 0, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, + peers: vec![DictionaryPeer::from(peer_using_ipv4), DictionaryPeer::from(peer_using_ipv6)], + }, + ) + .await; - let response = Client::new(*test_env.bind_address()).announce(&announce_query).await; + test_env.stop().await; + } - assert_empty_announce_response(response).await; + #[tokio::test] + async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - test_env.stop().await; - } + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let peer = PeerBuilder::default().build(); - #[tokio::test] - async fn should_return_the_compact_response() { - // Tracker Returns Compact Peer Lists - // https://www.bittorrent.org/beps/bep_0023.html + // Add a peer + test_env.add_torrent_peer(&info_hash, &peer).await; - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer.peer_id) + .query(); - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); - // Peer 1 - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); + let response = 
Client::new(*test_env.bind_address()).announce(&announce_query).await; - // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + assert_empty_announce_response(response).await; - // Announce the new Peer 2 accepting compact responses - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .with_compact(Compact::Accepted) - .query(), - ) - .await; + test_env.stop().await; + } - let expected_response = responses::announce::Compact { - complete: 2, - incomplete: 0, - interval: 120, - min_interval: 120, - peers: CompactPeerList::new([CompactPeer::new(&previously_announced_peer.peer_addr)].to_vec()), - }; + #[tokio::test] + async fn should_return_the_compact_response() { + // Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html - assert_compact_announce_response(response, &expected_response).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - test_env.stop().await; - } + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - #[tokio::test] - async fn should_not_return_the_compact_response_by_default() { - // code-review: the HTTP tracker does not return the compact response by default if the "compact" - // param is not provided in the announce URL. The BEP 23 suggest to do so. 
+ // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + // Add the Peer 1 + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + // Announce the new Peer 2 accepting compact responses + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_compact(Compact::Accepted) + .query(), + ) + .await; - // Peer 1 - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); + let expected_response = responses::announce::Compact { + complete: 2, + incomplete: 0, + interval: 120, + min_interval: 120, + peers: CompactPeerList::new([CompactPeer::new(&previously_announced_peer.peer_addr)].to_vec()), + }; - // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + assert_compact_announce_response(response, &expected_response).await; - // Announce the new Peer 2 without passing the "compact" param - // By default it should respond with the compact peer list - // https://www.bittorrent.org/beps/bep_0023.html - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .without_compact() - .query(), - ) - .await; + test_env.stop().await; + } - assert!(!is_a_compact_announce_response(response).await); + #[tokio::test] + async fn should_not_return_the_compact_response_by_default() { + // code-review: the HTTP tracker does not return the compact response by default if the "compact" + // param is not provided in the announce URL. The BEP 23 suggest to do so. 
- test_env.stop().await; - } + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - async fn is_a_compact_announce_response(response: Response) -> bool { - let bytes = response.bytes().await.unwrap(); - let compact_announce = serde_bencode::from_bytes::(&bytes); - compact_announce.is_ok() - } + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - #[tokio::test] - async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); - Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().query()) - .await; + // Add the Peer 1 + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; - let stats = test_env.tracker.get_stats().await; + // Announce the new Peer 2 without passing the "compact" param + // By default it should respond with the compact peer list + // https://www.bittorrent.org/beps/bep_0023.html + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .without_compact() + .query(), + ) + .await; - assert_eq!(stats.tcp4_connections_handled, 1); + assert!(!is_a_compact_announce_response(response).await); - drop(stats); + test_env.stop().await; + } - test_env.stop().await; - } + async fn is_a_compact_announce_response(response: Response) -> bool { + let bytes = response.bytes().await.unwrap(); + let compact_announce = serde_bencode::from_bytes::(&bytes); + compact_announce.is_ok() + } - #[tokio::test] - async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; + #[tokio::test] + async fn 
should_increase_the_number_of_tcp4_connections_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) - .announce(&QueryBuilder::default().query()) - .await; + Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().query()) + .await; - let stats = test_env.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; - assert_eq!(stats.tcp6_connections_handled, 1); + assert_eq!(stats.tcp4_connections_handled, 1); - drop(stats); + drop(stats); - test_env.stop().await; - } + test_env.stop().await; + } - #[tokio::test] - async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { - // The tracker ignores the peer address in the request param. It uses the client remote ip address. + #[tokio::test] + async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + .announce(&QueryBuilder::default().query()) + .await; - Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) - .query(), - ) - .await; + let stats = test_env.tracker.get_stats().await; - let stats = test_env.tracker.get_stats().await; + assert_eq!(stats.tcp6_connections_handled, 1); - assert_eq!(stats.tcp6_connections_handled, 0); + drop(stats); - drop(stats); + test_env.stop().await; + } - test_env.stop().await; - } + #[tokio::test] + async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { + // The tracker ignores the peer address in the request param. 
It uses the client remote ip address. - #[tokio::test] - async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().query()) - .await; + Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; - let stats = test_env.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; - assert_eq!(stats.tcp4_announces_handled, 1); + assert_eq!(stats.tcp6_connections_handled, 0); - drop(stats); + drop(stats); - test_env.stop().await; - } + test_env.stop().await; + } - #[tokio::test] - async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; + #[tokio::test] + async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) - .announce(&QueryBuilder::default().query()) - .await; + Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().query()) + .await; - let stats = test_env.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; - assert_eq!(stats.tcp6_announces_handled, 1); + assert_eq!(stats.tcp4_announces_handled, 1); - drop(stats); + drop(stats); - test_env.stop().await; - } + test_env.stop().await; + } - #[tokio::test] - async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { - // The tracker ignores the peer address in the request param. 
It uses the client remote ip address. + #[tokio::test] + async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + .announce(&QueryBuilder::default().query()) + .await; - Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) - .query(), - ) - .await; + let stats = test_env.tracker.get_stats().await; - let stats = test_env.tracker.get_stats().await; + assert_eq!(stats.tcp6_announces_handled, 1); - assert_eq!(stats.tcp6_announces_handled, 0); + drop(stats); - drop(stats); + test_env.stop().await; + } - test_env.stop().await; - } + #[tokio::test] + async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() + { + // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- #[tokio::test] - async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let client_ip = local_ip().unwrap(); + Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; - let client = Client::bind(*test_env.bind_address(), client_ip); + let stats = test_env.tracker.get_stats().await; - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) - .query(); + assert_eq!(stats.tcp6_announces_handled, 0); - client.announce(&announce_query).await; + drop(stats); - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; + test_env.stop().await; + } - assert_eq!(peer_addr.ip(), client_ip); - assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + #[tokio::test] + async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - test_env.stop().await; - } + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let client_ip = local_ip().unwrap(); - #[tokio::test] - async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( - ) { - /* We assume that both the client and tracker share the same public IP. 
+ let client = Client::bind(*test_env.bind_address(), client_ip); - client <-> tracker <-> Internet - 127.0.0.1 external_ip = "2.137.87.41" - */ + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); - let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( - IpAddr::from_str("2.137.87.41").unwrap(), - )) - .await; + client.announce(&announce_query).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); - let client_ip = loopback_ip; + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; - let client = Client::bind(*test_env.bind_address(), client_ip); + assert_eq!(peer_addr.ip(), client_ip); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) - .query(); + test_env.stop().await; + } - client.announce(&announce_query).await; + #[tokio::test] + async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + /* We assume that both the client and tracker share the same public IP. 
- let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; + client <-> tracker <-> Internet + 127.0.0.1 external_ip = "2.137.87.41" + */ - assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); - assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( + IpAddr::from_str("2.137.87.41").unwrap(), + )) + .await; - test_env.stop().await; - } + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; - #[tokio::test] - async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( - ) { - /* We assume that both the client and tracker share the same public IP. + let client = Client::bind(*test_env.bind_address(), client_ip); - client <-> tracker <-> Internet - ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" - */ + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); - let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( - IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), - )) - .await; + client.announce(&announce_query).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); - let client_ip = loopback_ip; + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; - let client = Client::bind(*test_env.bind_address(), client_ip); + assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - let announce_query = QueryBuilder::default() - 
.with_info_hash(&info_hash) - .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) - .query(); + test_env.stop().await; + } - client.announce(&announce_query).await; + #[tokio::test] + async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + /* We assume that both the client and tracker share the same public IP. - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; + client <-> tracker <-> Internet + ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" + */ - assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); - assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( + IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), + )) + .await; - test_env.stop().await; - } + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; - #[tokio::test] - async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( - ) { - /* - client <-> http proxy <-> tracker <-> Internet - ip: header: config: peer addr: - 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 - */ + let client = Client::bind(*test_env.bind_address(), client_ip); - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + client.announce(&announce_query).await; - let client = Client::new(*test_env.bind_address()); + let peers = 
test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; - let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); + assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - client - .announce_with_header( - &announce_query, - "X-Forwarded-For", - "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", - ) - .await; + test_env.stop().await; + } - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; + #[tokio::test] + async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( + ) { + /* + client <-> http proxy <-> tracker <-> Internet + ip: header: config: peer addr: + 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 + */ - assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; - test_env.stop().await; - } - } + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - mod receiving_an_scrape_request { - - // Scrape documentation: - // - // BEP 48. 
Tracker Protocol Extension: Scrape - // https://www.bittorrent.org/beps/bep_0048.html - // - // Vuze (bittorrent client) docs: - // https://wiki.vuze.com/w/Scrape - - use std::net::IpAddr; - use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; - use crate::http::asserts::{ - assert_cannot_parse_query_params_error_response, assert_missing_query_params_for_scrape_request_error_response, - assert_scrape_response, - }; - use crate::http::client::Client; - use crate::http::requests; - use crate::http::requests::scrape::QueryBuilder; - use crate::http::responses::scrape::{self, File, ResponseBuilder}; - use crate::http::test_environment::running_test_environment; - use crate::Axum; - - //#[tokio::test] - #[allow(dead_code)] - async fn should_fail_when_the_request_is_empty() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let response = Client::new(*test_env.bind_address()).get("scrape").await; - - assert_missing_query_params_for_scrape_request_error_response(response).await; - - test_env.stop().await; - } + let client = Client::new(*test_env.bind_address()); - #[tokio::test] - async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); - let mut params = QueryBuilder::default().query().params(); + client + .announce_with_header( + &announce_query, + "X-Forwarded-For", + "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", + ) + .await; - for invalid_value in &invalid_info_hashes() { - params.set_one_info_hash_param(invalid_value); + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; - let response = 
Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); - assert_cannot_parse_query_params_error_response(response, "").await; + test_env.stop().await; } - - test_env.stop().await; } - #[tokio::test] - async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + mod receiving_an_scrape_request { - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + // Scrape documentation: + // + // BEP 48. Tracker Protocol Extension: Scrape + // https://www.bittorrent.org/beps/bep_0048.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Scrape - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; + use std::net::IpAddr; + use std::str::FromStr; - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 0, - downloaded: 0, - incomplete: 1, - }, - ) - .build(); + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; - assert_scrape_response(response, &expected_scrape_response).await; + use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; + use crate::http::asserts::{ + assert_cannot_parse_query_params_error_response, + assert_missing_query_params_for_scrape_request_error_response, assert_scrape_response, + }; + use crate::http::client::Client; + use crate::http::requests; + use crate::http::requests::scrape::QueryBuilder; + 
use crate::http::responses::scrape::{self, File, ResponseBuilder}; + use crate::http::test_environment::running_test_environment; + use crate::V1; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_request_is_empty() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let response = Client::new(*test_env.bind_address()).get("scrape").await; + + assert_missing_query_params_for_scrape_request_error_response(response).await; + + test_env.stop().await; + } - test_env.stop().await; - } + #[tokio::test] + async fn should_fail_when_the_info_hash_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - #[tokio::test] - async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let mut params = QueryBuilder::default().query().params(); - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + for invalid_value in &invalid_info_hashes() { + params.set_one_info_hash_param(invalid_value); - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_no_bytes_pending_to_download() - .build(), - ) - .await; + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; + assert_cannot_parse_query_params_error_response(response, "").await; + } - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 1, - downloaded: 0, - incomplete: 0, - }, - ) - .build(); + test_env.stop().await; + } - assert_scrape_response(response, 
&expected_scrape_response).await; + #[tokio::test] + async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } - test_env.stop().await; - } + #[tokio::test] + async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() + { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_no_bytes_pending_to_download() + .build(), + ) + .await; + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 1, + downloaded: 0, + incomplete: 0, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } - 
#[tokio::test] - async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + #[tokio::test] + async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; - assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; + assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; - test_env.stop().await; - } + test_env.stop().await; + } - #[tokio::test] - async fn should_accept_multiple_infohashes() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + #[tokio::test] + async fn should_accept_multiple_infohashes() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); + let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .add_info_hash(&info_hash1) 
- .add_info_hash(&info_hash2) - .query(), - ) - .await; + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .add_info_hash(&info_hash1) + .add_info_hash(&info_hash2) + .query(), + ) + .await; - let expected_scrape_response = ResponseBuilder::default() - .add_file(info_hash1.bytes(), File::zeroed()) - .add_file(info_hash2.bytes(), File::zeroed()) - .build(); + let expected_scrape_response = ResponseBuilder::default() + .add_file(info_hash1.bytes(), File::zeroed()) + .add_file(info_hash2.bytes(), File::zeroed()) + .build(); - assert_scrape_response(response, &expected_scrape_response).await; + assert_scrape_response(response, &expected_scrape_response).await; - test_env.stop().await; - } + test_env.stop().await; + } - #[tokio::test] - async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + #[tokio::test] + async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; + Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; - let stats = test_env.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; - assert_eq!(stats.tcp4_scrapes_handled, 1); + assert_eq!(stats.tcp4_scrapes_handled, 1); - drop(stats); + drop(stats); - test_env.stop().await; - } + test_env.stop().await; + } - #[tokio::test] - async fn 
should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; + #[tokio::test] + async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; - let stats = test_env.tracker.get_stats().await; + let stats = test_env.tracker.get_stats().await; - assert_eq!(stats.tcp6_scrapes_handled, 1); + assert_eq!(stats.tcp6_scrapes_handled, 1); - drop(stats); + drop(stats); - test_env.stop().await; + test_env.stop().await; + } } } - } - mod configured_as_whitelisted { + mod configured_as_whitelisted { - mod and_receiving_an_announce_request { - use std::str::FromStr; + mod and_receiving_an_announce_request { + use std::str::FromStr; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; - use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; - use crate::http::client::Client; - use crate::http::requests::announce::QueryBuilder; - use crate::http::test_environment::running_test_environment; - use crate::Axum; + use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; + use 
crate::http::client::Client; + use crate::http::requests::announce::QueryBuilder; + use crate::http::test_environment::running_test_environment; + use crate::V1; - #[tokio::test] - async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + #[tokio::test] + async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) - .await; - - assert_torrent_not_in_whitelist_error_response(response).await; - - test_env.stop().await; - } + let response = Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; - #[tokio::test] - async fn should_allow_announcing_a_whitelisted_torrent() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .tracker - .add_torrent_to_whitelist(&info_hash) - .await - .expect("should add the torrent to the whitelist"); - - let response = Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) - .await; + assert_torrent_not_in_whitelist_error_response(response).await; - assert_is_announce_response(response).await; + test_env.stop().await; + } - test_env.stop().await; - } - } + #[tokio::test] + async fn should_allow_announcing_a_whitelisted_torrent() { + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - mod receiving_an_scrape_request { - 
use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::PeerBuilder; - use crate::http::asserts::assert_scrape_response; - use crate::http::client::Client; - use crate::http::requests; - use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::test_environment::running_test_environment; - use crate::Axum; - - #[tokio::test] - async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; + test_env + .tracker + .add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the torrent to the whitelist"); - let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + let response = Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; - assert_scrape_response(response, &expected_scrape_response).await; + assert_is_announce_response(response).await; - test_env.stop().await; + test_env.stop().await; + } } - #[tokio::test] - async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - test_env - .tracker - .add_torrent_to_whitelist(&info_hash) - .await - .expect("should add the torrent to the whitelist"); - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 0, - downloaded: 0, - incomplete: 1, - }, - ) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; + mod receiving_an_scrape_request { + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; + + use crate::common::fixtures::PeerBuilder; + use crate::http::asserts::assert_scrape_response; + use crate::http::client::Client; + use crate::http::requests; + use crate::http::responses::scrape::{File, ResponseBuilder}; + use crate::http::test_environment::running_test_environment; + use crate::V1; + + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let 
expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } - test_env.stop().await; + #[tokio::test] + async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + test_env + .tracker + .add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the torrent to the whitelist"); + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } } } - } - mod configured_as_private { + mod configured_as_private { - mod and_receiving_an_announce_request { - use std::str::FromStr; - use std::time::Duration; + mod and_receiving_an_announce_request { + use std::str::FromStr; + use std::time::Duration; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth::Key; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::auth::Key; + use torrust_tracker_test_helpers::configuration; - use crate::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; - use crate::http::client::Client; - use 
crate::http::requests::announce::QueryBuilder; - use crate::http::test_environment::running_test_environment; - use crate::Axum; + use crate::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; + use crate::http::client::Client; + use crate::http::requests::announce::QueryBuilder; + use crate::http::test_environment::running_test_environment; + use crate::V1; - #[tokio::test] - async fn should_respond_to_authenticated_peers() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + #[tokio::test] + async fn should_respond_to_authenticated_peers() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(*test_env.bind_address(), key.id()) - .announce(&QueryBuilder::default().query()) - .await; + let response = Client::authenticated(*test_env.bind_address(), key.id()) + .announce(&QueryBuilder::default().query()) + .await; - assert_is_announce_response(response).await; + assert_is_announce_response(response).await; - test_env.stop().await; - } + test_env.stop().await; + } - #[tokio::test] - async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + #[tokio::test] + async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) - 
.await; + let response = Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; - assert_authentication_error_response(response).await; + assert_authentication_error_response(response).await; - test_env.stop().await; - } + test_env.stop().await; + } - #[tokio::test] - async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + #[tokio::test] + async fn should_fail_if_the_key_query_param_cannot_be_parsed() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - let invalid_key = "INVALID_KEY"; + let invalid_key = "INVALID_KEY"; - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*test_env.bind_address()) .get(&format!( "announce/{invalid_key}?info_hash=%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0" )) .await; - assert_authentication_error_response(response).await; - } + assert_authentication_error_response(response).await; + } - #[tokio::test] - async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + #[tokio::test] + async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - // The tracker does not have this key - let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + // The tracker does not have this key + let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let response = Client::authenticated(*test_env.bind_address(), unregistered_key) - .announce(&QueryBuilder::default().query()) - .await; + let 
response = Client::authenticated(*test_env.bind_address(), unregistered_key) + .announce(&QueryBuilder::default().query()) + .await; - assert_authentication_error_response(response).await; + assert_authentication_error_response(response).await; - test_env.stop().await; + test_env.stop().await; + } } - } - mod receiving_an_scrape_request { + mod receiving_an_scrape_request { - use std::str::FromStr; - use std::time::Duration; + use std::str::FromStr; + use std::time::Duration; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth::Key; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::auth::Key; + use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; - use crate::common::fixtures::PeerBuilder; - use crate::http::asserts::{assert_authentication_error_response, assert_scrape_response}; - use crate::http::client::Client; - use crate::http::requests; - use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::test_environment::running_test_environment; - use crate::Axum; + use crate::common::fixtures::PeerBuilder; + use crate::http::asserts::{assert_authentication_error_response, assert_scrape_response}; + use crate::http::client::Client; + use crate::http::requests; + use crate::http::responses::scrape::{File, ResponseBuilder}; + use crate::http::test_environment::running_test_environment; + use crate::V1; - #[tokio::test] - async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + #[tokio::test] + async fn should_fail_if_the_key_query_param_cannot_be_parsed() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - let invalid_key = "INVALID_KEY"; + let invalid_key = "INVALID_KEY"; - let response = 
Client::new(*test_env.bind_address()) - .get(&format!( - "scrape/{invalid_key}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" - )) - .await; + let response = Client::new(*test_env.bind_address()) + .get(&format!( + "scrape/{invalid_key}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" + )) + .await; - assert_authentication_error_response(response).await; - } - - #[tokio::test] - async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; + assert_authentication_error_response(response).await; + } - let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - assert_scrape_response(response, &expected_scrape_response).await; + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - test_env.stop().await; - } + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; - #[tokio::test] - async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + let response = 
Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; + assert_scrape_response(response, &expected_scrape_response).await; - let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - - let response = Client::authenticated(*test_env.bind_address(), key.id()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 0, - downloaded: 0, - incomplete: 1, - }, - ) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; + test_env.stop().await; + } - test_env.stop().await; - } + #[tokio::test] + async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + + let response = Client::authenticated(*test_env.bind_address(), key.id()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = 
ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } - #[tokio::test] - async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { - // There is not authentication error - // code-review: should this really be this way? + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { + // There is not authentication error + // code-review: should this really be this way? - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; - let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); + let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); - let response = Client::authenticated(*test_env.bind_address(), false_key) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; + let response = Client::authenticated(*test_env.bind_address(), false_key) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; - let expected_scrape_response = 
ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); - assert_scrape_response(response, &expected_scrape_response).await; + assert_scrape_response(response, &expected_scrape_response).await; - test_env.stop().await; + test_env.stop().await; + } } } - } - mod configured_as_private_and_whitelisted { + mod configured_as_private_and_whitelisted { - mod and_receiving_an_announce_request {} + mod and_receiving_an_announce_request {} - mod receiving_an_scrape_request {} + mod receiving_an_scrape_request {} + } } } From 034295bd39b10047279ded3a8171eba13a55be00 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Mar 2023 09:57:48 +0000 Subject: [PATCH 404/435] feat: update cargo dep r2d2_mysql from 21 to 23 --- Cargo.lock | 1451 ++++++++++++++++++++-------------------- Cargo.toml | 2 +- src/databases/mysql.rs | 6 +- src/tracker/auth.rs | 5 +- 4 files changed, 716 insertions(+), 748 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ce179501f..1fbd61c19 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -28,11 +28,22 @@ dependencies = [ "version_check", ] +[[package]] +name = "ahash" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", +] + [[package]] name = "aho-corasick" -version = "0.7.19" +version = "0.7.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" dependencies = [ "memchr", ] @@ -46,15 +57,6 @@ dependencies = [ "libc", ] -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - [[package]] name = "aquatic_udp_protocol" version = "0.2.0" @@ -71,12 +73,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" -[[package]] -name = "arrayvec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" - [[package]] name = "arrayvec" version = "0.7.2" @@ -85,26 +81,15 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "async-trait" -version = "0.1.58" +version = "0.1.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" +checksum = "b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" dependencies = [ "proc-macro2", "quote", "syn", ] -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi", -] - [[package]] name = "autocfg" version = "1.1.0" @@ -113,9 +98,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.1" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08b108ad2665fa3f6e6a517c3d80ec3e77d224c47d605167aefaa5d7ef97fa48" +checksum = "2fb79c228270dcf2426e74864cabc94babb5dbab01a4314e702d2f16540e1591" dependencies = [ "async-trait", "axum-core", @@ -157,9 +142,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.3.0" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"79b8558f5a0581152dc94dcd289132a1d377494bdeafcd41869b3258e3e2ad92" +checksum = "b2f958c80c248b34b9a877a643811be8dbca03ca5ba827f2b63baf3a81e5fc4e" dependencies = [ "async-trait", "bytes", @@ -174,9 +159,9 @@ dependencies = [ [[package]] name = "axum-server" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8456dab8f11484979a86651da8e619b355ede5d61a160755155f6c344bd18c47" +checksum = "25e4a990e1593e286b1b96e6df76da9dbcb84945a810287ca8101f1a4f000f61" dependencies = [ "arc-swap", "bytes", @@ -186,7 +171,7 @@ dependencies = [ "hyper", "pin-project-lite", "rustls", - "rustls-pemfile 1.0.1", + "rustls-pemfile 1.0.2", "tokio", "tokio-rustls", "tower-service", @@ -202,33 +187,32 @@ dependencies = [ "cc", "cfg-if", "libc", - "miniz_oxide 0.6.2", + "miniz_oxide", "object", "rustc-demangle", ] [[package]] -name = "base-x" -version = "0.2.11" +name = "base64" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.13.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" [[package]] name = "bigdecimal" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1e50562e37200edf7c6c43e54a08e64a5553bfb59d9c297d5572512aa517256" +checksum = "6aaf33151a6429fe9211d1b276eafdf70cdff28b071e76c0b0e1503221ea3744" dependencies = [ - "num-bigint 0.3.3", + "num-bigint", "num-integer", "num-traits", - "serde", ] [[package]] @@ -239,25 +223,21 @@ checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] name = "bindgen" -version 
= "0.58.1" +version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f8523b410d7187a43085e7e064416ea32ded16bd0a4e6fc025e21616d01258f" +checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" dependencies = [ "bitflags", "cexpr", "clang-sys", - "clap", - "env_logger", "lazy_static", "lazycell", - "log", "peeking_take_while", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "which", ] [[package]] @@ -277,9 +257,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitvec" -version = "0.22.3" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5237f00a8c86130a0cc317830e558b966dd7850d48a953d998c813f01a41b527" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty", "radium", @@ -289,20 +269,56 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.9.0" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] [[package]] -name = "block-buffer" -version = "0.10.3" +name = "borsh" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "40f9ca3698b2e4cb7c15571db0abc5551dca417a21ae8140460b50309bb2cc62" dependencies = [ - "generic-array", + "borsh-derive", + "hashbrown 0.13.2", +] + +[[package]] +name = "borsh-derive" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "598b3eacc6db9c3ee57b22707ad8f6a8d2f6d442bfe24ffeb8cbb70ca59e6a35" +dependencies = [ + "borsh-derive-internal", + "borsh-schema-derive-internal", + "proc-macro-crate", + "proc-macro2", + "syn", +] + 
+[[package]] +name = "borsh-derive-internal" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "186b734fa1c9f6743e90c95d7233c9faab6360d1a96d4ffa19d9cfd1e9350f8a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "borsh-schema-derive-internal" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99b7ff1008316626f485991b960ade129253d4034014616b94f309a15366cc49" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -323,9 +339,31 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.11.1" +version = "3.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" + +[[package]] +name = "bytecheck" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" +checksum = "13fe11640a23eb24562225322cd3e452b93a3d4091d62fab69c70542fcd17d1f" +dependencies = [ + "bytecheck_derive", + "ptr_meta", + "simdutf8", +] + +[[package]] +name = "bytecheck_derive" +version = "0.6.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e31225543cb46f81a7e224762764f4a6a0f097b1db0b175f69e8065efaa42de5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] [[package]] name = "byteorder" @@ -335,23 +373,23 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.2.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" [[package]] name = "cc" -version = "1.0.73" +version = "1.0.79" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" [[package]] name = "cexpr" -version = "0.4.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ - "nom 5.1.2", + "nom", ] [[package]] @@ -362,51 +400,36 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.22" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfd4d1b31faaa3a89d7934dbded3111da0d2ef28e3ebccdb4f0179f5929d1ef1" +checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" dependencies = [ "iana-time-zone", "js-sys", "num-integer", "num-traits", "serde", - "time 0.1.44", + "time 0.1.45", "wasm-bindgen", "winapi", ] [[package]] name = "clang-sys" -version = "1.4.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" +checksum = "77ed9a53e5d4d9c573ae844bfac6872b159cb1d1585a83b29e7a64b7eef7332a" dependencies = [ "glob", "libc", "libloading", ] -[[package]] -name = "clap" -version = "2.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" -dependencies = [ - "ansi_term", - "atty", - "bitflags", - "strsim 0.8.0", - "textwrap", - "unicode-width", - "vec_map", -] - [[package]] name = "cmake" -version = "0.1.48" +version = "0.1.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ad8cef104ac57b68b89df3208164d228503abbdce70f6880ffa3d970e7443a" +checksum = 
"db34956e100b30725f2eb215f90d4871051239535632f84fea3bc92722c66b7c" dependencies = [ "cc", ] @@ -423,14 +446,14 @@ dependencies = [ [[package]] name = "config" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11f1667b8320afa80d69d8bbe40830df2c8a06003d86f73d8e003b2c48df416d" +checksum = "d379af7f68bfc21714c6c7dea883544201741d2ce8274bb12fa54f89507f52a7" dependencies = [ "async-trait", "json5", "lazy_static", - "nom 7.1.1", + "nom", "pathdiff", "ron", "rust-ini", @@ -440,12 +463,6 @@ dependencies = [ "yaml-rust", ] -[[package]] -name = "const_fn" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbdcdcb6d86f71c5e97409ad45898af11cbc995b4ee8112d59095a28d376c935" - [[package]] name = "convert_case" version = "0.4.0" @@ -486,6 +503,73 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "crossbeam" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" +dependencies = [ + "cfg-if", + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +dependencies = [ + "cfg-if", + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" +dependencies = [ + 
"autocfg", + "cfg-if", + "crossbeam-utils", + "memoffset", + "scopeguard", +] + +[[package]] +name = "crossbeam-queue" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" +dependencies = [ + "cfg-if", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -498,9 +582,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.80" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b7d4e43b25d3c994662706a1d4fcfc32aaa6afd287502c111b237093bb23f3a" +checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" dependencies = [ "cc", "cxxbridge-flags", @@ -510,9 +594,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.80" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84f8829ddc213e2c1368e51a2564c552b65a8cb6a28f31e576270ac81d5e5827" +checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" dependencies = [ "cc", "codespan-reporting", @@ -525,15 +609,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.80" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e72537424b474af1460806647c41d4b6d35d09ef7fe031c5c2fa5766047cc56a" +checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" [[package]] name = "cxxbridge-macro" -version = "1.0.80" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "309e4fb93eed90e1e14bea0da16b209f81813ba9fc7830c20ed151dd7bc0a4d7" +checksum = 
"0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" dependencies = [ "proc-macro2", "quote", @@ -542,9 +626,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.14.1" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4529658bdda7fd6769b8614be250cdcfc3aeb0ee72fe66f9e41e5e5eb73eac02" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" dependencies = [ "darling_core", "darling_macro", @@ -552,23 +636,23 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.14.1" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "649c91bc01e8b1eac09fb91e8dbc7d517684ca6be8ebc75bb9cafc894f9fdb6f" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", + "strsim", "syn", ] [[package]] name = "darling_macro" -version = "0.14.1" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc69c5bfcbd2fc09a0f38451d2daf0e372e367986a83906d1b0dbc88134fb5" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ "darling_core", "quote", @@ -584,15 +668,15 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "rustc_version 0.4.0", + "rustc_version", "syn", ] [[package]] name = "derive_utils" -version = "0.11.2" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532b4c15dccee12c7044f1fcad956e98410860b22231e44a3b827464797ca7bf" +checksum = "7590f99468735a318c254ca9158d0c065aa9b5312896b5a043b5e39bc96f5fa2" dependencies = [ "proc-macro2", "quote", @@ -607,29 +691,14 @@ checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" [[package]] name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array", -] - -[[package]] -name = "digest" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" +checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer 0.10.3", + "block-buffer", "crypto-common", ] -[[package]] -name = "discard" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" - [[package]] name = "dlv-list" version = "0.3.0" @@ -644,30 +713,38 @@ checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" [[package]] name = "either" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = "encoding_rs" -version = "0.8.31" +version = "0.8.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" +checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" dependencies = [ "cfg-if", ] [[package]] -name = "env_logger" -version = "0.8.4" +name = "errno" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" +checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", + "errno-dragonfly", + "libc", + "winapi", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", ] [[package]] @@ -693,9 +770,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] @@ -711,13 +788,13 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.24" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" +checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" dependencies = [ "crc32fast", "libz-sys", - "miniz_oxide 0.5.4", + "miniz_oxide", ] [[package]] @@ -777,9 +854,9 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frunk" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cd67cf7d54b7e72d0ea76f3985c3747d74aee43e0218ad993b7903ba7a5395e" +checksum = "a89c703bf50009f383a0873845357cc400a95fc535f836feddfe015d7df6e1e0" dependencies = [ "frunk_core", "frunk_derives", @@ -788,15 +865,15 @@ dependencies = [ [[package]] name = "frunk_core" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1246cf43ec80bf8b2505b5c360b8fb999c97dabd17dbb604d85558d5cbc25482" +checksum = "2a446d01a558301dca28ef43222864a9fa2bd9a2e71370f769d5d5d5ec9f3537" [[package]] name = "frunk_derives" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dbc4f084ec5a3f031d24ccedeb87ab2c3189a2f33b8d070889073837d5ea09e" +checksum = 
"b83164912bb4c97cfe0772913c7af7387ee2e00cb6d4636fb65a35b3d0c8f173" dependencies = [ "frunk_proc_macro_helpers", "quote", @@ -805,9 +882,9 @@ dependencies = [ [[package]] name = "frunk_proc_macro_helpers" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99f11257f106c6753f5ffcb8e601fb39c390a088017aaa55b70c526bff15f63e" +checksum = "015425591bbeb0f5b8a75593340f1789af428e9f887a4f1e36c0c471f067ef50" dependencies = [ "frunk_core", "proc-macro2", @@ -817,9 +894,9 @@ dependencies = [ [[package]] name = "frunk_proc_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a078bd8459eccbb85e0b007b8f756585762a72a9efc53f359b371c3b6351dbcc" +checksum = "ea01524f285deab48affffb342b97f186e657b119c3f1821ac531780e0fbfae0" dependencies = [ "frunk_core", "frunk_proc_macros_impl", @@ -828,9 +905,9 @@ dependencies = [ [[package]] name = "frunk_proc_macros_impl" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ffba99f0fa4f57e42f57388fbb9a0ca863bc2b4261f3c5570fed579d5df6c32" +checksum = "0a802d974cc18ee7fe1a7868fc9ce31086294fd96ba62f8da64ecb44e92a2653" dependencies = [ "frunk_core", "frunk_proc_macro_helpers", @@ -841,15 +918,15 @@ dependencies = [ [[package]] name = "funty" -version = "1.2.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1847abb9cb65d566acd5942e94aea9c8f547ad02c98e1649326fc0e8910b8b1e" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" +checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84" dependencies = [ "futures-channel", "futures-core", @@ -862,9 +939,9 @@ 
dependencies = [ [[package]] name = "futures-channel" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" +checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" dependencies = [ "futures-core", "futures-sink", @@ -872,15 +949,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" +checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" [[package]] name = "futures-executor" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" +checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e" dependencies = [ "futures-core", "futures-task", @@ -889,15 +966,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" +checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" [[package]] name = "futures-macro" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" +checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" dependencies = [ "proc-macro2", "quote", @@ -906,21 +983,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" +checksum = 
"f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" [[package]] name = "futures-task" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" +checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" [[package]] name = "futures-util" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" +checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" dependencies = [ "futures-channel", "futures-core", @@ -963,15 +1040,15 @@ checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" [[package]] name = "glob" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "h2" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" +checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" dependencies = [ "bytes", "fnv", @@ -988,20 +1065,20 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.11.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash", + "ahash 0.7.6", ] [[package]] name = "hashbrown" -version = "0.12.3" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash", + "ahash 0.8.3", ] [[package]] @@ -1019,14 +1096,14 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ - "base64", + "base64 0.13.1", "bitflags", "bytes", "headers-core", "http", "httpdate", "mime", - "sha1 0.10.5", + "sha1", ] [[package]] @@ -1040,9 +1117,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.19" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" dependencies = [ "libc", ] @@ -1055,9 +1132,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "http" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", @@ -1093,17 +1170,11 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" -[[package]] -name = "humantime" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" - [[package]] name = "hyper" -version = "0.14.23" +version = "0.14.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" +checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c" 
dependencies = [ "bytes", "futures-channel", @@ -1138,9 +1209,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.51" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5a6ef98976b22b3b7f2f3a806f858cb862044cfa66805aa3ad84cb3d3b785ed" +checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1178,9 +1249,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.1" +version = "1.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" +checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" dependencies = [ "autocfg", "hashbrown 0.12.3", @@ -1198,21 +1269,30 @@ dependencies = [ [[package]] name = "io-enum" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03e3306b0f260aad2872563eb0d5d1a59f2420fad270a661dce59a01e92d806b" +checksum = "e4b0d47a958cb166282b4dc4840a35783e861c2b39080af846e6481ebe145eee" dependencies = [ - "autocfg", "derive_utils", "quote", "syn", ] +[[package]] +name = "io-lifetimes" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3" +dependencies = [ + "libc", + "windows-sys 0.45.0", +] + [[package]] name = "ipnet" -version = "2.5.1" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f88c5561171189e69df9d98bcf18fd5f9558300f7ea7b801eb8a0fd748bd8745" +checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" [[package]] name = "itertools" @@ -1225,15 +1305,15 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.4" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "js-sys" -version = "0.3.60" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" +checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" dependencies = [ "wasm-bindgen", ] @@ -1263,38 +1343,88 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lexical" -version = "5.2.2" +version = "6.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f404a90a744e32e8be729034fc33b90cf2a56418fbf594d69aa3c0214ad414e5" +checksum = "c7aefb36fd43fef7003334742cbf77b243fcd36418a1d1bdd480d613a67968f6" dependencies = [ - "cfg-if", "lexical-core", ] [[package]] name = "lexical-core" -version = "0.7.6" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6607c62aa161d23d17a9072cc5da0be67cdfc89d3afb1e8d9c842bebc2525ffe" +checksum = "2cde5de06e8d4c2faabc400238f9ae1c74d5412d03a7bd067645ccbc47070e46" +dependencies = [ + "lexical-parse-float", + "lexical-parse-integer", + "lexical-util", + "lexical-write-float", + "lexical-write-integer", +] + +[[package]] +name = "lexical-parse-float" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683b3a5ebd0130b8fb52ba0bdc718cc56815b6a097e28ae5a6997d0ad17dc05f" +dependencies = [ + "lexical-parse-integer", + "lexical-util", + "static_assertions", +] + +[[package]] +name = "lexical-parse-integer" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d0994485ed0c312f6d965766754ea177d07f9c00c9b82a5ee62ed5b47945ee9" +dependencies = [ + "lexical-util", + "static_assertions", +] + +[[package]] +name = "lexical-util" +version = "0.8.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5255b9ff16ff898710eb9eb63cb39248ea8a5bb036bea8085b1a767ff6c4e3fc" dependencies = [ - "arrayvec 0.5.2", - "bitflags", - "cfg-if", - "ryu", + "static_assertions", +] + +[[package]] +name = "lexical-write-float" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accabaa1c4581f05a3923d1b4cfd124c329352288b7b9da09e766b0668116862" +dependencies = [ + "lexical-util", + "lexical-write-integer", + "static_assertions", +] + +[[package]] +name = "lexical-write-integer" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1b6f3d1f4422866b68192d62f77bc5c700bee84f3069f2469d7bc8c77852446" +dependencies = [ + "lexical-util", "static_assertions", ] [[package]] name = "libc" -version = "0.2.136" +version = "0.2.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55edcf6c0bb319052dea84732cf99db461780fd5e8d3eb46ab6ff312ab31f197" +checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" [[package]] name = "libloading" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies = [ "cfg-if", "winapi", @@ -1302,9 +1432,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.25.1" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f0455f2c1bc9a7caa792907026e469c1d91761fb0ea37cbb16427c77280cf35" +checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" dependencies = [ "cc", "pkg-config", @@ -1324,9 +1454,9 @@ dependencies = [ [[package]] name = "link-cplusplus" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369" +checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" dependencies = [ "cc", ] @@ -1337,6 +1467,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "linux-raw-sys" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" + [[package]] name = "local-ip-address" version = "0.5.1" @@ -1370,11 +1506,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.6.6" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ea2d928b485416e8908cff2d97d621db22b27f7b3b6729e438bcf42c671ba91" +checksum = "b6e8aaa3f231bb4bd57b84b2d5dc3ae7f350265df8aa96492e0bc394a1571909" dependencies = [ - "hashbrown 0.11.2", + "hashbrown 0.12.3", ] [[package]] @@ -1391,9 +1527,9 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" -version = "0.6.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" dependencies = [ "autocfg", ] @@ -1420,15 +1556,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.6.2" @@ -1440,14 +1567,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.4" +version 
= "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.36.1", + "windows-sys 0.45.0", ] [[package]] @@ -1506,19 +1633,20 @@ dependencies = [ [[package]] name = "mysql" -version = "21.0.2" +version = "23.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06f5abe1c0f91831afd4d35298c08d958e80144869757b913891e5b0d00c2c96" +checksum = "05f11339ca5c251941805d51362a07823605a80586ced92914ab7de84fba813f" dependencies = [ "bufstream", "bytes", + "crossbeam", + "flate2", "io-enum", "libc", "lru", "mysql_common", "named_pipe", "native-tls", - "nix", "once_cell", "pem", "percent-encoding", @@ -1531,11 +1659,11 @@ dependencies = [ [[package]] name = "mysql_common" -version = "0.27.5" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02fa08ec695a40ed899b1239e81d0d74de5b40802d4fc8b513e2c541717c434e" +checksum = "9006c95034ccf7b903d955f210469119f6c3477fc9c9e7a7845ce38a3e665c2a" dependencies = [ - "base64", + "base64 0.13.1", "bigdecimal", "bindgen", "bitflags", @@ -1543,14 +1671,13 @@ dependencies = [ "byteorder", "bytes", "cc", - "chrono", "cmake", "crc32fast", "flate2", "frunk", "lazy_static", "lexical", - "num-bigint 0.4.3", + "num-bigint", "num-traits", "rand", "regex", @@ -1558,13 +1685,13 @@ dependencies = [ "saturating", "serde", "serde_json", - "sha1 0.6.1", + "sha1", "sha2", "smallvec", "subprocess", "thiserror", - "time 0.2.27", - "uuid 0.8.2", + "time 0.3.20", + "uuid", ] [[package]] @@ -1578,9 +1705,9 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fd7e2f3618557f980e0b17e8856252eee3c97fa12c54dff0ca290fb6266ca4a9" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" dependencies = [ "lazy_static", "libc", @@ -1604,34 +1731,11 @@ dependencies = [ "libc", ] -[[package]] -name = "nix" -version = "0.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" -dependencies = [ - "bitflags", - "cc", - "cfg-if", - "libc", - "memoffset", -] - [[package]] name = "nom" -version = "5.1.2" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" -dependencies = [ - "memchr", - "version_check", -] - -[[package]] -name = "nom" -version = "7.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", "minimal-lexical", @@ -1649,17 +1753,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" -[[package]] -name = "num-bigint" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6f7833f2cbf2360a6cfd58cd41a53aa7a90bd4c202f5b1c7dd2ed73c57b2c3" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - [[package]] name = "num-bigint" version = "0.4.3" @@ -1692,23 +1785,14 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.13.1" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" +checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" dependencies = [ "hermit-abi", "libc", ] -[[package]] -name = "num_threads" 
-version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" -dependencies = [ - "libc", -] - [[package]] name = "object" version = "0.30.3" @@ -1720,21 +1804,15 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.15.0" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1" - -[[package]] -name = "opaque-debug" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "openssl" -version = "0.10.42" +version = "0.10.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12fc0523e3bd51a692c8850d075d74dc062ccf251c0110668cbd921917118a13" +checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" dependencies = [ "bitflags", "cfg-if", @@ -1764,18 +1842,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.22.0+1.1.1q" +version = "111.25.1+1.1.1t" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f31f0d509d1c1ae9cada2f9539ff8f37933831fd5098879e482aa687d659853" +checksum = "1ef9a9cc6ea7d9d5e7c4a913dc4b48d0e359eddf01af1dfec96ba7064b4aba10" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.77" +version = "0.9.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b03b84c3b2d099b81f0953422b4d4ad58761589d0229b5506356afca05a3670a" +checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" dependencies = [ "autocfg", "cc", @@ -1807,15 +1885,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.4" +version = 
"0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" +checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -1832,13 +1910,11 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "0.8.3" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" +checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" dependencies = [ - "base64", - "once_cell", - "regex", + "base64 0.13.1", ] [[package]] @@ -1849,9 +1925,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.4.0" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbc7bc69c062e492337d74d59b120c274fd3d261b6bf6d3207d499b4b379c41a" +checksum = "8cbd939b234e95d72bc393d51788aec68aeeb5d51e748ca08ff3aad58cb722f7" dependencies = [ "thiserror", "ucd-trie", @@ -1859,9 +1935,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.4.0" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b75706b9642ebcb34dab3bc7750f811609a0eb1dd8b88c2d15bf628c1c65b2" +checksum = "a81186863f3d0a27340815be8f2078dd8050b14cd71913db9fbda795e5f707d7" dependencies = [ "pest", "pest_generator", @@ -1869,9 +1945,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.4.0" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f9272122f5979a6511a749af9db9bfc810393f63119970d7085fed1c4ea0db" +checksum = "75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b" dependencies = [ "pest", 
"pest_meta", @@ -1882,13 +1958,13 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.4.0" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8717927f9b79515e565a64fe46c38b8cd0427e64c40680b14a7365ab09ac8d" +checksum = "5e3b284b1f13a20dc5ebc90aff59a51b8d7137c221131b52a7260c08cbc1cc80" dependencies = [ "once_cell", "pest", - "sha1 0.10.5", + "sha2", ] [[package]] @@ -1925,21 +2001,21 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" [[package]] name = "ppv-lite86" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "predicates" -version = "2.1.1" +version = "2.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5aab5be6e4732b473071984b3164dbbfb7a3674d30ea5ff44410b6bcd960c3c" +checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" dependencies = [ "difflib", "float-cmp", @@ -1951,33 +2027,62 @@ dependencies = [ [[package]] name = "predicates-core" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da1c2388b1513e1b605fcec39a95e0a9e8ef088f71443ef37099fa9ae6673fcb" +checksum = "72f883590242d3c6fc5bf50299011695fa6590c2c70eac95ee1bdb9a733ad1a2" [[package]] name = "predicates-tree" -version = "1.0.5" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4d86de6de25020a36c6d3643a86d9a6a9f552107c0559c60ea03551b5e16c032" +checksum = "54ff541861505aabf6ea722d2131ee980b8276e10a1297b94e896dd8b621850d" dependencies = [ "predicates-core", "termtree", ] [[package]] -name = "proc-macro-hack" -version = "0.5.19" +name = "proc-macro-crate" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +dependencies = [ + "toml", +] + +[[package]] +name = "proc-macro-hack" +version = "0.5.20+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" + +[[package]] +name = "proc-macro2" +version = "1.0.51" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "ptr_meta" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" +checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1" +dependencies = [ + "ptr_meta_derive", +] [[package]] -name = "proc-macro2" -version = "1.0.47" +name = "ptr_meta_derive" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" +checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ - "unicode-ident", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -1988,9 +2093,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.21" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" +checksum = 
"8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" dependencies = [ "proc-macro2", ] @@ -2008,9 +2113,9 @@ dependencies = [ [[package]] name = "r2d2_mysql" -version = "21.0.0" +version = "23.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d05145690b395f5515feff202b8f4b9429c500f423ef7129175155c3c3a9e2" +checksum = "9733d738ce65959a744f387bae69aa690a867e18d48e5486b171c47bc7b0c575" dependencies = [ "mysql", "r2d2", @@ -2028,9 +2133,9 @@ dependencies = [ [[package]] name = "radium" -version = "0.6.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" [[package]] name = "rand" @@ -2073,9 +2178,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" +checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" dependencies = [ "aho-corasick", "memchr", @@ -2084,26 +2189,26 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.27" +version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" +checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" [[package]] -name = "remove_dir_all" -version = "0.5.3" +name = "rend" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +checksum = "581008d2099240d37fb08d77ad713bcaec2c4d89d50b5b21a8bb1996bbab68ab" dependencies = [ - "winapi", + "bytecheck", ] [[package]] name = "reqwest" -version = "0.11.13" +version = "0.11.14" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "68cc60575865c7831548863cc02356512e3f1dc2f3f82cb837d7fc4cc8f3c97c" +checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9" dependencies = [ - "base64", + "base64 0.21.0", "bytes", "encoding_rs", "futures-core", @@ -2149,13 +2254,38 @@ dependencies = [ "winapi", ] +[[package]] +name = "rkyv" +version = "0.7.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c30f1d45d9aa61cbc8cd1eb87705470892289bb2d01943e7803b873a57404dc3" +dependencies = [ + "bytecheck", + "hashbrown 0.12.3", + "ptr_meta", + "rend", + "rkyv_derive", + "seahash", +] + +[[package]] +name = "rkyv_derive" +version = "0.7.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff26ed6c7c4dfc2aa9480b86a60e3c7233543a270a680e10758a507c5a4ce476" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "ron" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a" dependencies = [ - "base64", + "base64 0.13.1", "bitflags", "serde", ] @@ -2186,13 +2316,20 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.26.1" +version = "1.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee9164faf726e4f3ece4978b25ca877ddc6802fa77f38cdccb32c7f805ecd70c" +checksum = "e13cf35f7140155d02ba4ec3294373d513a3c7baa8364c162b030e33c61520a8" dependencies = [ - "arrayvec 0.7.2", + "arrayvec", + "borsh", + "bytecheck", + "byteorder", + "bytes", "num-traits", + "rand", + "rkyv", "serde", + "serde_json", ] [[package]] @@ -2209,27 +2346,32 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc_version" -version = "0.2.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 0.9.0", + "semver", ] [[package]] -name = "rustc_version" -version = "0.4.0" +name = "rustix" +version = "0.36.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" dependencies = [ - "semver 1.0.14", + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys", + "windows-sys 0.45.0", ] [[package]] name = "rustls" -version = "0.20.7" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" +checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "log", "ring", @@ -2243,29 +2385,29 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" dependencies = [ - "base64", + "base64 0.13.1", ] [[package]] name = "rustls-pemfile" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" +checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64", + "base64 0.21.0", ] [[package]] name = "rustversion" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "ryu" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "safemem" @@ -2281,28 +2423,27 @@ checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" [[package]] name = "schannel" -version = "0.1.20" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" +checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" dependencies = [ - "lazy_static", - "windows-sys 0.36.1", + "windows-sys 0.42.0", ] [[package]] name = "scheduled-thread-pool" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "977a7519bff143a44f842fd07e80ad1329295bd71686457f18e496736f4bf9bf" +checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" dependencies = [ "parking_lot", ] [[package]] name = "scoped-tls" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" [[package]] name = "scopeguard" @@ -2312,9 +2453,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scratch" -version = "1.0.2" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898" +checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" [[package]] name = "sct" @@ -2326,11 +2467,17 @@ dependencies = [ "untrusted", ] +[[package]] +name = "seahash" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" + [[package]] 
name = "security-framework" -version = "2.7.0" +version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" +checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" dependencies = [ "bitflags", "core-foundation", @@ -2341,9 +2488,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.6.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" dependencies = [ "core-foundation-sys", "libc", @@ -2351,30 +2498,15 @@ dependencies = [ [[package]] name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver" -version = "1.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" - -[[package]] -name = "semver-parser" -version = "0.7.0" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" +checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" [[package]] name = "serde" -version = "1.0.152" +version = "1.0.154" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = "8cdd151213925e7f1ab45a9bbfb129316bd00799784b174b7cc7bcd16961c49e" dependencies = [ "serde_derive", ] @@ -2391,18 +2523,18 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.8" +version = "0.11.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "718dc5fff5b36f99093fc49b280cfc96ce6fc824317783bff5a1fed0c7a64819" +checksum = "416bda436f9aab92e02c8e10d49a15ddd339cea90b6e340fe51ed97abb548294" dependencies = [ "serde", ] [[package]] name = "serde_derive" -version = "1.0.152" +version = "1.0.154" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = "4fc80d722935453bcafdc2c9a73cd6fac4dc1938f0346035d84bf99fa9e33217" dependencies = [ "proc-macro2", "quote", @@ -2411,9 +2543,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.87" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" +checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" dependencies = [ "itoa", "ryu", @@ -2422,18 +2554,18 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b04f22b563c91331a10074bda3dd5492e3cc39d56bd557e91c0af42b6c7341" +checksum = "db0969fff533976baadd92e08b1d102c5a3d8a8049eadfd69d4d1e3c5b2ed189" dependencies = [ "serde", ] [[package]] name = "serde_repr" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a5ec9fa74a20ebbe5d9ac23dac1fc96ba0ecfe9f50f2843b52e537b10fbcb4e" +checksum = "395627de918015623b32e7669714206363a7fc00382bf477e72c1f7533e8eafc" dependencies = [ "proc-macro2", "quote", @@ -2454,25 +2586,25 @@ dependencies = [ [[package]] name = "serde_with" -version = "2.0.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368f2d60d049ea019a84dcd6687b0d1e0030fe663ae105039bdf967ed5e6a9a7" +checksum = "7ea48c9627169d206b35905699f513f513c303ab9d964a59b44fdcf66c1d1ab7" 
dependencies = [ - "base64", + "base64 0.13.1", "chrono", "hex", "indexmap", "serde", "serde_json", "serde_with_macros", - "time 0.3.16", + "time 0.3.20", ] [[package]] name = "serde_with_macros" -version = "2.0.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ccadfacf6cf10faad22bbadf55986bdd0856edfb5d9210aa1dcf1f516e84e93" +checksum = "9e6b7e52858f9f06c25e1c566bbb4ab428200cb3b30053ea09dc50837de7538b" dependencies = [ "darling", "proc-macro2", @@ -2482,22 +2614,13 @@ dependencies = [ [[package]] name = "sha-1" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.5", -] - -[[package]] -name = "sha1" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1da05c97445caa12d05e848c4a4fcbbea29e748ac28f7e80e9b010392063770" -dependencies = [ - "sha1_smol", + "digest", ] [[package]] @@ -2508,26 +2631,18 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.5", + "digest", ] -[[package]] -name = "sha1_smol" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" - [[package]] name = "sha2" -version = "0.9.9" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ - "block-buffer 0.9.0", "cfg-if", "cpufeatures", - "digest 0.9.0", - "opaque-debug", + "digest", ] [[package]] @@ -2538,18 +2653,24 @@ checksum = 
"43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] +[[package]] +name = "simdutf8" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" + [[package]] name = "slab" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" dependencies = [ "autocfg", ] @@ -2562,9 +2683,9 @@ checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", @@ -2576,76 +2697,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" -[[package]] -name = "standback" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" -dependencies = [ - "version_check", -] - [[package]] name = "static_assertions" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" -[[package]] -name = "stdweb" 
-version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" -dependencies = [ - "discard", - "rustc_version 0.2.3", - "stdweb-derive", - "stdweb-internal-macros", - "stdweb-internal-runtime", - "wasm-bindgen", -] - -[[package]] -name = "stdweb-derive" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" -dependencies = [ - "proc-macro2", - "quote", - "serde", - "serde_derive", - "syn", -] - -[[package]] -name = "stdweb-internal-macros" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" -dependencies = [ - "base-x", - "proc-macro2", - "quote", - "serde", - "serde_derive", - "serde_json", - "sha1 0.6.1", - "syn", -] - -[[package]] -name = "stdweb-internal-runtime" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" - -[[package]] -name = "strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" - [[package]] name = "strsim" version = "0.10.0" @@ -2664,9 +2721,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.107" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", @@ -2675,9 +2732,9 @@ dependencies = [ [[package]] name = "sync_wrapper" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "tap" @@ -2687,56 +2744,46 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = "af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95" dependencies = [ "cfg-if", "fastrand", - "libc", "redox_syscall", - "remove_dir_all", - "winapi", + "rustix", + "windows-sys 0.42.0", ] [[package]] name = "termcolor" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" dependencies = [ "winapi-util", ] [[package]] name = "termtree" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "507e9898683b6c43a9aa55b64259b721b52ba226e0f3779137e50ad114a4c90b" - -[[package]] -name = "textwrap" -version = "0.11.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] +checksum = "95059e91184749cb66be6dc994f67f182b6d897cb3df74a5bf66b5e709295fd8" [[package]] name = "thiserror" -version = "1.0.37" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" +checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.37" +version = "1.0.39" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" +checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" dependencies = [ "proc-macro2", "quote", @@ -2745,9 +2792,9 @@ dependencies = [ [[package]] name = "time" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" dependencies = [ "libc", "wasi 0.10.0+wasi-snapshot-preview1", @@ -2756,31 +2803,14 @@ dependencies = [ [[package]] name = "time" -version = "0.2.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4752a97f8eebd6854ff91f1c1824cd6160626ac4bd44287f7f4ea2035a02a242" -dependencies = [ - "const_fn", - "libc", - "standback", - "stdweb", - "time-macros 0.1.1", - "version_check", - "winapi", -] - -[[package]] -name = "time" -version = "0.3.16" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fab5c8b9980850e06d92ddbe3ab839c062c801f3927c0fb8abd6fc8e918fbca" +checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" dependencies = [ "itoa", - "libc", - "num_threads", "serde", "time-core", - "time-macros 0.2.5", + "time-macros", ] [[package]] @@ -2791,36 +2821,13 @@ checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" [[package]] name = "time-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" -dependencies = [ - "proc-macro-hack", - "time-macros-impl", -] - -[[package]] -name = "time-macros" -version = "0.2.5" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"65bb801831d812c562ae7d2bfb531f26e66e4e1f6b17307ba4149c5064710e5b" +checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" dependencies = [ "time-core", ] -[[package]] -name = "time-macros-impl" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd3c141a1b43194f3f56a1411225df8646c55781d5f26db825b3d98507eb482f" -dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "standback", - "syn", -] - [[package]] name = "tinyvec" version = "1.6.0" @@ -2832,15 +2839,15 @@ dependencies = [ [[package]] name = "tinyvec_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.21.2" +version = "1.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" +checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" dependencies = [ "autocfg", "bytes", @@ -2852,14 +2859,14 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "winapi", + "windows-sys 0.45.0", ] [[package]] name = "tokio-macros" -version = "1.8.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" +checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ "proc-macro2", "quote", @@ -2868,9 +2875,9 @@ dependencies = [ [[package]] name = "tokio-native-tls" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" 
dependencies = [ "native-tls", "tokio", @@ -2889,9 +2896,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" +checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" dependencies = [ "futures-core", "pin-project-lite", @@ -2912,9 +2919,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.4" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" +checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" dependencies = [ "bytes", "futures-core", @@ -2926,9 +2933,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.9" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ "serde", ] @@ -2976,7 +2983,7 @@ dependencies = [ "torrust-tracker-located-error", "torrust-tracker-primitives", "torrust-tracker-test-helpers", - "uuid 1.2.1", + "uuid", "warp", ] @@ -2992,7 +2999,7 @@ dependencies = [ "toml", "torrust-tracker-located-error", "torrust-tracker-primitives", - "uuid 1.2.1", + "uuid", ] [[package]] @@ -3092,9 +3099,9 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "tungstenite" @@ -3102,7 +3109,7 @@ version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ - "base64", + "base64 0.13.1", "byteorder", "bytes", "http", @@ -3137,9 +3144,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "ucd-trie" @@ -3158,15 +3165,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.8" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +checksum = "524b68aca1d05e03fdf03fcdce2c6c94b6daf6d16861ddaa7e4f2b6638a9052c" [[package]] name = "unicode-ident" -version = "1.0.5" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -3208,15 +3215,9 @@ checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] name = "uuid" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" - -[[package]] -name = "uuid" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb41e78f93363bb2df8b0e86a2ca30eed7806ea16ea0c790d757cf93f79be83" +checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79" dependencies = [ "getrandom", ] @@ -3227,12 +3228,6 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" -[[package]] -name 
= "vec_map" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" - [[package]] name = "version_check" version = "0.9.4" @@ -3295,9 +3290,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" +checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3305,9 +3300,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" +checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" dependencies = [ "bumpalo", "log", @@ -3320,9 +3315,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.33" +version = "0.4.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" +checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" dependencies = [ "cfg-if", "js-sys", @@ -3332,9 +3327,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" +checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3342,9 +3337,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.83" +version = "0.2.84" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" +checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", @@ -3355,15 +3350,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.83" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" +checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" [[package]] name = "web-sys" -version = "0.3.60" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" +checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" dependencies = [ "js-sys", "wasm-bindgen", @@ -3379,15 +3374,6 @@ dependencies = [ "untrusted", ] -[[package]] -name = "which" -version = "3.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724" -dependencies = [ - "libc", -] - [[package]] name = "winapi" version = "0.3.9" @@ -3419,19 +3405,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "windows-sys" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" -dependencies = [ - "windows_aarch64_msvc 0.36.1", - "windows_i686_gnu 0.36.1", - "windows_i686_msvc 0.36.1", - "windows_x86_64_gnu 0.36.1", - "windows_x86_64_msvc 0.36.1", -] - [[package]] name = "windows-sys" version = "0.42.0" @@ -3439,85 +3412,79 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ "windows_aarch64_gnullvm", - "windows_aarch64_msvc 0.42.0", - "windows_i686_gnu 0.42.0", - "windows_i686_msvc 0.42.0", - "windows_x86_64_gnu 0.42.0", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", "windows_x86_64_gnullvm", - "windows_x86_64_msvc 0.42.0", + "windows_x86_64_msvc", ] [[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.0" +name = "windows-sys" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets", +] [[package]] -name = "windows_aarch64_msvc" -version = "0.36.1" +name = "windows-targets" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" +checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] [[package]] -name = "windows_aarch64_msvc" -version = "0.42.0" +name = "windows_aarch64_gnullvm" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" +checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" [[package]] -name = "windows_i686_gnu" -version = "0.36.1" +name = "windows_aarch64_msvc" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" +checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" 
[[package]] name = "windows_i686_gnu" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" - -[[package]] -name = "windows_i686_msvc" -version = "0.36.1" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" +checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" [[package]] name = "windows_i686_msvc" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.36.1" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" +checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" [[package]] name = "windows_x86_64_gnu" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" +checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.36.1" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" +checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" [[package]] name = "windows_x86_64_msvc" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" +checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" [[package]] name = "winreg" @@ -3530,9 +3497,9 @@ dependencies = [ [[package]] name = "wyz" -version = "0.4.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "129e027ad65ce1453680623c3fb5163cbf7107bfe1aa32257e7d0e63f9ced188" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" dependencies = [ "tap", ] diff --git a/Cargo.toml b/Cargo.toml index 740a5805e..dc51d8dca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,7 +30,7 @@ log = { version = "0.4", features = ["release_max_level_info"] } fern = "0.6" chrono = "0.4" r2d2 = "0.8" -r2d2_mysql = "21" +r2d2_mysql = "23" r2d2_sqlite = { version = "0.21", features = ["bundled"] } rand = "0.8" derive_more = "0.99" diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index c8117a45c..f0c7ec1dd 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -6,7 +6,7 @@ use log::debug; use r2d2::Pool; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; -use r2d2_mysql::MysqlConnectionManager; +use r2d2_mysql::MySqlConnectionManager; use torrust_tracker_primitives::DatabaseDriver; use crate::databases::{Database, Error}; @@ -17,7 +17,7 @@ use crate::tracker::auth::{self, Key}; const DRIVER: DatabaseDriver = DatabaseDriver::MySQL; pub struct Mysql { - pool: Pool, + pool: Pool, } #[async_trait] @@ -28,7 +28,7 @@ impl Database for Mysql { fn new(db_path: &str) -> Result { let opts = Opts::from_url(db_path)?; let builder = OptsBuilder::from_opts(opts); - let manager = MysqlConnectionManager::new(builder); + let manager = MySqlConnectionManager::new(builder); let pool = r2d2::Pool::builder().build(manager).map_err(|e| (e, DRIVER))?; Ok(Self { pool }) diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 00663c383..e3c12a828 100644 --- 
a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -64,10 +64,11 @@ impl std::fmt::Display for ExpiringKey { "key: `{}`, valid until `{}`", self.key, DateTime::::from_utc( - NaiveDateTime::from_timestamp( + NaiveDateTime::from_timestamp_opt( i64::try_from(self.valid_until.as_secs()).expect("Overflow of i64 seconds, very future!"), self.valid_until.subsec_nanos(), - ), + ) + .unwrap(), Utc ) ) From 6854081d2e5e187b72bb3d34407df20feefced90 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Mar 2023 10:29:31 +0000 Subject: [PATCH 405/435] refactor: remove cargo dep warp and add dep hyper We were only using `hyper` from `warp` dependency. I've removed the `warp` dependency and added `hyper`. --- Cargo.lock | 203 +--------------------------------------- Cargo.toml | 2 +- src/apis/server.rs | 1 - src/http/v1/launcher.rs | 1 - 4 files changed, 3 insertions(+), 204 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1fbd61c19..42c4f8084 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -171,7 +171,7 @@ dependencies = [ "hyper", "pin-project-lite", "rustls", - "rustls-pemfile 1.0.2", + "rustls-pemfile", "tokio", "tokio-rustls", "tower-service", @@ -321,16 +321,6 @@ dependencies = [ "syn", ] -[[package]] -name = "buf_redux" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" -dependencies = [ - "memchr", - "safemem", -] - [[package]] name = "bufstream" version = "0.1.4" @@ -1090,31 +1080,6 @@ dependencies = [ "hashbrown 0.12.3", ] -[[package]] -name = "headers" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" -dependencies = [ - "base64 0.13.1", - "bitflags", - "bytes", - "headers-core", - "http", - "httpdate", - "mime", - "sha1", -] - -[[package]] -name = "headers-core" -version = "0.2.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" -dependencies = [ - "http", -] - [[package]] name = "hermit-abi" version = "0.2.6" @@ -1540,16 +1505,6 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -[[package]] -name = "mime_guess" -version = "2.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" -dependencies = [ - "mime", - "unicase", -] - [[package]] name = "minimal-lexical" version = "0.2.1" @@ -1613,24 +1568,6 @@ dependencies = [ "serde", ] -[[package]] -name = "multipart" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00dec633863867f29cb39df64a397cdf4a6354708ddd7759f70c7fb51c5f9182" -dependencies = [ - "buf_redux", - "httparse", - "log", - "mime", - "mime_guess", - "quick-error", - "rand", - "safemem", - "tempfile", - "twoway", -] - [[package]] name = "mysql" version = "23.0.1" @@ -2085,12 +2022,6 @@ dependencies = [ "syn", ] -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - [[package]] name = "quote" version = "1.0.23" @@ -2379,15 +2310,6 @@ dependencies = [ "webpki", ] -[[package]] -name = "rustls-pemfile" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" -dependencies = [ - "base64 0.13.1", -] - [[package]] name = "rustls-pemfile" version = "1.0.2" @@ -2409,12 +2331,6 @@ version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" -[[package]] -name 
= "safemem" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" - [[package]] name = "saturating" version = "0.1.0" @@ -2439,12 +2355,6 @@ dependencies = [ "parking_lot", ] -[[package]] -name = "scoped-tls" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" - [[package]] name = "scopeguard" version = "1.1.0" @@ -2612,17 +2522,6 @@ dependencies = [ "syn", ] -[[package]] -name = "sha-1" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest", -] - [[package]] name = "sha1" version = "0.10.5" @@ -2894,29 +2793,6 @@ dependencies = [ "webpki", ] -[[package]] -name = "tokio-stream" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" -dependencies = [ - "futures-core", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "tokio-tungstenite" -version = "0.17.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" -dependencies = [ - "futures-util", - "log", - "tokio", - "tungstenite", -] - [[package]] name = "tokio-util" version = "0.7.7" @@ -2957,6 +2833,7 @@ dependencies = [ "fern", "futures", "hex", + "hyper", "lazy_static", "local-ip-address", "log", @@ -2984,7 +2861,6 @@ dependencies = [ "torrust-tracker-primitives", "torrust-tracker-test-helpers", "uuid", - "warp", ] [[package]] @@ -3103,34 +2979,6 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" -[[package]] 
-name = "tungstenite" -version = "0.17.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" -dependencies = [ - "base64 0.13.1", - "byteorder", - "bytes", - "http", - "httparse", - "log", - "rand", - "sha-1", - "thiserror", - "url", - "utf-8", -] - -[[package]] -name = "twoway" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" -dependencies = [ - "memchr", -] - [[package]] name = "twox-hash" version = "1.6.3" @@ -3154,15 +3002,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - [[package]] name = "unicode-bidi" version = "0.3.11" @@ -3207,12 +3046,6 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "utf-8" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" - [[package]] name = "uuid" version = "1.3.0" @@ -3244,38 +3077,6 @@ dependencies = [ "try-lock", ] -[[package]] -name = "warp" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed7b8be92646fc3d18b06147664ebc5f48d222686cb11a8755e561a735aacc6d" -dependencies = [ - "bytes", - "futures-channel", - "futures-util", - "headers", - "http", - "hyper", - "log", - "mime", - "mime_guess", - "multipart", - "percent-encoding", - "pin-project", - "rustls-pemfile 0.2.1", - "scoped-tls", - "serde", - "serde_json", - "serde_urlencoded", - "tokio", - "tokio-rustls", - "tokio-stream", - "tokio-tungstenite", - 
"tokio-util", - "tower-service", - "tracing", -] - [[package]] name = "wasi" version = "0.10.0+wasi-snapshot-preview1" diff --git a/Cargo.toml b/Cargo.toml index dc51d8dca..064c18b0e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,7 +23,6 @@ percent-encoding = "2" binascii = "0.1" lazy_static = "1.4" openssl = { version = "0.10", features = ["vendored"] } -warp = { version = "0.3", features = ["tls"] } config = "0.13" toml = "0.5" log = { version = "0.4", features = ["release_max_level_info"] } @@ -47,6 +46,7 @@ torrust-tracker-primitives = { path = "packages/primitives" } torrust-tracker-configuration = { path = "packages/configuration" } torrust-tracker-located-error = { path = "packages/located-error" } multimap = "0.8.3" +hyper = "0.14.24" [dev-dependencies] mockall = "0.11" diff --git a/src/apis/server.rs b/src/apis/server.rs index a283bbc54..daac35999 100644 --- a/src/apis/server.rs +++ b/src/apis/server.rs @@ -7,7 +7,6 @@ use axum_server::Handle; use futures::future::BoxFuture; use futures::Future; use log::info; -use warp::hyper; use super::routes::router; use crate::signals::shutdown_signal; diff --git a/src/http/v1/launcher.rs b/src/http/v1/launcher.rs index a49efd11d..45bc54664 100644 --- a/src/http/v1/launcher.rs +++ b/src/http/v1/launcher.rs @@ -8,7 +8,6 @@ use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; use futures::future::BoxFuture; use log::info; -use warp::hyper; use super::routes::router; use crate::http::server::HttpServerLauncher; From f29aaebfaec0b828d37c2c6875d8cf8bd2483cd7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Mar 2023 13:06:20 +0000 Subject: [PATCH 406/435] feat: bump cargo dep: toml from 0.5.11 to 0.7.2 --- Cargo.lock | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++---- Cargo.toml | 2 +- 2 files changed, 57 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 42c4f8084..96fec0b2c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -449,7 +449,7 @@ dependencies = [ "rust-ini", 
"serde", "serde_json", - "toml", + "toml 0.5.11", "yaml-rust", ] @@ -1984,7 +1984,7 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" dependencies = [ - "toml", + "toml 0.5.11", ] [[package]] @@ -2482,6 +2482,15 @@ dependencies = [ "syn", ] +[[package]] +name = "serde_spanned" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0efd8caf556a6cebd3b285caf480045fcc1ac04f6bd786b09a6f11af30c4fcf4" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -2816,6 +2825,40 @@ dependencies = [ "serde", ] +[[package]] +name = "toml" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7afcae9e3f0fe2c370fd4657108972cbb2fa9db1b9f84849cefd80741b01cb6" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.19.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a1eb0622d28f4b9c90adc4ea4b2b46b47663fde9ac5fafcb14a1369d5508825" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + [[package]] name = "torrust-tracker" version = "2.3.0" @@ -2855,7 +2898,7 @@ dependencies = [ "serde_with", "thiserror", "tokio", - "toml", + "toml 0.7.2", "torrust-tracker-configuration", "torrust-tracker-located-error", "torrust-tracker-primitives", @@ -2872,7 +2915,7 @@ dependencies = [ "serde", "serde_with", "thiserror", - "toml", + "toml 0.5.11", "torrust-tracker-located-error", "torrust-tracker-primitives", "uuid", @@ -3287,6 +3330,15 @@ version = "0.42.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +[[package]] +name = "winnow" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee7b2c67f962bf5042bfd8b6a916178df33a26eec343ae064cb8e069f638fa6f" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.10.1" diff --git a/Cargo.toml b/Cargo.toml index 064c18b0e..977ec57c8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,7 +24,7 @@ binascii = "0.1" lazy_static = "1.4" openssl = { version = "0.10", features = ["vendored"] } config = "0.13" -toml = "0.5" +toml = "0.7" log = { version = "0.4", features = ["release_max_level_info"] } fern = "0.6" chrono = "0.4" From ad488c4c756aad13184854bcbcddf42e946eae04 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Mar 2023 13:19:02 +0000 Subject: [PATCH 407/435] feat: bump cargo dep: axum from 0.6.1 to 0.6.10 --- Cargo.lock | 12 ++++++------ Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 96fec0b2c..519ea50f0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -98,9 +98,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.7" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fb79c228270dcf2426e74864cabc94babb5dbab01a4314e702d2f16540e1591" +checksum = "8582122b8edba2af43eaf6b80dbfd33f421b5a0eb3a3113d21bc096ac5b44faf" dependencies = [ "async-trait", "axum-core", @@ -1270,9 +1270,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.6" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" +checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" [[package]] name = "js-sys" @@ -2966,9 +2966,9 @@ dependencies = [ 
[[package]] name = "tower-http" -version = "0.3.5" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" +checksum = "5d1d42a9b3f3ec46ba828e8d376aec14592ea199f70a06a548587ecd1c4ab658" dependencies = [ "bitflags", "bytes", diff --git a/Cargo.toml b/Cargo.toml index 977ec57c8..1c11ce0a5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,7 +38,7 @@ futures = "0.3" async-trait = "0.1" aquatic_udp_protocol = "0.2" uuid = { version = "1", features = ["v4"] } -axum = "0.6.1" +axum = "0.6.10" axum-server = { version = "0.4.4", features = ["tls-rustls"] } axum-client-ip = "0.4.0" bip_bencode = "0.4.4" From ff9985e6cc3895af6f159f871608458ac22d4c74 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Mar 2023 13:47:56 +0000 Subject: [PATCH 408/435] feat: use only major and minor version in cargo deps `axum` is set to `0.6.10` becuase setting it to `0.6` makes cargo to downgrade it to `0.6.7`. 
--- Cargo.toml | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 1c11ce0a5..6f213995f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,13 +13,13 @@ repository = "https://github.com/torrust/torrust-tracker" version = "2.3.0" [dependencies] -tokio = { version = "1", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } +tokio = { version = "1.26", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } serde = { version = "1.0", features = ["derive"] } -serde_bencode = "^0.2.3" +serde_bencode = "^0.2" serde_json = "1.0" serde_with = "2.0" -hex = "0.4.3" -percent-encoding = "2" +hex = "0.4" +percent-encoding = "2.2" binascii = "0.1" lazy_static = "1.4" openssl = { version = "0.10", features = ["vendored"] } @@ -29,7 +29,7 @@ log = { version = "0.4", features = ["release_max_level_info"] } fern = "0.6" chrono = "0.4" r2d2 = "0.8" -r2d2_mysql = "23" +r2d2_mysql = "23.0" r2d2_sqlite = { version = "0.21", features = ["bundled"] } rand = "0.8" derive_more = "0.99" @@ -39,22 +39,22 @@ async-trait = "0.1" aquatic_udp_protocol = "0.2" uuid = { version = "1", features = ["v4"] } axum = "0.6.10" -axum-server = { version = "0.4.4", features = ["tls-rustls"] } -axum-client-ip = "0.4.0" -bip_bencode = "0.4.4" +axum-server = { version = "0.4", features = ["tls-rustls"] } +axum-client-ip = "0.4" +bip_bencode = "0.4" torrust-tracker-primitives = { path = "packages/primitives" } torrust-tracker-configuration = { path = "packages/configuration" } torrust-tracker-located-error = { path = "packages/located-error" } -multimap = "0.8.3" -hyper = "0.14.24" +multimap = "0.8" +hyper = "0.14" [dev-dependencies] mockall = "0.11" -reqwest = { version = "0.11.13", features = ["json"] } -serde_urlencoded = "0.7.1" -serde_repr = "0.1.10" -serde_bytes = "0.11.8" -local-ip-address = "0.5.1" +reqwest = { version = "0.11", features = ["json"] } +serde_urlencoded = "0.7" +serde_repr = "0.1" 
+serde_bytes = "0.11" +local-ip-address = "0.5" torrust-tracker-test-helpers = { path = "packages/test-helpers" } [workspace] From 8e387cd63464e91283d0784c43e991c5196ec90a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Mar 2023 14:10:20 +0000 Subject: [PATCH 409/435] feat: update cargo aliases --- .cargo/config.toml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index e3d31cf7f..71480e92d 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,3 +1,5 @@ [alias] -cov = "llvm-cov --lcov --output-path=./.coverage/lcov.info" +cov = "llvm-cov" +cov-lcov = "llvm-cov --lcov --output-path=./.coverage/lcov.info" cov-html = "llvm-cov --html" +time = "build --timings --all-targets" From 19d33b403b930b24925d3f0b42344b5e71bcdcbb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Mar 2023 16:48:38 +0000 Subject: [PATCH 410/435] refactor: [#157] extract API contexts --- src/apis/context/auth_key/handlers.rs | 46 ++++++ src/apis/context/auth_key/mod.rs | 4 + .../auth_key/resources.rs} | 0 src/apis/context/auth_key/responses.rs | 35 +++++ src/apis/context/auth_key/routes.rs | 25 ++++ src/apis/{resources => context}/mod.rs | 2 +- src/apis/context/stats/handlers.rs | 13 ++ src/apis/context/stats/mod.rs | 4 + .../stats.rs => context/stats/resources.rs} | 0 src/apis/context/stats/responses.rs | 8 + src/apis/context/stats/routes.rs | 11 ++ src/apis/context/torrent/handlers.rs | 59 ++++++++ src/apis/context/torrent/mod.rs | 4 + src/apis/context/torrent/resources/mod.rs | 2 + .../{ => context/torrent}/resources/peer.rs | 0 .../torrent}/resources/torrent.rs | 5 +- src/apis/context/torrent/responses.rs | 18 +++ src/apis/context/torrent/routes.rs | 17 +++ src/apis/context/whitelist/handlers.rs | 46 ++++++ src/apis/context/whitelist/mod.rs | 3 + src/apis/context/whitelist/responses.rs | 20 +++ src/apis/context/whitelist/routes.rs | 22 +++ src/apis/handlers.rs | 138 ------------------ src/apis/mod.rs | 8 +- 
src/apis/responses.rs | 76 +--------- src/apis/routes.rs | 52 ++----- tests/api/asserts.rs | 6 +- tests/tracker_api.rs | 8 +- 28 files changed, 364 insertions(+), 268 deletions(-) create mode 100644 src/apis/context/auth_key/handlers.rs create mode 100644 src/apis/context/auth_key/mod.rs rename src/apis/{resources/auth_key.rs => context/auth_key/resources.rs} (100%) create mode 100644 src/apis/context/auth_key/responses.rs create mode 100644 src/apis/context/auth_key/routes.rs rename src/apis/{resources => context}/mod.rs (72%) create mode 100644 src/apis/context/stats/handlers.rs create mode 100644 src/apis/context/stats/mod.rs rename src/apis/{resources/stats.rs => context/stats/resources.rs} (100%) create mode 100644 src/apis/context/stats/responses.rs create mode 100644 src/apis/context/stats/routes.rs create mode 100644 src/apis/context/torrent/handlers.rs create mode 100644 src/apis/context/torrent/mod.rs create mode 100644 src/apis/context/torrent/resources/mod.rs rename src/apis/{ => context/torrent}/resources/peer.rs (100%) rename src/apis/{ => context/torrent}/resources/torrent.rs (96%) create mode 100644 src/apis/context/torrent/responses.rs create mode 100644 src/apis/context/torrent/routes.rs create mode 100644 src/apis/context/whitelist/handlers.rs create mode 100644 src/apis/context/whitelist/mod.rs create mode 100644 src/apis/context/whitelist/responses.rs create mode 100644 src/apis/context/whitelist/routes.rs delete mode 100644 src/apis/handlers.rs diff --git a/src/apis/context/auth_key/handlers.rs b/src/apis/context/auth_key/handlers.rs new file mode 100644 index 000000000..af78b3f4c --- /dev/null +++ b/src/apis/context/auth_key/handlers.rs @@ -0,0 +1,46 @@ +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; + +use axum::extract::{Path, State}; +use axum::response::Response; +use serde::Deserialize; + +use super::responses::{ + auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, 
failed_to_reload_keys_response, +}; +use crate::apis::context::auth_key::resources::AuthKey; +use crate::apis::responses::{invalid_auth_key_param_response, ok_response}; +use crate::tracker::auth::Key; +use crate::tracker::Tracker; + +pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { + let seconds_valid = seconds_valid_or_key; + match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { + Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), + Err(e) => failed_to_generate_key_response(e), + } +} + +#[derive(Deserialize)] +pub struct KeyParam(String); + +pub async fn delete_auth_key_handler( + State(tracker): State>, + Path(seconds_valid_or_key): Path, +) -> Response { + match Key::from_str(&seconds_valid_or_key.0) { + Err(_) => invalid_auth_key_param_response(&seconds_valid_or_key.0), + Ok(key) => match tracker.remove_auth_key(&key.to_string()).await { + Ok(_) => ok_response(), + Err(e) => failed_to_delete_key_response(e), + }, + } +} + +pub async fn reload_keys_handler(State(tracker): State>) -> Response { + match tracker.load_keys_from_database().await { + Ok(_) => ok_response(), + Err(e) => failed_to_reload_keys_response(e), + } +} diff --git a/src/apis/context/auth_key/mod.rs b/src/apis/context/auth_key/mod.rs new file mode 100644 index 000000000..746a2f064 --- /dev/null +++ b/src/apis/context/auth_key/mod.rs @@ -0,0 +1,4 @@ +pub mod handlers; +pub mod resources; +pub mod responses; +pub mod routes; diff --git a/src/apis/resources/auth_key.rs b/src/apis/context/auth_key/resources.rs similarity index 100% rename from src/apis/resources/auth_key.rs rename to src/apis/context/auth_key/resources.rs diff --git a/src/apis/context/auth_key/responses.rs b/src/apis/context/auth_key/responses.rs new file mode 100644 index 000000000..8c1bf58dc --- /dev/null +++ b/src/apis/context/auth_key/responses.rs @@ -0,0 +1,35 @@ +use std::error::Error; + +use axum::http::{header, StatusCode}; +use 
axum::response::{IntoResponse, Response}; + +use crate::apis::context::auth_key::resources::AuthKey; +use crate::apis::responses::unhandled_rejection_response; + +/// # Panics +/// +/// Will panic if it can't convert the `AuthKey` resource to json +#[must_use] +pub fn auth_key_response(auth_key: &AuthKey) -> Response { + ( + StatusCode::OK, + [(header::CONTENT_TYPE, "application/json; charset=utf-8")], + serde_json::to_string(auth_key).unwrap(), + ) + .into_response() +} + +#[must_use] +pub fn failed_to_generate_key_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to generate key: {e}")) +} + +#[must_use] +pub fn failed_to_delete_key_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to delete key: {e}")) +} + +#[must_use] +pub fn failed_to_reload_keys_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to reload keys: {e}")) +} diff --git a/src/apis/context/auth_key/routes.rs b/src/apis/context/auth_key/routes.rs new file mode 100644 index 000000000..2a4f5b9dd --- /dev/null +++ b/src/apis/context/auth_key/routes.rs @@ -0,0 +1,25 @@ +use std::sync::Arc; + +use axum::routing::{get, post}; +use axum::Router; + +use super::handlers::{delete_auth_key_handler, generate_auth_key_handler, reload_keys_handler}; +use crate::tracker::Tracker; + +pub fn add(router: Router, tracker: Arc) -> Router { + // Keys + router + .route( + // code-review: Axum does not allow two routes with the same path but different path variable name. 
+ // In the new major API version, `seconds_valid` should be a POST form field so that we will have two paths: + // POST /api/key + // DELETE /api/key/:key + "/api/key/:seconds_valid_or_key", + post(generate_auth_key_handler) + .with_state(tracker.clone()) + .delete(delete_auth_key_handler) + .with_state(tracker.clone()), + ) + // Keys command + .route("/api/keys/reload", get(reload_keys_handler).with_state(tracker)) +} diff --git a/src/apis/resources/mod.rs b/src/apis/context/mod.rs similarity index 72% rename from src/apis/resources/mod.rs rename to src/apis/context/mod.rs index bf3ce273b..6d3fb7566 100644 --- a/src/apis/resources/mod.rs +++ b/src/apis/context/mod.rs @@ -1,4 +1,4 @@ pub mod auth_key; -pub mod peer; pub mod stats; pub mod torrent; +pub mod whitelist; diff --git a/src/apis/context/stats/handlers.rs b/src/apis/context/stats/handlers.rs new file mode 100644 index 000000000..e93e65996 --- /dev/null +++ b/src/apis/context/stats/handlers.rs @@ -0,0 +1,13 @@ +use std::sync::Arc; + +use axum::extract::State; +use axum::response::Json; + +use super::resources::Stats; +use super::responses::stats_response; +use crate::tracker::services::statistics::get_metrics; +use crate::tracker::Tracker; + +pub async fn get_stats_handler(State(tracker): State>) -> Json { + stats_response(get_metrics(tracker.clone()).await) +} diff --git a/src/apis/context/stats/mod.rs b/src/apis/context/stats/mod.rs new file mode 100644 index 000000000..746a2f064 --- /dev/null +++ b/src/apis/context/stats/mod.rs @@ -0,0 +1,4 @@ +pub mod handlers; +pub mod resources; +pub mod responses; +pub mod routes; diff --git a/src/apis/resources/stats.rs b/src/apis/context/stats/resources.rs similarity index 100% rename from src/apis/resources/stats.rs rename to src/apis/context/stats/resources.rs diff --git a/src/apis/context/stats/responses.rs b/src/apis/context/stats/responses.rs new file mode 100644 index 000000000..ea9a2480a --- /dev/null +++ b/src/apis/context/stats/responses.rs @@ -0,0 +1,8 
@@ +use axum::response::Json; + +use super::resources::Stats; +use crate::tracker::services::statistics::TrackerMetrics; + +pub fn stats_response(tracker_metrics: TrackerMetrics) -> Json { + Json(Stats::from(tracker_metrics)) +} diff --git a/src/apis/context/stats/routes.rs b/src/apis/context/stats/routes.rs new file mode 100644 index 000000000..8791ed25a --- /dev/null +++ b/src/apis/context/stats/routes.rs @@ -0,0 +1,11 @@ +use std::sync::Arc; + +use axum::routing::get; +use axum::Router; + +use super::handlers::get_stats_handler; +use crate::tracker::Tracker; + +pub fn add(router: Router, tracker: Arc) -> Router { + router.route("/api/stats", get(get_stats_handler).with_state(tracker)) +} diff --git a/src/apis/context/torrent/handlers.rs b/src/apis/context/torrent/handlers.rs new file mode 100644 index 000000000..1a8280e75 --- /dev/null +++ b/src/apis/context/torrent/handlers.rs @@ -0,0 +1,59 @@ +use std::fmt; +use std::str::FromStr; +use std::sync::Arc; + +use axum::extract::{Path, Query, State}; +use axum::response::{IntoResponse, Json, Response}; +use serde::{de, Deserialize, Deserializer}; + +use super::resources::torrent::ListItem; +use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; +use crate::apis::responses::invalid_info_hash_param_response; +use crate::apis::InfoHashParam; +use crate::protocol::info_hash::InfoHash; +use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; +use crate::tracker::Tracker; + +pub async fn get_torrent_handler(State(tracker): State>, Path(info_hash): Path) -> Response { + match InfoHash::from_str(&info_hash.0) { + Err(_) => invalid_info_hash_param_response(&info_hash.0), + Ok(info_hash) => match get_torrent_info(tracker.clone(), &info_hash).await { + Some(info) => torrent_info_response(info).into_response(), + None => torrent_not_known_response(), + }, + } +} + +#[derive(Deserialize)] +pub struct PaginationParams { + #[serde(default, deserialize_with = 
"empty_string_as_none")] + pub offset: Option, + pub limit: Option, +} + +pub async fn get_torrents_handler( + State(tracker): State>, + pagination: Query, +) -> Json> { + torrent_list_response( + &get_torrents( + tracker.clone(), + &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), + ) + .await, + ) +} + +/// Serde deserialization decorator to map empty Strings to None, +fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> +where + D: Deserializer<'de>, + T: FromStr, + T::Err: fmt::Display, +{ + let opt = Option::::deserialize(de)?; + match opt.as_deref() { + None | Some("") => Ok(None), + Some(s) => FromStr::from_str(s).map_err(de::Error::custom).map(Some), + } +} diff --git a/src/apis/context/torrent/mod.rs b/src/apis/context/torrent/mod.rs new file mode 100644 index 000000000..746a2f064 --- /dev/null +++ b/src/apis/context/torrent/mod.rs @@ -0,0 +1,4 @@ +pub mod handlers; +pub mod resources; +pub mod responses; +pub mod routes; diff --git a/src/apis/context/torrent/resources/mod.rs b/src/apis/context/torrent/resources/mod.rs new file mode 100644 index 000000000..46d62aac5 --- /dev/null +++ b/src/apis/context/torrent/resources/mod.rs @@ -0,0 +1,2 @@ +pub mod peer; +pub mod torrent; diff --git a/src/apis/resources/peer.rs b/src/apis/context/torrent/resources/peer.rs similarity index 100% rename from src/apis/resources/peer.rs rename to src/apis/context/torrent/resources/peer.rs diff --git a/src/apis/resources/torrent.rs b/src/apis/context/torrent/resources/torrent.rs similarity index 96% rename from src/apis/resources/torrent.rs rename to src/apis/context/torrent/resources/torrent.rs index 3d8b2f427..1099dc923 100644 --- a/src/apis/resources/torrent.rs +++ b/src/apis/context/torrent/resources/torrent.rs @@ -74,8 +74,9 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::apis::resources::peer::Peer; - use crate::apis::resources::torrent::{ListItem, Torrent}; + use super::Torrent; + use 
crate::apis::context::torrent::resources::peer::Peer; + use crate::apis::context::torrent::resources::torrent::ListItem; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; diff --git a/src/apis/context/torrent/responses.rs b/src/apis/context/torrent/responses.rs new file mode 100644 index 000000000..48e3c6e7f --- /dev/null +++ b/src/apis/context/torrent/responses.rs @@ -0,0 +1,18 @@ +use axum::response::{IntoResponse, Json, Response}; +use serde_json::json; + +use super::resources::torrent::{ListItem, Torrent}; +use crate::tracker::services::torrent::{BasicInfo, Info}; + +pub fn torrent_list_response(basic_infos: &[BasicInfo]) -> Json> { + Json(ListItem::new_vec(basic_infos)) +} + +pub fn torrent_info_response(info: Info) -> Json { + Json(Torrent::from(info)) +} + +#[must_use] +pub fn torrent_not_known_response() -> Response { + Json(json!("torrent not known")).into_response() +} diff --git a/src/apis/context/torrent/routes.rs b/src/apis/context/torrent/routes.rs new file mode 100644 index 000000000..234f17223 --- /dev/null +++ b/src/apis/context/torrent/routes.rs @@ -0,0 +1,17 @@ +use std::sync::Arc; + +use axum::routing::get; +use axum::Router; + +use super::handlers::{get_torrent_handler, get_torrents_handler}; +use crate::tracker::Tracker; + +pub fn add(router: Router, tracker: Arc) -> Router { + // Torrents + router + .route( + "/api/torrent/:info_hash", + get(get_torrent_handler).with_state(tracker.clone()), + ) + .route("/api/torrents", get(get_torrents_handler).with_state(tracker)) +} diff --git a/src/apis/context/whitelist/handlers.rs b/src/apis/context/whitelist/handlers.rs new file mode 100644 index 000000000..c1e90a509 --- /dev/null +++ b/src/apis/context/whitelist/handlers.rs @@ -0,0 +1,46 @@ +use std::str::FromStr; +use std::sync::Arc; + +use axum::extract::{Path, State}; +use axum::response::Response; + +use super::responses::{ + failed_to_reload_whitelist_response, 
failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, +}; +use crate::apis::responses::{invalid_info_hash_param_response, ok_response}; +use crate::apis::InfoHashParam; +use crate::protocol::info_hash::InfoHash; +use crate::tracker::Tracker; + +pub async fn add_torrent_to_whitelist_handler( + State(tracker): State>, + Path(info_hash): Path, +) -> Response { + match InfoHash::from_str(&info_hash.0) { + Err(_) => invalid_info_hash_param_response(&info_hash.0), + Ok(info_hash) => match tracker.add_torrent_to_whitelist(&info_hash).await { + Ok(_) => ok_response(), + Err(e) => failed_to_whitelist_torrent_response(e), + }, + } +} + +pub async fn remove_torrent_from_whitelist_handler( + State(tracker): State>, + Path(info_hash): Path, +) -> Response { + match InfoHash::from_str(&info_hash.0) { + Err(_) => invalid_info_hash_param_response(&info_hash.0), + Ok(info_hash) => match tracker.remove_torrent_from_whitelist(&info_hash).await { + Ok(_) => ok_response(), + Err(e) => failed_to_remove_torrent_from_whitelist_response(e), + }, + } +} + +pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { + match tracker.load_whitelist_from_database().await { + Ok(_) => ok_response(), + Err(e) => failed_to_reload_whitelist_response(e), + } +} diff --git a/src/apis/context/whitelist/mod.rs b/src/apis/context/whitelist/mod.rs new file mode 100644 index 000000000..f6f000f34 --- /dev/null +++ b/src/apis/context/whitelist/mod.rs @@ -0,0 +1,3 @@ +pub mod handlers; +pub mod responses; +pub mod routes; diff --git a/src/apis/context/whitelist/responses.rs b/src/apis/context/whitelist/responses.rs new file mode 100644 index 000000000..dd2727898 --- /dev/null +++ b/src/apis/context/whitelist/responses.rs @@ -0,0 +1,20 @@ +use std::error::Error; + +use axum::response::Response; + +use crate::apis::responses::unhandled_rejection_response; + +#[must_use] +pub fn failed_to_remove_torrent_from_whitelist_response(e: E) -> Response { + 
unhandled_rejection_response(format!("failed to remove torrent from whitelist: {e}")) +} + +#[must_use] +pub fn failed_to_whitelist_torrent_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to whitelist torrent: {e}")) +} + +#[must_use] +pub fn failed_to_reload_whitelist_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to reload whitelist: {e}")) +} diff --git a/src/apis/context/whitelist/routes.rs b/src/apis/context/whitelist/routes.rs new file mode 100644 index 000000000..1349f8bc1 --- /dev/null +++ b/src/apis/context/whitelist/routes.rs @@ -0,0 +1,22 @@ +use std::sync::Arc; + +use axum::routing::{delete, get, post}; +use axum::Router; + +use super::handlers::{add_torrent_to_whitelist_handler, reload_whitelist_handler, remove_torrent_from_whitelist_handler}; +use crate::tracker::Tracker; + +pub fn add(router: Router, tracker: Arc) -> Router { + router + // Whitelisted torrents + .route( + "/api/whitelist/:info_hash", + post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), + ) + .route( + "/api/whitelist/:info_hash", + delete(remove_torrent_from_whitelist_handler).with_state(tracker.clone()), + ) + // Whitelist commands + .route("/api/whitelist/reload", get(reload_whitelist_handler).with_state(tracker)) +} diff --git a/src/apis/handlers.rs b/src/apis/handlers.rs deleted file mode 100644 index 410def39b..000000000 --- a/src/apis/handlers.rs +++ /dev/null @@ -1,138 +0,0 @@ -use std::fmt; -use std::str::FromStr; -use std::sync::Arc; -use std::time::Duration; - -use axum::extract::{Path, Query, State}; -use axum::response::{IntoResponse, Json, Response}; -use serde::{de, Deserialize, Deserializer}; - -use super::responses::{ - auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, failed_to_reload_keys_response, - failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, - invalid_auth_key_param_response, 
invalid_info_hash_param_response, ok_response, stats_response, torrent_info_response, - torrent_list_response, torrent_not_known_response, -}; -use crate::apis::resources::auth_key::AuthKey; -use crate::apis::resources::stats::Stats; -use crate::apis::resources::torrent::ListItem; -use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth::Key; -use crate::tracker::services::statistics::get_metrics; -use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; -use crate::tracker::Tracker; - -pub async fn get_stats_handler(State(tracker): State>) -> Json { - stats_response(get_metrics(tracker.clone()).await) -} - -#[derive(Deserialize)] -pub struct InfoHashParam(String); - -pub async fn get_torrent_handler(State(tracker): State>, Path(info_hash): Path) -> Response { - match InfoHash::from_str(&info_hash.0) { - Err(_) => invalid_info_hash_param_response(&info_hash.0), - Ok(info_hash) => match get_torrent_info(tracker.clone(), &info_hash).await { - Some(info) => torrent_info_response(info).into_response(), - None => torrent_not_known_response(), - }, - } -} - -#[derive(Deserialize)] -pub struct PaginationParams { - #[serde(default, deserialize_with = "empty_string_as_none")] - pub offset: Option, - pub limit: Option, -} - -pub async fn get_torrents_handler( - State(tracker): State>, - pagination: Query, -) -> Json> { - torrent_list_response( - &get_torrents( - tracker.clone(), - &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), - ) - .await, - ) -} - -pub async fn add_torrent_to_whitelist_handler( - State(tracker): State>, - Path(info_hash): Path, -) -> Response { - match InfoHash::from_str(&info_hash.0) { - Err(_) => invalid_info_hash_param_response(&info_hash.0), - Ok(info_hash) => match tracker.add_torrent_to_whitelist(&info_hash).await { - Ok(_) => ok_response(), - Err(e) => failed_to_whitelist_torrent_response(e), - }, - } -} - -pub async fn remove_torrent_from_whitelist_handler( - State(tracker): State>, - 
Path(info_hash): Path, -) -> Response { - match InfoHash::from_str(&info_hash.0) { - Err(_) => invalid_info_hash_param_response(&info_hash.0), - Ok(info_hash) => match tracker.remove_torrent_from_whitelist(&info_hash).await { - Ok(_) => ok_response(), - Err(e) => failed_to_remove_torrent_from_whitelist_response(e), - }, - } -} - -pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { - match tracker.load_whitelist_from_database().await { - Ok(_) => ok_response(), - Err(e) => failed_to_reload_whitelist_response(e), - } -} - -pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { - let seconds_valid = seconds_valid_or_key; - match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { - Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), - Err(e) => failed_to_generate_key_response(e), - } -} - -#[derive(Deserialize)] -pub struct KeyParam(String); - -pub async fn delete_auth_key_handler( - State(tracker): State>, - Path(seconds_valid_or_key): Path, -) -> Response { - match Key::from_str(&seconds_valid_or_key.0) { - Err(_) => invalid_auth_key_param_response(&seconds_valid_or_key.0), - Ok(key) => match tracker.remove_auth_key(&key.to_string()).await { - Ok(_) => ok_response(), - Err(e) => failed_to_delete_key_response(e), - }, - } -} - -pub async fn reload_keys_handler(State(tracker): State>) -> Response { - match tracker.load_keys_from_database().await { - Ok(_) => ok_response(), - Err(e) => failed_to_reload_keys_response(e), - } -} - -/// Serde deserialization decorator to map empty Strings to None, -fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> -where - D: Deserializer<'de>, - T: FromStr, - T::Err: fmt::Display, -{ - let opt = Option::::deserialize(de)?; - match opt.as_deref() { - None | Some("") => Ok(None), - Some(s) => FromStr::from_str(s).map_err(de::Error::custom).map(Some), - } -} diff --git a/src/apis/mod.rs b/src/apis/mod.rs index 
a646d5543..fd7fdb6e5 100644 --- a/src/apis/mod.rs +++ b/src/apis/mod.rs @@ -1,6 +1,10 @@ -pub mod handlers; +pub mod context; pub mod middlewares; -pub mod resources; pub mod responses; pub mod routes; pub mod server; + +use serde::Deserialize; + +#[derive(Deserialize)] +pub struct InfoHashParam(pub String); diff --git a/src/apis/responses.rs b/src/apis/responses.rs index c0a6cbcf8..4a9c39bf9 100644 --- a/src/apis/responses.rs +++ b/src/apis/responses.rs @@ -1,15 +1,6 @@ -use std::error::Error; - use axum::http::{header, StatusCode}; -use axum::response::{IntoResponse, Json, Response}; +use axum::response::{IntoResponse, Response}; use serde::Serialize; -use serde_json::json; - -use crate::apis::resources::auth_key::AuthKey; -use crate::apis::resources::stats::Stats; -use crate::apis::resources::torrent::{ListItem, Torrent}; -use crate::tracker::services::statistics::TrackerMetrics; -use crate::tracker::services::torrent::{BasicInfo, Info}; /* code-review: When Axum cannot parse a path or query param it shows a message like this: @@ -38,36 +29,6 @@ pub enum ActionStatus<'a> { Err { reason: std::borrow::Cow<'a, str> }, } -// Resource responses - -#[must_use] -pub fn stats_response(tracker_metrics: TrackerMetrics) -> Json { - Json(Stats::from(tracker_metrics)) -} - -#[must_use] -pub fn torrent_list_response(basic_infos: &[BasicInfo]) -> Json> { - Json(ListItem::new_vec(basic_infos)) -} - -#[must_use] -pub fn torrent_info_response(info: Info) -> Json { - Json(Torrent::from(info)) -} - -/// # Panics -/// -/// Will panic if it can't convert the `AuthKey` resource to json -#[must_use] -pub fn auth_key_response(auth_key: &AuthKey) -> Response { - ( - StatusCode::OK, - [(header::CONTENT_TYPE, "application/json; charset=utf-8")], - serde_json::to_string(auth_key).unwrap(), - ) - .into_response() -} - // OK response /// # Panics @@ -106,41 +67,6 @@ fn bad_request_response(body: &str) -> Response { .into_response() } -#[must_use] -pub fn torrent_not_known_response() -> 
Response { - Json(json!("torrent not known")).into_response() -} - -#[must_use] -pub fn failed_to_remove_torrent_from_whitelist_response(e: E) -> Response { - unhandled_rejection_response(format!("failed to remove torrent from whitelist: {e}")) -} - -#[must_use] -pub fn failed_to_whitelist_torrent_response(e: E) -> Response { - unhandled_rejection_response(format!("failed to whitelist torrent: {e}")) -} - -#[must_use] -pub fn failed_to_reload_whitelist_response(e: E) -> Response { - unhandled_rejection_response(format!("failed to reload whitelist: {e}")) -} - -#[must_use] -pub fn failed_to_generate_key_response(e: E) -> Response { - unhandled_rejection_response(format!("failed to generate key: {e}")) -} - -#[must_use] -pub fn failed_to_delete_key_response(e: E) -> Response { - unhandled_rejection_response(format!("failed to delete key: {e}")) -} - -#[must_use] -pub fn failed_to_reload_keys_response(e: E) -> Response { - unhandled_rejection_response(format!("failed to reload keys: {e}")) -} - /// This error response is to keep backward compatibility with the old API. /// It should be a plain text or json. 
#[must_use] diff --git a/src/apis/routes.rs b/src/apis/routes.rs index ecc51090c..c567e50da 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -1,53 +1,19 @@ use std::sync::Arc; -use axum::routing::{delete, get, post}; use axum::{middleware, Router}; -use super::handlers::{ - add_torrent_to_whitelist_handler, delete_auth_key_handler, generate_auth_key_handler, get_stats_handler, get_torrent_handler, - get_torrents_handler, reload_keys_handler, reload_whitelist_handler, remove_torrent_from_whitelist_handler, -}; +use super::context::{auth_key, stats, torrent, whitelist}; use super::middlewares::auth::auth; use crate::tracker::Tracker; #[allow(clippy::needless_pass_by_value)] pub fn router(tracker: Arc) -> Router { - Router::new() - // Stats - .route("/api/stats", get(get_stats_handler).with_state(tracker.clone())) - // Torrents - .route( - "/api/torrent/:info_hash", - get(get_torrent_handler).with_state(tracker.clone()), - ) - .route("/api/torrents", get(get_torrents_handler).with_state(tracker.clone())) - // Whitelisted torrents - .route( - "/api/whitelist/:info_hash", - post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), - ) - .route( - "/api/whitelist/:info_hash", - delete(remove_torrent_from_whitelist_handler).with_state(tracker.clone()), - ) - // Whitelist command - .route( - "/api/whitelist/reload", - get(reload_whitelist_handler).with_state(tracker.clone()), - ) - // Keys - .route( - // code-review: Axum does not allow two routes with the same path but different path variable name. 
- // In the new major API version, `seconds_valid` should be a POST form field so that we will have two paths: - // POST /api/key - // DELETE /api/key/:key - "/api/key/:seconds_valid_or_key", - post(generate_auth_key_handler) - .with_state(tracker.clone()) - .delete(delete_auth_key_handler) - .with_state(tracker.clone()), - ) - // Keys command - .route("/api/keys/reload", get(reload_keys_handler).with_state(tracker.clone())) - .layer(middleware::from_fn_with_state(tracker.config.clone(), auth)) + let router = Router::new(); + + let router = auth_key::routes::add(router, tracker.clone()); + let router = stats::routes::add(router, tracker.clone()); + let router = whitelist::routes::add(router, tracker.clone()); + let router = torrent::routes::add(router, tracker.clone()); + + router.layer(middleware::from_fn_with_state(tracker.config.clone(), auth)) } diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index 5a4abfb62..c7567e6fe 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -1,9 +1,9 @@ // code-review: should we use macros to return the exact line where the assert fails? 
use reqwest::Response; -use torrust_tracker::apis::resources::auth_key::AuthKey; -use torrust_tracker::apis::resources::stats::Stats; -use torrust_tracker::apis::resources::torrent::{ListItem, Torrent}; +use torrust_tracker::apis::context::auth_key::resources::AuthKey; +use torrust_tracker::apis::context::stats::resources::Stats; +use torrust_tracker::apis::context::torrent::resources::torrent::{ListItem, Torrent}; // Resource responses diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index dac5907c2..ff4eb295b 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -132,7 +132,7 @@ mod tracker_apis { mod for_stats_resources { use std::str::FromStr; - use torrust_tracker::apis::resources::stats::Stats; + use torrust_tracker::apis::context::stats::resources::Stats; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; @@ -206,8 +206,8 @@ mod tracker_apis { mod for_torrent_resources { use std::str::FromStr; - use torrust_tracker::apis::resources::torrent::Torrent; - use torrust_tracker::apis::resources::{self, torrent}; + use torrust_tracker::apis::context::torrent::resources::peer::Peer; + use torrust_tracker::apis::context::torrent::resources::torrent::{self, Torrent}; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; @@ -383,7 +383,7 @@ mod tracker_apis { seeders: 1, completed: 0, leechers: 0, - peers: Some(vec![resources::peer::Peer::from(peer)]), + peers: Some(vec![Peer::from(peer)]), }, ) .await; From 8fe52c321371100a2bbae40f0df888adc1fa9d8c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 12 Mar 2023 09:43:13 +0000 Subject: [PATCH 411/435] refactor(api): reorganize api tests in contexts After changing production code we follow the API contexts structure in tests too. 
--- tests/api/mod.rs | 1 + tests/api/tests/authentication.rs | 83 +++ tests/api/tests/configuration.rs | 17 + tests/api/tests/context/auth_key.rs | 265 ++++++++ tests/api/tests/context/mod.rs | 4 + tests/api/tests/context/stats.rs | 71 ++ tests/api/tests/context/torrent.rs | 249 +++++++ tests/api/tests/context/whitelist.rs | 258 +++++++ tests/api/tests/fixtures.rs | 13 + tests/api/tests/mod.rs | 4 + tests/tracker_api.rs | 983 +-------------------------- 11 files changed, 966 insertions(+), 982 deletions(-) create mode 100644 tests/api/tests/authentication.rs create mode 100644 tests/api/tests/configuration.rs create mode 100644 tests/api/tests/context/auth_key.rs create mode 100644 tests/api/tests/context/mod.rs create mode 100644 tests/api/tests/context/stats.rs create mode 100644 tests/api/tests/context/torrent.rs create mode 100644 tests/api/tests/context/whitelist.rs create mode 100644 tests/api/tests/fixtures.rs create mode 100644 tests/api/tests/mod.rs diff --git a/tests/api/mod.rs b/tests/api/mod.rs index fcb24e491..f59210b22 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -6,6 +6,7 @@ pub mod asserts; pub mod client; pub mod connection_info; pub mod test_environment; +pub mod tests; /// It forces a database error by dropping all tables. /// That makes any query fail. 
diff --git a/tests/api/tests/authentication.rs b/tests/api/tests/authentication.rs new file mode 100644 index 000000000..5183c8909 --- /dev/null +++ b/tests/api/tests/authentication.rs @@ -0,0 +1,83 @@ +use torrust_tracker_test_helpers::configuration; + +use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; +use crate::api::client::Client; +use crate::api::test_environment::running_test_environment; +use crate::common::http::{Query, QueryParam}; + +#[tokio::test] +async fn should_authenticate_requests_by_using_a_token_query_param() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let token = test_env.get_connection_info().api_token.unwrap(); + + let response = Client::new(test_env.get_connection_info()) + .get_request_with_query("stats", Query::params([QueryParam::new("token", &token)].to_vec())) + .await; + + assert_eq!(response.status(), 200); + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_authenticate_requests_when_the_token_is_missing() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let response = Client::new(test_env.get_connection_info()) + .get_request_with_query("stats", Query::default()) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_authenticate_requests_when_the_token_is_empty() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let response = Client::new(test_env.get_connection_info()) + .get_request_with_query("stats", Query::params([QueryParam::new("token", "")].to_vec())) + .await; + + assert_token_not_valid(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_authenticate_requests_when_the_token_is_invalid() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let response = Client::new(test_env.get_connection_info()) + .get_request_with_query("stats", 
Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec())) + .await; + + assert_token_not_valid(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_query() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let token = test_env.get_connection_info().api_token.unwrap(); + + // At the beginning of the query component + let response = Client::new(test_env.get_connection_info()) + .get_request(&format!("torrents?token={token}&limit=1")) + .await; + + assert_eq!(response.status(), 200); + + // At the end of the query component + let response = Client::new(test_env.get_connection_info()) + .get_request(&format!("torrents?limit=1&token={token}")) + .await; + + assert_eq!(response.status(), 200); + + test_env.stop().await; +} diff --git a/tests/api/tests/configuration.rs b/tests/api/tests/configuration.rs new file mode 100644 index 000000000..f81201191 --- /dev/null +++ b/tests/api/tests/configuration.rs @@ -0,0 +1,17 @@ +use torrust_tracker_test_helpers::configuration; + +use crate::api::test_environment::stopped_test_environment; + +#[tokio::test] +#[should_panic] +async fn should_fail_with_ssl_enabled_and_bad_ssl_config() { + let mut test_env = stopped_test_environment(configuration::ephemeral()); + + let cfg = test_env.config_mut(); + + cfg.ssl_enabled = true; + cfg.ssl_key_path = Some("bad key path".to_string()); + cfg.ssl_cert_path = Some("bad cert path".to_string()); + + test_env.start().await; +} diff --git a/tests/api/tests/context/auth_key.rs b/tests/api/tests/context/auth_key.rs new file mode 100644 index 000000000..ee7121615 --- /dev/null +++ b/tests/api/tests/context/auth_key.rs @@ -0,0 +1,265 @@ +use std::time::Duration; + +use torrust_tracker::tracker::auth::Key; +use torrust_tracker_test_helpers::configuration; + +use crate::api::asserts::{ + assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, 
assert_failed_to_reload_keys, + assert_invalid_auth_key_param, assert_invalid_key_duration_param, assert_ok, assert_token_not_valid, assert_unauthorized, +}; +use crate::api::client::Client; +use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::api::force_database_error; +use crate::api::test_environment::running_test_environment; + +#[tokio::test] +async fn should_allow_generating_a_new_auth_key() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + + let response = Client::new(test_env.get_connection_info()) + .generate_auth_key(seconds_valid) + .await; + + let auth_key_resource = assert_auth_key_utf8(response).await; + + // Verify the key with the tracker + assert!(test_env + .tracker + .verify_auth_key(&auth_key_resource.key.parse::().unwrap()) + .await + .is_ok()); + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .generate_auth_key(seconds_valid) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .generate_auth_key(seconds_valid) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let invalid_key_durations = [ + // "", it returns 404 + // " ", it returns 404 + "-1", "text", + ]; + + for invalid_key_duration in invalid_key_durations { + let response = Client::new(test_env.get_connection_info()) + 
.post(&format!("key/{invalid_key_duration}")) + .await; + + assert_invalid_key_duration_param(response, invalid_key_duration).await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_the_auth_key_cannot_be_generated() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + force_database_error(&test_env.tracker); + + let seconds_valid = 60; + let response = Client::new(test_env.get_connection_info()) + .generate_auth_key(seconds_valid) + .await; + + assert_failed_to_generate_key(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_deleting_an_auth_key() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + let auth_key = test_env + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(test_env.get_connection_info()) + .delete_auth_key(&auth_key.key.to_string()) + .await; + + assert_ok(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let invalid_auth_keys = [ + // "", it returns a 404 + // " ", it returns a 404 + "0", + "-1", + "INVALID AUTH KEY ID", + "IrweYtVuQPGbG9Jzx1DihcPmJGGpVy8", // 32 char key cspell:disable-line + "IrweYtVuQPGbG9Jzx1DihcPmJGGpVy8zs", // 34 char key cspell:disable-line + ]; + + for invalid_auth_key in &invalid_auth_keys { + let response = Client::new(test_env.get_connection_info()) + .delete_auth_key(invalid_auth_key) + .await; + + assert_invalid_auth_key_param(response, invalid_auth_key).await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_the_auth_key_cannot_be_deleted() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + let auth_key = test_env + .tracker + 
.generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + force_database_error(&test_env.tracker); + + let response = Client::new(test_env.get_connection_info()) + .delete_auth_key(&auth_key.key.to_string()) + .await; + + assert_failed_to_delete_key(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + + // Generate new auth key + let auth_key = test_env + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .delete_auth_key(&auth_key.key.to_string()) + .await; + + assert_token_not_valid(response).await; + + // Generate new auth key + let auth_key = test_env + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .delete_auth_key(&auth_key.key.to_string()) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_reloading_keys() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + test_env + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(test_env.get_connection_info()).reload_keys().await; + + assert_ok(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_keys_cannot_be_reloaded() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + test_env + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + 
force_database_error(&test_env.tracker); + + let response = Client::new(test_env.get_connection_info()).reload_keys().await; + + assert_failed_to_reload_keys(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_reloading_keys_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + test_env + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .reload_keys() + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .reload_keys() + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} diff --git a/tests/api/tests/context/mod.rs b/tests/api/tests/context/mod.rs new file mode 100644 index 000000000..6d3fb7566 --- /dev/null +++ b/tests/api/tests/context/mod.rs @@ -0,0 +1,4 @@ +pub mod auth_key; +pub mod stats; +pub mod torrent; +pub mod whitelist; diff --git a/tests/api/tests/context/stats.rs b/tests/api/tests/context/stats.rs new file mode 100644 index 000000000..99ae405b7 --- /dev/null +++ b/tests/api/tests/context/stats.rs @@ -0,0 +1,71 @@ +use std::str::FromStr; + +use torrust_tracker::apis::context::stats::resources::Stats; +use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker_test_helpers::configuration; + +use crate::api::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; +use crate::api::client::Client; +use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::api::test_environment::running_test_environment; +use crate::common::fixtures::PeerBuilder; + +#[tokio::test] +async fn should_allow_getting_tracker_statistics() { + let test_env = 
running_test_environment(configuration::ephemeral()).await; + + test_env + .add_torrent_peer( + &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + &PeerBuilder::default().into(), + ) + .await; + + let response = Client::new(test_env.get_connection_info()).get_tracker_statistics().await; + + assert_stats( + response, + Stats { + torrents: 1, + seeders: 1, + completed: 0, + leechers: 0, + tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + udp6_announces_handled: 0, + udp6_scrapes_handled: 0, + }, + ) + .await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .get_tracker_statistics() + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .get_tracker_statistics() + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} diff --git a/tests/api/tests/context/torrent.rs b/tests/api/tests/context/torrent.rs new file mode 100644 index 000000000..998c2afaf --- /dev/null +++ b/tests/api/tests/context/torrent.rs @@ -0,0 +1,249 @@ +use std::str::FromStr; + +use torrust_tracker::apis::context::torrent::resources::peer::Peer; +use torrust_tracker::apis::context::torrent::resources::torrent::{self, Torrent}; +use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker_test_helpers::configuration; + +use crate::api::asserts::{ + assert_bad_request, assert_invalid_infohash_param, 
assert_not_found, assert_token_not_valid, assert_torrent_info, + assert_torrent_list, assert_torrent_not_known, assert_unauthorized, +}; +use crate::api::client::Client; +use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::api::test_environment::running_test_environment; +use crate::api::tests::fixtures::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; +use crate::common::fixtures::PeerBuilder; +use crate::common::http::{Query, QueryParam}; + +#[tokio::test] +async fn should_allow_getting_torrents() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + test_env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; + + let response = Client::new(test_env.get_connection_info()).get_torrents(Query::empty()).await; + + assert_torrent_list( + response, + vec![torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_limiting_the_torrents_in_the_result() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + // torrents are ordered alphabetically by infohashes + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); + + test_env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + test_env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; + + let response = Client::new(test_env.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) + .await; + + assert_torrent_list( + response, 
+ vec![torrent::ListItem { + info_hash: "0b3aea4adc213ce32295be85d3883a63bca25446".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_the_torrents_result_pagination() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + // torrents are ordered alphabetically by infohashes + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); + + test_env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + test_env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; + + let response = Client::new(test_env.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) + .await; + + assert_torrent_list( + response, + vec![torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let invalid_offsets = [" ", "-1", "1.1", "INVALID OFFSET"]; + + for invalid_offset in &invalid_offsets { + let response = Client::new(test_env.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) + .await; + + assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_parsed() { + let 
test_env = running_test_environment(configuration::ephemeral()).await; + + let invalid_limits = [" ", "-1", "1.1", "INVALID LIMIT"]; + + for invalid_limit in &invalid_limits { + let response = Client::new(test_env.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) + .await; + + assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_getting_torrents_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .get_torrents(Query::empty()) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .get_torrents(Query::default()) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_getting_a_torrent_info() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + let peer = PeerBuilder::default().into(); + + test_env.add_torrent_peer(&info_hash, &peer).await; + + let response = Client::new(test_env.get_connection_info()) + .get_torrent(&info_hash.to_string()) + .await; + + assert_torrent_info( + response, + Torrent { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: Some(vec![Peer::from(peer)]), + }, + ) + .await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = 
InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + let response = Client::new(test_env.get_connection_info()) + .get_torrent(&info_hash.to_string()) + .await; + + assert_torrent_not_known(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invalid() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + for invalid_infohash in &invalid_infohashes_returning_bad_request() { + let response = Client::new(test_env.get_connection_info()) + .get_torrent(invalid_infohash) + .await; + + assert_invalid_infohash_param(response, invalid_infohash).await; + } + + for invalid_infohash in &invalid_infohashes_returning_not_found() { + let response = Client::new(test_env.get_connection_info()) + .get_torrent(invalid_infohash) + .await; + + assert_not_found(response).await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + test_env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .get_torrent(&info_hash.to_string()) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .get_torrent(&info_hash.to_string()) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} diff --git a/tests/api/tests/context/whitelist.rs b/tests/api/tests/context/whitelist.rs new file mode 100644 index 000000000..29ea573c0 --- /dev/null +++ b/tests/api/tests/context/whitelist.rs @@ -0,0 +1,258 @@ +use std::str::FromStr; + +use 
torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker_test_helpers::configuration; + +use crate::api::asserts::{ + assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, + assert_invalid_infohash_param, assert_not_found, assert_ok, assert_token_not_valid, assert_unauthorized, +}; +use crate::api::client::Client; +use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::api::force_database_error; +use crate::api::test_environment::running_test_environment; +use crate::api::tests::fixtures::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; + +#[tokio::test] +async fn should_allow_whitelisting_a_torrent() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let response = Client::new(test_env.get_connection_info()) + .whitelist_a_torrent(&info_hash) + .await; + + assert_ok(response).await; + assert!( + test_env + .tracker + .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + .await + ); + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let api_client = Client::new(test_env.get_connection_info()); + + let response = api_client.whitelist_a_torrent(&info_hash).await; + assert_ok(response).await; + + let response = api_client.whitelist_a_torrent(&info_hash).await; + assert_ok(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = 
"9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .whitelist_a_torrent(&info_hash) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .whitelist_a_torrent(&info_hash) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_the_torrent_cannot_be_whitelisted() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + force_database_error(&test_env.tracker); + + let response = Client::new(test_env.get_connection_info()) + .whitelist_a_torrent(&info_hash) + .await; + + assert_failed_to_whitelist_torrent(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invalid() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + for invalid_infohash in &invalid_infohashes_returning_bad_request() { + let response = Client::new(test_env.get_connection_info()) + .whitelist_a_torrent(invalid_infohash) + .await; + + assert_invalid_infohash_param(response, invalid_infohash).await; + } + + for invalid_infohash in &invalid_infohashes_returning_not_found() { + let response = Client::new(test_env.get_connection_info()) + .whitelist_a_torrent(invalid_infohash) + .await; + + assert_not_found(response).await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_removing_a_torrent_from_the_whitelist() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + 
test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + let response = Client::new(test_env.get_connection_info()) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_ok(response).await; + assert!(!test_env.tracker.is_info_hash_whitelisted(&info_hash).await); + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let response = Client::new(test_env.get_connection_info()) + .remove_torrent_from_whitelist(&non_whitelisted_torrent_hash) + .await; + + assert_ok(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_infohash_is_invalid() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + for invalid_infohash in &invalid_infohashes_returning_bad_request() { + let response = Client::new(test_env.get_connection_info()) + .remove_torrent_from_whitelist(invalid_infohash) + .await; + + assert_invalid_infohash_param(response, invalid_infohash).await; + } + + for invalid_infohash in &invalid_infohashes_returning_not_found() { + let response = Client::new(test_env.get_connection_info()) + .remove_torrent_from_whitelist(invalid_infohash) + .await; + + assert_not_found(response).await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + force_database_error(&test_env.tracker); + + let response = 
Client::new(test_env.get_connection_info()) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_failed_to_remove_torrent_from_whitelist(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_token_not_valid(response).await; + + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_reload_the_whitelist_from_the_database() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + let response = Client::new(test_env.get_connection_info()).reload_whitelist().await; + + assert_ok(response).await; + /* todo: this assert fails because the whitelist has not been reloaded yet. + We could add a new endpoint GET /api/whitelist/:info_hash to check if a torrent + is whitelisted and use that endpoint to check if the torrent is still there after reloading. 
+ assert!( + !(test_env + .tracker + .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + .await) + ); + */ + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + force_database_error(&test_env.tracker); + + let response = Client::new(test_env.get_connection_info()).reload_whitelist().await; + + assert_failed_to_reload_whitelist(response).await; + + test_env.stop().await; +} diff --git a/tests/api/tests/fixtures.rs b/tests/api/tests/fixtures.rs new file mode 100644 index 000000000..6d147f190 --- /dev/null +++ b/tests/api/tests/fixtures.rs @@ -0,0 +1,13 @@ +use crate::common::fixtures::invalid_info_hashes; + +// When these infohashes are used in URL path params +// the response is a custom response returned in the handler +pub fn invalid_infohashes_returning_bad_request() -> Vec { + invalid_info_hashes() +} + +// When these infohashes are used in URL path params +// the response is an Axum response returned in the handler +pub fn invalid_infohashes_returning_not_found() -> Vec { + [String::new(), " ".to_string()].to_vec() +} diff --git a/tests/api/tests/mod.rs b/tests/api/tests/mod.rs new file mode 100644 index 000000000..38b4a2b37 --- /dev/null +++ b/tests/api/tests/mod.rs @@ -0,0 +1,4 @@ +pub mod authentication; +pub mod configuration; +pub mod context; +pub mod fixtures; diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs index ff4eb295b..3219bc987 100644 --- a/tests/tracker_api.rs +++ b/tests/tracker_api.rs @@ -1,988 +1,7 @@ /// Integration tests for the tracker API /// /// ```text -/// cargo test tracker_apis -- --nocapture +/// cargo test --test tracker_api /// ``` -extern crate rand; - mod 
api; mod common; - -mod tracker_apis { - use crate::common::fixtures::invalid_info_hashes; - - // When these infohashes are used in URL path params - // the response is a custom response returned in the handler - fn invalid_infohashes_returning_bad_request() -> Vec { - invalid_info_hashes() - } - - // When these infohashes are used in URL path params - // the response is an Axum response returned in the handler - fn invalid_infohashes_returning_not_found() -> Vec { - [String::new(), " ".to_string()].to_vec() - } - - mod configuration { - use torrust_tracker_test_helpers::configuration; - - use crate::api::test_environment::stopped_test_environment; - - #[tokio::test] - #[should_panic] - async fn should_fail_with_ssl_enabled_and_bad_ssl_config() { - let mut test_env = stopped_test_environment(configuration::ephemeral()); - - let cfg = test_env.config_mut(); - - cfg.ssl_enabled = true; - cfg.ssl_key_path = Some("bad key path".to_string()); - cfg.ssl_cert_path = Some("bad cert path".to_string()); - - test_env.start().await; - } - } - - mod authentication { - use torrust_tracker_test_helpers::configuration; - - use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; - use crate::api::client::Client; - use crate::api::test_environment::running_test_environment; - use crate::common::http::{Query, QueryParam}; - - #[tokio::test] - async fn should_authenticate_requests_by_using_a_token_query_param() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let token = test_env.get_connection_info().api_token.unwrap(); - - let response = Client::new(test_env.get_connection_info()) - .get_request_with_query("stats", Query::params([QueryParam::new("token", &token)].to_vec())) - .await; - - assert_eq!(response.status(), 200); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_authenticate_requests_when_the_token_is_missing() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let 
response = Client::new(test_env.get_connection_info()) - .get_request_with_query("stats", Query::default()) - .await; - - assert_unauthorized(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_authenticate_requests_when_the_token_is_empty() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let response = Client::new(test_env.get_connection_info()) - .get_request_with_query("stats", Query::params([QueryParam::new("token", "")].to_vec())) - .await; - - assert_token_not_valid(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_authenticate_requests_when_the_token_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let response = Client::new(test_env.get_connection_info()) - .get_request_with_query("stats", Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec())) - .await; - - assert_token_not_valid(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_query() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let token = test_env.get_connection_info().api_token.unwrap(); - - // At the beginning of the query component - let response = Client::new(test_env.get_connection_info()) - .get_request(&format!("torrents?token={token}&limit=1")) - .await; - - assert_eq!(response.status(), 200); - - // At the end of the query component - let response = Client::new(test_env.get_connection_info()) - .get_request(&format!("torrents?limit=1&token={token}")) - .await; - - assert_eq!(response.status(), 200); - - test_env.stop().await; - } - } - - mod for_stats_resources { - use std::str::FromStr; - - use torrust_tracker::apis::context::stats::resources::Stats; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker_test_helpers::configuration; - - use 
crate::api::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; - use crate::api::client::Client; - use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::test_environment::running_test_environment; - use crate::common::fixtures::PeerBuilder; - - #[tokio::test] - async fn should_allow_getting_tracker_statistics() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - test_env - .add_torrent_peer( - &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), - &PeerBuilder::default().into(), - ) - .await; - - let response = Client::new(test_env.get_connection_info()).get_tracker_statistics().await; - - assert_stats( - response, - Stats { - torrents: 1, - seeders: 1, - completed: 0, - leechers: 0, - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 0, - udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0, - }, - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .get_tracker_statistics() - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) - .get_tracker_statistics() - .await; - - assert_unauthorized(response).await; - - test_env.stop().await; - } - } - - mod for_torrent_resources { - use std::str::FromStr; - - use torrust_tracker::apis::context::torrent::resources::peer::Peer; - use 
torrust_tracker::apis::context::torrent::resources::torrent::{self, Torrent}; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker_test_helpers::configuration; - - use super::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; - use crate::api::asserts::{ - assert_bad_request, assert_invalid_infohash_param, assert_not_found, assert_token_not_valid, assert_torrent_info, - assert_torrent_list, assert_torrent_not_known, assert_unauthorized, - }; - use crate::api::client::Client; - use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::test_environment::running_test_environment; - use crate::common::fixtures::PeerBuilder; - use crate::common::http::{Query, QueryParam}; - - #[tokio::test] - async fn should_allow_getting_torrents() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - test_env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; - - let response = Client::new(test_env.get_connection_info()).get_torrents(Query::empty()).await; - - assert_torrent_list( - response, - vec![torrent::ListItem { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), - seeders: 1, - completed: 0, - leechers: 0, - peers: None, // Torrent list does not include the peer list for each torrent - }], - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_allow_limiting_the_torrents_in_the_result() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - // torrents are ordered alphabetically by infohashes - let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - - test_env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; - 
test_env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; - - let response = Client::new(test_env.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) - .await; - - assert_torrent_list( - response, - vec![torrent::ListItem { - info_hash: "0b3aea4adc213ce32295be85d3883a63bca25446".to_string(), - seeders: 1, - completed: 0, - leechers: 0, - peers: None, // Torrent list does not include the peer list for each torrent - }], - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_allow_the_torrents_result_pagination() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - // torrents are ordered alphabetically by infohashes - let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - - test_env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; - test_env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; - - let response = Client::new(test_env.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) - .await; - - assert_torrent_list( - response, - vec![torrent::ListItem { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), - seeders: 1, - completed: 0, - leechers: 0, - peers: None, // Torrent list does not include the peer list for each torrent - }], - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let invalid_offsets = [" ", "-1", "1.1", "INVALID OFFSET"]; - - for invalid_offset in &invalid_offsets { - let response = Client::new(test_env.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) - 
.await; - - assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_parsed() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let invalid_limits = [" ", "-1", "1.1", "INVALID LIMIT"]; - - for invalid_limit in &invalid_limits { - let response = Client::new(test_env.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) - .await; - - assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_allow_getting_torrents_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .get_torrents(Query::empty()) - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) - .get_torrents(Query::default()) - .await; - - assert_unauthorized(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_allow_getting_a_torrent_info() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - let peer = PeerBuilder::default().into(); - - test_env.add_torrent_peer(&info_hash, &peer).await; - - let response = Client::new(test_env.get_connection_info()) - .get_torrent(&info_hash.to_string()) - .await; - - assert_torrent_info( - response, - Torrent { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), - seeders: 1, - completed: 0, - leechers: 0, - peers: 
Some(vec![Peer::from(peer)]), - }, - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - let response = Client::new(test_env.get_connection_info()) - .get_torrent(&info_hash.to_string()) - .await; - - assert_torrent_not_known(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - for invalid_infohash in &invalid_infohashes_returning_bad_request() { - let response = Client::new(test_env.get_connection_info()) - .get_torrent(invalid_infohash) - .await; - - assert_invalid_infohash_param(response, invalid_infohash).await; - } - - for invalid_infohash in &invalid_infohashes_returning_not_found() { - let response = Client::new(test_env.get_connection_info()) - .get_torrent(invalid_infohash) - .await; - - assert_not_found(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - - test_env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; - - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .get_torrent(&info_hash.to_string()) - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) - .get_torrent(&info_hash.to_string()) - .await; - - assert_unauthorized(response).await; - 
- test_env.stop().await; - } - } - - mod for_whitelisted_torrent_resources { - use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker_test_helpers::configuration; - - use super::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; - use crate::api::asserts::{ - assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, - assert_failed_to_whitelist_torrent, assert_invalid_infohash_param, assert_not_found, assert_ok, - assert_token_not_valid, assert_unauthorized, - }; - use crate::api::client::Client; - use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::force_database_error; - use crate::api::test_environment::running_test_environment; - - #[tokio::test] - async fn should_allow_whitelisting_a_torrent() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - let response = Client::new(test_env.get_connection_info()) - .whitelist_a_torrent(&info_hash) - .await; - - assert_ok(response).await; - assert!( - test_env - .tracker - .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) - .await - ); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - let api_client = Client::new(test_env.get_connection_info()); - - let response = api_client.whitelist_a_torrent(&info_hash).await; - assert_ok(response).await; - - let response = api_client.whitelist_a_torrent(&info_hash).await; - assert_ok(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { - let test_env = 
running_test_environment(configuration::ephemeral()).await; - - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .whitelist_a_torrent(&info_hash) - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) - .whitelist_a_torrent(&info_hash) - .await; - - assert_unauthorized(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_torrent_cannot_be_whitelisted() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - force_database_error(&test_env.tracker); - - let response = Client::new(test_env.get_connection_info()) - .whitelist_a_torrent(&info_hash) - .await; - - assert_failed_to_whitelist_torrent(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - for invalid_infohash in &invalid_infohashes_returning_bad_request() { - let response = Client::new(test_env.get_connection_info()) - .whitelist_a_torrent(invalid_infohash) - .await; - - assert_invalid_infohash_param(response, invalid_infohash).await; - } - - for invalid_infohash in &invalid_infohashes_returning_not_found() { - let response = Client::new(test_env.get_connection_info()) - .whitelist_a_torrent(invalid_infohash) - .await; - - assert_not_found(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_allow_removing_a_torrent_from_the_whitelist() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let info_hash 
= InfoHash::from_str(&hash).unwrap(); - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - - let response = Client::new(test_env.get_connection_info()) - .remove_torrent_from_whitelist(&hash) - .await; - - assert_ok(response).await; - assert!(!test_env.tracker.is_info_hash_whitelisted(&info_hash).await); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - - let response = Client::new(test_env.get_connection_info()) - .remove_torrent_from_whitelist(&non_whitelisted_torrent_hash) - .await; - - assert_ok(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_infohash_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - for invalid_infohash in &invalid_infohashes_returning_bad_request() { - let response = Client::new(test_env.get_connection_info()) - .remove_torrent_from_whitelist(invalid_infohash) - .await; - - assert_invalid_infohash_param(response, invalid_infohash).await; - } - - for invalid_infohash in &invalid_infohashes_returning_not_found() { - let response = Client::new(test_env.get_connection_info()) - .remove_torrent_from_whitelist(invalid_infohash) - .await; - - assert_not_found(response).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let info_hash = InfoHash::from_str(&hash).unwrap(); - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - - force_database_error(&test_env.tracker); - - 
let response = Client::new(test_env.get_connection_info()) - .remove_torrent_from_whitelist(&hash) - .await; - - assert_failed_to_remove_torrent_from_whitelist(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let info_hash = InfoHash::from_str(&hash).unwrap(); - - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .remove_torrent_from_whitelist(&hash) - .await; - - assert_token_not_valid(response).await; - - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) - .remove_torrent_from_whitelist(&hash) - .await; - - assert_unauthorized(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_allow_reload_the_whitelist_from_the_database() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let info_hash = InfoHash::from_str(&hash).unwrap(); - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - - let response = Client::new(test_env.get_connection_info()).reload_whitelist().await; - - assert_ok(response).await; - /* todo: this assert fails because the whitelist has not been reloaded yet. - We could add a new endpoint GET /api/whitelist/:info_hash to check if a torrent - is whitelisted and use that endpoint to check if the torrent is still there after reloading. 
- assert!( - !(test_env - .tracker - .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) - .await) - ); - */ - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let info_hash = InfoHash::from_str(&hash).unwrap(); - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - - force_database_error(&test_env.tracker); - - let response = Client::new(test_env.get_connection_info()).reload_whitelist().await; - - assert_failed_to_reload_whitelist(response).await; - - test_env.stop().await; - } - } - - mod for_key_resources { - use std::time::Duration; - - use torrust_tracker::tracker::auth::Key; - use torrust_tracker_test_helpers::configuration; - - use crate::api::asserts::{ - assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, - assert_invalid_auth_key_param, assert_invalid_key_duration_param, assert_ok, assert_token_not_valid, - assert_unauthorized, - }; - use crate::api::client::Client; - use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::api::force_database_error; - use crate::api::test_environment::running_test_environment; - - #[tokio::test] - async fn should_allow_generating_a_new_auth_key() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let seconds_valid = 60; - - let response = Client::new(test_env.get_connection_info()) - .generate_auth_key(seconds_valid) - .await; - - let auth_key_resource = assert_auth_key_utf8(response).await; - - // Verify the key with the tracker - assert!(test_env - .tracker - .verify_auth_key(&auth_key_resource.key.parse::().unwrap()) - .await - .is_ok()); - - test_env.stop().await; - } - - #[tokio::test] - async fn 
should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let seconds_valid = 60; - - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .generate_auth_key(seconds_valid) - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) - .generate_auth_key(seconds_valid) - .await; - - assert_unauthorized(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let invalid_key_durations = [ - // "", it returns 404 - // " ", it returns 404 - "-1", "text", - ]; - - for invalid_key_duration in invalid_key_durations { - let response = Client::new(test_env.get_connection_info()) - .post(&format!("key/{invalid_key_duration}")) - .await; - - assert_invalid_key_duration_param(response, invalid_key_duration).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_auth_key_cannot_be_generated() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - force_database_error(&test_env.tracker); - - let seconds_valid = 60; - let response = Client::new(test_env.get_connection_info()) - .generate_auth_key(seconds_valid) - .await; - - assert_failed_to_generate_key(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_allow_deleting_an_auth_key() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let seconds_valid = 60; - let auth_key = test_env - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - let response = Client::new(test_env.get_connection_info()) - 
.delete_auth_key(&auth_key.key.to_string()) - .await; - - assert_ok(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let invalid_auth_keys = [ - // "", it returns a 404 - // " ", it returns a 404 - "0", - "-1", - "INVALID AUTH KEY ID", - "IrweYtVuQPGbG9Jzx1DihcPmJGGpVy8", // 32 char key cspell:disable-line - "IrweYtVuQPGbG9Jzx1DihcPmJGGpVy8zs", // 34 char key cspell:disable-line - ]; - - for invalid_auth_key in &invalid_auth_keys { - let response = Client::new(test_env.get_connection_info()) - .delete_auth_key(invalid_auth_key) - .await; - - assert_invalid_auth_key_param(response, invalid_auth_key).await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_auth_key_cannot_be_deleted() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let seconds_valid = 60; - let auth_key = test_env - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - force_database_error(&test_env.tracker); - - let response = Client::new(test_env.get_connection_info()) - .delete_auth_key(&auth_key.key.to_string()) - .await; - - assert_failed_to_delete_key(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let seconds_valid = 60; - - // Generate new auth key - let auth_key = test_env - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .delete_auth_key(&auth_key.key.to_string()) - .await; - - assert_token_not_valid(response).await; - - // Generate new auth key - let auth_key = test_env - 
.tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) - .delete_auth_key(&auth_key.key.to_string()) - .await; - - assert_unauthorized(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_allow_reloading_keys() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let seconds_valid = 60; - test_env - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - let response = Client::new(test_env.get_connection_info()).reload_keys().await; - - assert_ok(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_keys_cannot_be_reloaded() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let seconds_valid = 60; - test_env - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - force_database_error(&test_env.tracker); - - let response = Client::new(test_env.get_connection_info()).reload_keys().await; - - assert_failed_to_reload_keys(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_allow_reloading_keys_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let seconds_valid = 60; - test_env - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) - .await - .unwrap(); - - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .reload_keys() - .await; - - assert_token_not_valid(response).await; - - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) - .reload_keys() - .await; - - assert_unauthorized(response).await; - - test_env.stop().await; - } - } -} From 70d94ad180bab0a2b48b627ac44878c2c5bd7958 Mon Sep 17 
00:00:00 2001 From: Jose Celano Date: Sun, 12 Mar 2023 11:03:58 +0000 Subject: [PATCH 412/435] refactor(api): add namespace v1 to API --- src/apis/mod.rs | 4 +--- src/apis/routes.rs | 9 +++------ src/apis/{ => v1}/context/auth_key/handlers.rs | 4 ++-- src/apis/{ => v1}/context/auth_key/mod.rs | 0 src/apis/{ => v1}/context/auth_key/resources.rs | 0 src/apis/{ => v1}/context/auth_key/responses.rs | 4 ++-- src/apis/{ => v1}/context/auth_key/routes.rs | 0 src/apis/{ => v1}/context/mod.rs | 0 src/apis/{ => v1}/context/stats/handlers.rs | 0 src/apis/{ => v1}/context/stats/mod.rs | 0 src/apis/{ => v1}/context/stats/resources.rs | 0 src/apis/{ => v1}/context/stats/responses.rs | 0 src/apis/{ => v1}/context/stats/routes.rs | 0 src/apis/{ => v1}/context/torrent/handlers.rs | 2 +- src/apis/{ => v1}/context/torrent/mod.rs | 0 src/apis/{ => v1}/context/torrent/resources/mod.rs | 0 src/apis/{ => v1}/context/torrent/resources/peer.rs | 0 .../{ => v1}/context/torrent/resources/torrent.rs | 4 ++-- src/apis/{ => v1}/context/torrent/responses.rs | 0 src/apis/{ => v1}/context/torrent/routes.rs | 0 src/apis/{ => v1}/context/whitelist/handlers.rs | 2 +- src/apis/{ => v1}/context/whitelist/mod.rs | 0 src/apis/{ => v1}/context/whitelist/responses.rs | 2 +- src/apis/{ => v1}/context/whitelist/routes.rs | 0 src/apis/{ => v1}/middlewares/auth.rs | 2 +- src/apis/{ => v1}/middlewares/mod.rs | 0 src/apis/v1/mod.rs | 4 ++++ src/apis/{ => v1}/responses.rs | 0 src/apis/v1/routes.rs | 13 +++++++++++++ tests/api/asserts.rs | 6 +++--- tests/api/tests/mod.rs | 3 +-- tests/api/tests/{ => v1}/authentication.rs | 0 tests/api/tests/{ => v1}/context/auth_key.rs | 0 tests/api/tests/{ => v1}/context/mod.rs | 0 tests/api/tests/{ => v1}/context/stats.rs | 2 +- tests/api/tests/{ => v1}/context/torrent.rs | 4 ++-- tests/api/tests/{ => v1}/context/whitelist.rs | 0 tests/api/tests/v1/mod.rs | 2 ++ 38 files changed, 40 insertions(+), 27 deletions(-) rename src/apis/{ => v1}/context/auth_key/handlers.rs (91%) rename 
src/apis/{ => v1}/context/auth_key/mod.rs (100%) rename src/apis/{ => v1}/context/auth_key/resources.rs (100%) rename src/apis/{ => v1}/context/auth_key/responses.rs (88%) rename src/apis/{ => v1}/context/auth_key/routes.rs (100%) rename src/apis/{ => v1}/context/mod.rs (100%) rename src/apis/{ => v1}/context/stats/handlers.rs (100%) rename src/apis/{ => v1}/context/stats/mod.rs (100%) rename src/apis/{ => v1}/context/stats/resources.rs (100%) rename src/apis/{ => v1}/context/stats/responses.rs (100%) rename src/apis/{ => v1}/context/stats/routes.rs (100%) rename src/apis/{ => v1}/context/torrent/handlers.rs (96%) rename src/apis/{ => v1}/context/torrent/mod.rs (100%) rename src/apis/{ => v1}/context/torrent/resources/mod.rs (100%) rename src/apis/{ => v1}/context/torrent/resources/peer.rs (100%) rename src/apis/{ => v1}/context/torrent/resources/torrent.rs (96%) rename src/apis/{ => v1}/context/torrent/responses.rs (100%) rename src/apis/{ => v1}/context/torrent/routes.rs (100%) rename src/apis/{ => v1}/context/whitelist/handlers.rs (95%) rename src/apis/{ => v1}/context/whitelist/mod.rs (100%) rename src/apis/{ => v1}/context/whitelist/responses.rs (90%) rename src/apis/{ => v1}/context/whitelist/routes.rs (100%) rename src/apis/{ => v1}/middlewares/auth.rs (96%) rename src/apis/{ => v1}/middlewares/mod.rs (100%) create mode 100644 src/apis/v1/mod.rs rename src/apis/{ => v1}/responses.rs (100%) create mode 100644 src/apis/v1/routes.rs rename tests/api/tests/{ => v1}/authentication.rs (100%) rename tests/api/tests/{ => v1}/context/auth_key.rs (100%) rename tests/api/tests/{ => v1}/context/mod.rs (100%) rename tests/api/tests/{ => v1}/context/stats.rs (97%) rename tests/api/tests/{ => v1}/context/torrent.rs (98%) rename tests/api/tests/{ => v1}/context/whitelist.rs (100%) create mode 100644 tests/api/tests/v1/mod.rs diff --git a/src/apis/mod.rs b/src/apis/mod.rs index fd7fdb6e5..1bc257916 100644 --- a/src/apis/mod.rs +++ b/src/apis/mod.rs @@ -1,8 +1,6 @@ -pub mod 
context; -pub mod middlewares; -pub mod responses; pub mod routes; pub mod server; +pub mod v1; use serde::Deserialize; diff --git a/src/apis/routes.rs b/src/apis/routes.rs index c567e50da..9e33ca77e 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -2,18 +2,15 @@ use std::sync::Arc; use axum::{middleware, Router}; -use super::context::{auth_key, stats, torrent, whitelist}; -use super::middlewares::auth::auth; +use super::v1; +use super::v1::middlewares::auth::auth; use crate::tracker::Tracker; #[allow(clippy::needless_pass_by_value)] pub fn router(tracker: Arc) -> Router { let router = Router::new(); - let router = auth_key::routes::add(router, tracker.clone()); - let router = stats::routes::add(router, tracker.clone()); - let router = whitelist::routes::add(router, tracker.clone()); - let router = torrent::routes::add(router, tracker.clone()); + let router = v1::routes::add(router, tracker.clone()); router.layer(middleware::from_fn_with_state(tracker.config.clone(), auth)) } diff --git a/src/apis/context/auth_key/handlers.rs b/src/apis/v1/context/auth_key/handlers.rs similarity index 91% rename from src/apis/context/auth_key/handlers.rs rename to src/apis/v1/context/auth_key/handlers.rs index af78b3f4c..d21f08299 100644 --- a/src/apis/context/auth_key/handlers.rs +++ b/src/apis/v1/context/auth_key/handlers.rs @@ -9,8 +9,8 @@ use serde::Deserialize; use super::responses::{ auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, failed_to_reload_keys_response, }; -use crate::apis::context::auth_key::resources::AuthKey; -use crate::apis::responses::{invalid_auth_key_param_response, ok_response}; +use crate::apis::v1::context::auth_key::resources::AuthKey; +use crate::apis::v1::responses::{invalid_auth_key_param_response, ok_response}; use crate::tracker::auth::Key; use crate::tracker::Tracker; diff --git a/src/apis/context/auth_key/mod.rs b/src/apis/v1/context/auth_key/mod.rs similarity index 100% rename from 
src/apis/context/auth_key/mod.rs rename to src/apis/v1/context/auth_key/mod.rs diff --git a/src/apis/context/auth_key/resources.rs b/src/apis/v1/context/auth_key/resources.rs similarity index 100% rename from src/apis/context/auth_key/resources.rs rename to src/apis/v1/context/auth_key/resources.rs diff --git a/src/apis/context/auth_key/responses.rs b/src/apis/v1/context/auth_key/responses.rs similarity index 88% rename from src/apis/context/auth_key/responses.rs rename to src/apis/v1/context/auth_key/responses.rs index 8c1bf58dc..9b8fcebe2 100644 --- a/src/apis/context/auth_key/responses.rs +++ b/src/apis/v1/context/auth_key/responses.rs @@ -3,8 +3,8 @@ use std::error::Error; use axum::http::{header, StatusCode}; use axum::response::{IntoResponse, Response}; -use crate::apis::context::auth_key::resources::AuthKey; -use crate::apis::responses::unhandled_rejection_response; +use crate::apis::v1::context::auth_key::resources::AuthKey; +use crate::apis::v1::responses::unhandled_rejection_response; /// # Panics /// diff --git a/src/apis/context/auth_key/routes.rs b/src/apis/v1/context/auth_key/routes.rs similarity index 100% rename from src/apis/context/auth_key/routes.rs rename to src/apis/v1/context/auth_key/routes.rs diff --git a/src/apis/context/mod.rs b/src/apis/v1/context/mod.rs similarity index 100% rename from src/apis/context/mod.rs rename to src/apis/v1/context/mod.rs diff --git a/src/apis/context/stats/handlers.rs b/src/apis/v1/context/stats/handlers.rs similarity index 100% rename from src/apis/context/stats/handlers.rs rename to src/apis/v1/context/stats/handlers.rs diff --git a/src/apis/context/stats/mod.rs b/src/apis/v1/context/stats/mod.rs similarity index 100% rename from src/apis/context/stats/mod.rs rename to src/apis/v1/context/stats/mod.rs diff --git a/src/apis/context/stats/resources.rs b/src/apis/v1/context/stats/resources.rs similarity index 100% rename from src/apis/context/stats/resources.rs rename to src/apis/v1/context/stats/resources.rs 
diff --git a/src/apis/context/stats/responses.rs b/src/apis/v1/context/stats/responses.rs similarity index 100% rename from src/apis/context/stats/responses.rs rename to src/apis/v1/context/stats/responses.rs diff --git a/src/apis/context/stats/routes.rs b/src/apis/v1/context/stats/routes.rs similarity index 100% rename from src/apis/context/stats/routes.rs rename to src/apis/v1/context/stats/routes.rs diff --git a/src/apis/context/torrent/handlers.rs b/src/apis/v1/context/torrent/handlers.rs similarity index 96% rename from src/apis/context/torrent/handlers.rs rename to src/apis/v1/context/torrent/handlers.rs index 1a8280e75..fc816cdbf 100644 --- a/src/apis/context/torrent/handlers.rs +++ b/src/apis/v1/context/torrent/handlers.rs @@ -8,7 +8,7 @@ use serde::{de, Deserialize, Deserializer}; use super::resources::torrent::ListItem; use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; -use crate::apis::responses::invalid_info_hash_param_response; +use crate::apis::v1::responses::invalid_info_hash_param_response; use crate::apis::InfoHashParam; use crate::protocol::info_hash::InfoHash; use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; diff --git a/src/apis/context/torrent/mod.rs b/src/apis/v1/context/torrent/mod.rs similarity index 100% rename from src/apis/context/torrent/mod.rs rename to src/apis/v1/context/torrent/mod.rs diff --git a/src/apis/context/torrent/resources/mod.rs b/src/apis/v1/context/torrent/resources/mod.rs similarity index 100% rename from src/apis/context/torrent/resources/mod.rs rename to src/apis/v1/context/torrent/resources/mod.rs diff --git a/src/apis/context/torrent/resources/peer.rs b/src/apis/v1/context/torrent/resources/peer.rs similarity index 100% rename from src/apis/context/torrent/resources/peer.rs rename to src/apis/v1/context/torrent/resources/peer.rs diff --git a/src/apis/context/torrent/resources/torrent.rs 
b/src/apis/v1/context/torrent/resources/torrent.rs similarity index 96% rename from src/apis/context/torrent/resources/torrent.rs rename to src/apis/v1/context/torrent/resources/torrent.rs index 1099dc923..48f4c58f0 100644 --- a/src/apis/context/torrent/resources/torrent.rs +++ b/src/apis/v1/context/torrent/resources/torrent.rs @@ -75,8 +75,8 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use super::Torrent; - use crate::apis::context::torrent::resources::peer::Peer; - use crate::apis::context::torrent::resources::torrent::ListItem; + use crate::apis::v1::context::torrent::resources::peer::Peer; + use crate::apis::v1::context::torrent::resources::torrent::ListItem; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; diff --git a/src/apis/context/torrent/responses.rs b/src/apis/v1/context/torrent/responses.rs similarity index 100% rename from src/apis/context/torrent/responses.rs rename to src/apis/v1/context/torrent/responses.rs diff --git a/src/apis/context/torrent/routes.rs b/src/apis/v1/context/torrent/routes.rs similarity index 100% rename from src/apis/context/torrent/routes.rs rename to src/apis/v1/context/torrent/routes.rs diff --git a/src/apis/context/whitelist/handlers.rs b/src/apis/v1/context/whitelist/handlers.rs similarity index 95% rename from src/apis/context/whitelist/handlers.rs rename to src/apis/v1/context/whitelist/handlers.rs index c1e90a509..325f20e26 100644 --- a/src/apis/context/whitelist/handlers.rs +++ b/src/apis/v1/context/whitelist/handlers.rs @@ -7,7 +7,7 @@ use axum::response::Response; use super::responses::{ failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, }; -use crate::apis::responses::{invalid_info_hash_param_response, ok_response}; +use crate::apis::v1::responses::{invalid_info_hash_param_response, ok_response}; use crate::apis::InfoHashParam; use 
crate::protocol::info_hash::InfoHash; use crate::tracker::Tracker; diff --git a/src/apis/context/whitelist/mod.rs b/src/apis/v1/context/whitelist/mod.rs similarity index 100% rename from src/apis/context/whitelist/mod.rs rename to src/apis/v1/context/whitelist/mod.rs diff --git a/src/apis/context/whitelist/responses.rs b/src/apis/v1/context/whitelist/responses.rs similarity index 90% rename from src/apis/context/whitelist/responses.rs rename to src/apis/v1/context/whitelist/responses.rs index dd2727898..197d4c90b 100644 --- a/src/apis/context/whitelist/responses.rs +++ b/src/apis/v1/context/whitelist/responses.rs @@ -2,7 +2,7 @@ use std::error::Error; use axum::response::Response; -use crate::apis::responses::unhandled_rejection_response; +use crate::apis::v1::responses::unhandled_rejection_response; #[must_use] pub fn failed_to_remove_torrent_from_whitelist_response(e: E) -> Response { diff --git a/src/apis/context/whitelist/routes.rs b/src/apis/v1/context/whitelist/routes.rs similarity index 100% rename from src/apis/context/whitelist/routes.rs rename to src/apis/v1/context/whitelist/routes.rs diff --git a/src/apis/middlewares/auth.rs b/src/apis/v1/middlewares/auth.rs similarity index 96% rename from src/apis/middlewares/auth.rs rename to src/apis/v1/middlewares/auth.rs index f2745d42e..e729072b6 100644 --- a/src/apis/middlewares/auth.rs +++ b/src/apis/v1/middlewares/auth.rs @@ -7,7 +7,7 @@ use axum::response::{IntoResponse, Response}; use serde::Deserialize; use torrust_tracker_configuration::{Configuration, HttpApi}; -use crate::apis::responses::unhandled_rejection_response; +use crate::apis::v1::responses::unhandled_rejection_response; #[derive(Deserialize, Debug)] pub struct QueryParams { diff --git a/src/apis/middlewares/mod.rs b/src/apis/v1/middlewares/mod.rs similarity index 100% rename from src/apis/middlewares/mod.rs rename to src/apis/v1/middlewares/mod.rs diff --git a/src/apis/v1/mod.rs b/src/apis/v1/mod.rs new file mode 100644 index 
000000000..e87984b8e --- /dev/null +++ b/src/apis/v1/mod.rs @@ -0,0 +1,4 @@ +pub mod context; +pub mod middlewares; +pub mod responses; +pub mod routes; diff --git a/src/apis/responses.rs b/src/apis/v1/responses.rs similarity index 100% rename from src/apis/responses.rs rename to src/apis/v1/responses.rs diff --git a/src/apis/v1/routes.rs b/src/apis/v1/routes.rs new file mode 100644 index 000000000..9bac47937 --- /dev/null +++ b/src/apis/v1/routes.rs @@ -0,0 +1,13 @@ +use std::sync::Arc; + +use axum::Router; + +use super::context::{auth_key, stats, torrent, whitelist}; +use crate::tracker::Tracker; + +pub fn add(router: Router, tracker: Arc) -> Router { + let router = auth_key::routes::add(router, tracker.clone()); + let router = stats::routes::add(router, tracker.clone()); + let router = whitelist::routes::add(router, tracker.clone()); + torrent::routes::add(router, tracker) +} diff --git a/tests/api/asserts.rs b/tests/api/asserts.rs index c7567e6fe..d37bcdbb4 100644 --- a/tests/api/asserts.rs +++ b/tests/api/asserts.rs @@ -1,9 +1,9 @@ // code-review: should we use macros to return the exact line where the assert fails? 
use reqwest::Response; -use torrust_tracker::apis::context::auth_key::resources::AuthKey; -use torrust_tracker::apis::context::stats::resources::Stats; -use torrust_tracker::apis::context::torrent::resources::torrent::{ListItem, Torrent}; +use torrust_tracker::apis::v1::context::auth_key::resources::AuthKey; +use torrust_tracker::apis::v1::context::stats::resources::Stats; +use torrust_tracker::apis::v1::context::torrent::resources::torrent::{ListItem, Torrent}; // Resource responses diff --git a/tests/api/tests/mod.rs b/tests/api/tests/mod.rs index 38b4a2b37..c795e0032 100644 --- a/tests/api/tests/mod.rs +++ b/tests/api/tests/mod.rs @@ -1,4 +1,3 @@ -pub mod authentication; pub mod configuration; -pub mod context; pub mod fixtures; +pub mod v1; diff --git a/tests/api/tests/authentication.rs b/tests/api/tests/v1/authentication.rs similarity index 100% rename from tests/api/tests/authentication.rs rename to tests/api/tests/v1/authentication.rs diff --git a/tests/api/tests/context/auth_key.rs b/tests/api/tests/v1/context/auth_key.rs similarity index 100% rename from tests/api/tests/context/auth_key.rs rename to tests/api/tests/v1/context/auth_key.rs diff --git a/tests/api/tests/context/mod.rs b/tests/api/tests/v1/context/mod.rs similarity index 100% rename from tests/api/tests/context/mod.rs rename to tests/api/tests/v1/context/mod.rs diff --git a/tests/api/tests/context/stats.rs b/tests/api/tests/v1/context/stats.rs similarity index 97% rename from tests/api/tests/context/stats.rs rename to tests/api/tests/v1/context/stats.rs index 99ae405b7..2d9423deb 100644 --- a/tests/api/tests/context/stats.rs +++ b/tests/api/tests/v1/context/stats.rs @@ -1,6 +1,6 @@ use std::str::FromStr; -use torrust_tracker::apis::context::stats::resources::Stats; +use torrust_tracker::apis::v1::context::stats::resources::Stats; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; diff --git a/tests/api/tests/context/torrent.rs 
b/tests/api/tests/v1/context/torrent.rs similarity index 98% rename from tests/api/tests/context/torrent.rs rename to tests/api/tests/v1/context/torrent.rs index 998c2afaf..cbe216d6c 100644 --- a/tests/api/tests/context/torrent.rs +++ b/tests/api/tests/v1/context/torrent.rs @@ -1,7 +1,7 @@ use std::str::FromStr; -use torrust_tracker::apis::context::torrent::resources::peer::Peer; -use torrust_tracker::apis::context::torrent::resources::torrent::{self, Torrent}; +use torrust_tracker::apis::v1::context::torrent::resources::peer::Peer; +use torrust_tracker::apis::v1::context::torrent::resources::torrent::{self, Torrent}; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; diff --git a/tests/api/tests/context/whitelist.rs b/tests/api/tests/v1/context/whitelist.rs similarity index 100% rename from tests/api/tests/context/whitelist.rs rename to tests/api/tests/v1/context/whitelist.rs diff --git a/tests/api/tests/v1/mod.rs b/tests/api/tests/v1/mod.rs new file mode 100644 index 000000000..6a8d9709d --- /dev/null +++ b/tests/api/tests/v1/mod.rs @@ -0,0 +1,2 @@ +pub mod authentication; +pub mod context; From d9a4266a8344db4078d189ecf069be49d125171e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 12 Mar 2023 11:58:09 +0000 Subject: [PATCH 413/435] feat(api): [#238] API also served with version prefix /api/v1/ For the time being we accept both versions `/api/` and `/api/v1`: http://0.0.0.0:1212/api/stats?token=MyAccessToken http://0.0.0.0:1212/api/v1/stats?token=MyAccessToken --- src/apis/routes.rs | 4 +++- src/apis/v1/context/auth_key/routes.rs | 10 +++++----- src/apis/v1/context/stats/routes.rs | 4 ++-- src/apis/v1/context/torrent/routes.rs | 6 +++--- src/apis/v1/context/whitelist/routes.rs | 10 ++++++---- src/apis/v1/routes.rs | 20 ++++++++++++++----- tests/api/mod.rs | 4 +--- tests/api/tests/v1/mod.rs | 2 -- tests/api/{ => v1}/asserts.rs | 0 tests/api/{ => v1}/client.rs | 4 ++-- tests/api/v1/mod.rs | 3 +++ 
.../{tests/v1 => v1/tests}/authentication.rs | 4 ++-- tests/api/{ => v1}/tests/configuration.rs | 0 .../v1 => v1/tests}/context/auth_key.rs | 10 +++++----- .../api/{tests/v1 => v1/tests}/context/mod.rs | 0 .../{tests/v1 => v1/tests}/context/stats.rs | 4 ++-- .../{tests/v1 => v1/tests}/context/torrent.rs | 10 +++++----- .../v1 => v1/tests}/context/whitelist.rs | 12 +++++------ tests/api/{ => v1}/tests/fixtures.rs | 0 tests/api/{ => v1}/tests/mod.rs | 3 ++- 20 files changed, 62 insertions(+), 48 deletions(-) delete mode 100644 tests/api/tests/v1/mod.rs rename tests/api/{ => v1}/asserts.rs (100%) rename tests/api/{ => v1}/client.rs (97%) create mode 100644 tests/api/v1/mod.rs rename tests/api/{tests/v1 => v1/tests}/authentication.rs (95%) rename tests/api/{ => v1}/tests/configuration.rs (100%) rename tests/api/{tests/v1 => v1/tests}/context/auth_key.rs (99%) rename tests/api/{tests/v1 => v1/tests}/context/mod.rs (100%) rename tests/api/{tests/v1 => v1/tests}/context/stats.rs (94%) rename tests/api/{tests/v1 => v1/tests}/context/torrent.rs (97%) rename tests/api/{tests/v1 => v1/tests}/context/whitelist.rs (97%) rename tests/api/{ => v1}/tests/fixtures.rs (100%) rename tests/api/{ => v1}/tests/mod.rs (50%) diff --git a/src/apis/routes.rs b/src/apis/routes.rs index 9e33ca77e..2545d6b88 100644 --- a/src/apis/routes.rs +++ b/src/apis/routes.rs @@ -10,7 +10,9 @@ use crate::tracker::Tracker; pub fn router(tracker: Arc) -> Router { let router = Router::new(); - let router = v1::routes::add(router, tracker.clone()); + let prefix = "/api"; + + let router = v1::routes::add(prefix, router, tracker.clone()); router.layer(middleware::from_fn_with_state(tracker.config.clone(), auth)) } diff --git a/src/apis/v1/context/auth_key/routes.rs b/src/apis/v1/context/auth_key/routes.rs index 2a4f5b9dd..9b155c2a5 100644 --- a/src/apis/v1/context/auth_key/routes.rs +++ b/src/apis/v1/context/auth_key/routes.rs @@ -6,20 +6,20 @@ use axum::Router; use super::handlers::{delete_auth_key_handler, 
generate_auth_key_handler, reload_keys_handler}; use crate::tracker::Tracker; -pub fn add(router: Router, tracker: Arc) -> Router { +pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { // Keys router .route( // code-review: Axum does not allow two routes with the same path but different path variable name. // In the new major API version, `seconds_valid` should be a POST form field so that we will have two paths: - // POST /api/key - // DELETE /api/key/:key - "/api/key/:seconds_valid_or_key", + // POST /key + // DELETE /key/:key + &format!("{prefix}/key/:seconds_valid_or_key"), post(generate_auth_key_handler) .with_state(tracker.clone()) .delete(delete_auth_key_handler) .with_state(tracker.clone()), ) // Keys command - .route("/api/keys/reload", get(reload_keys_handler).with_state(tracker)) + .route(&format!("{prefix}/keys/reload"), get(reload_keys_handler).with_state(tracker)) } diff --git a/src/apis/v1/context/stats/routes.rs b/src/apis/v1/context/stats/routes.rs index 8791ed25a..07f88aa70 100644 --- a/src/apis/v1/context/stats/routes.rs +++ b/src/apis/v1/context/stats/routes.rs @@ -6,6 +6,6 @@ use axum::Router; use super::handlers::get_stats_handler; use crate::tracker::Tracker; -pub fn add(router: Router, tracker: Arc) -> Router { - router.route("/api/stats", get(get_stats_handler).with_state(tracker)) +pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { + router.route(&format!("{prefix}/stats"), get(get_stats_handler).with_state(tracker)) } diff --git a/src/apis/v1/context/torrent/routes.rs b/src/apis/v1/context/torrent/routes.rs index 234f17223..00faa9665 100644 --- a/src/apis/v1/context/torrent/routes.rs +++ b/src/apis/v1/context/torrent/routes.rs @@ -6,12 +6,12 @@ use axum::Router; use super::handlers::{get_torrent_handler, get_torrents_handler}; use crate::tracker::Tracker; -pub fn add(router: Router, tracker: Arc) -> Router { +pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { // Torrents router .route( - 
"/api/torrent/:info_hash", + &format!("{prefix}/torrent/:info_hash"), get(get_torrent_handler).with_state(tracker.clone()), ) - .route("/api/torrents", get(get_torrents_handler).with_state(tracker)) + .route(&format!("{prefix}/torrents"), get(get_torrents_handler).with_state(tracker)) } diff --git a/src/apis/v1/context/whitelist/routes.rs b/src/apis/v1/context/whitelist/routes.rs index 1349f8bc1..06011b462 100644 --- a/src/apis/v1/context/whitelist/routes.rs +++ b/src/apis/v1/context/whitelist/routes.rs @@ -6,17 +6,19 @@ use axum::Router; use super::handlers::{add_torrent_to_whitelist_handler, reload_whitelist_handler, remove_torrent_from_whitelist_handler}; use crate::tracker::Tracker; -pub fn add(router: Router, tracker: Arc) -> Router { +pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { + let prefix = format!("{prefix}/whitelist"); + router // Whitelisted torrents .route( - "/api/whitelist/:info_hash", + &format!("{prefix}/:info_hash"), post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), ) .route( - "/api/whitelist/:info_hash", + &format!("{prefix}/:info_hash"), delete(remove_torrent_from_whitelist_handler).with_state(tracker.clone()), ) // Whitelist commands - .route("/api/whitelist/reload", get(reload_whitelist_handler).with_state(tracker)) + .route(&format!("{prefix}/reload"), get(reload_whitelist_handler).with_state(tracker)) } diff --git a/src/apis/v1/routes.rs b/src/apis/v1/routes.rs index 9bac47937..d45319c4b 100644 --- a/src/apis/v1/routes.rs +++ b/src/apis/v1/routes.rs @@ -5,9 +5,19 @@ use axum::Router; use super::context::{auth_key, stats, torrent, whitelist}; use crate::tracker::Tracker; -pub fn add(router: Router, tracker: Arc) -> Router { - let router = auth_key::routes::add(router, tracker.clone()); - let router = stats::routes::add(router, tracker.clone()); - let router = whitelist::routes::add(router, tracker.clone()); - torrent::routes::add(router, tracker) +pub fn add(prefix: &str, router: Router, tracker: Arc) 
-> Router { + // Without `v1` prefix. + // We keep the old API endpoints without `v1` prefix for backward compatibility. + // todo: remove when the torrust index backend is using the `v1` prefix. + let router = auth_key::routes::add(prefix, router, tracker.clone()); + let router = stats::routes::add(prefix, router, tracker.clone()); + let router = whitelist::routes::add(prefix, router, tracker.clone()); + let router = torrent::routes::add(prefix, router, tracker.clone()); + + // With `v1` prefix + let v1_prefix = format!("{prefix}/v1"); + let router = auth_key::routes::add(&v1_prefix, router, tracker.clone()); + let router = stats::routes::add(&v1_prefix, router, tracker.clone()); + let router = whitelist::routes::add(&v1_prefix, router, tracker.clone()); + torrent::routes::add(&v1_prefix, router, tracker) } diff --git a/tests/api/mod.rs b/tests/api/mod.rs index f59210b22..7022da9b4 100644 --- a/tests/api/mod.rs +++ b/tests/api/mod.rs @@ -2,11 +2,9 @@ use std::sync::Arc; use torrust_tracker::tracker::Tracker; -pub mod asserts; -pub mod client; pub mod connection_info; pub mod test_environment; -pub mod tests; +pub mod v1; /// It forces a database error by dropping all tables. /// That makes any query fail. 
diff --git a/tests/api/tests/v1/mod.rs b/tests/api/tests/v1/mod.rs deleted file mode 100644 index 6a8d9709d..000000000 --- a/tests/api/tests/v1/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod authentication; -pub mod context; diff --git a/tests/api/asserts.rs b/tests/api/v1/asserts.rs similarity index 100% rename from tests/api/asserts.rs rename to tests/api/v1/asserts.rs diff --git a/tests/api/client.rs b/tests/api/v1/client.rs similarity index 97% rename from tests/api/client.rs rename to tests/api/v1/client.rs index f99805570..d5cdf69f6 100644 --- a/tests/api/client.rs +++ b/tests/api/v1/client.rs @@ -1,6 +1,6 @@ use reqwest::Response; -use super::connection_info::ConnectionInfo; +use crate::api::connection_info::ConnectionInfo; use crate::common::http::{Query, QueryParam, ReqwestQuery}; /// API Client @@ -13,7 +13,7 @@ impl Client { pub fn new(connection_info: ConnectionInfo) -> Self { Self { connection_info, - base_path: "/api/".to_string(), + base_path: "/api/v1/".to_string(), } } diff --git a/tests/api/v1/mod.rs b/tests/api/v1/mod.rs new file mode 100644 index 000000000..b31e5fe49 --- /dev/null +++ b/tests/api/v1/mod.rs @@ -0,0 +1,3 @@ +pub mod asserts; +pub mod client; +pub mod tests; diff --git a/tests/api/tests/v1/authentication.rs b/tests/api/v1/tests/authentication.rs similarity index 95% rename from tests/api/tests/v1/authentication.rs rename to tests/api/v1/tests/authentication.rs index 5183c8909..5be96e078 100644 --- a/tests/api/tests/v1/authentication.rs +++ b/tests/api/v1/tests/authentication.rs @@ -1,8 +1,8 @@ use torrust_tracker_test_helpers::configuration; -use crate::api::asserts::{assert_token_not_valid, assert_unauthorized}; -use crate::api::client::Client; use crate::api::test_environment::running_test_environment; +use crate::api::v1::asserts::{assert_token_not_valid, assert_unauthorized}; +use crate::api::v1::client::Client; use crate::common::http::{Query, QueryParam}; #[tokio::test] diff --git a/tests/api/tests/configuration.rs 
b/tests/api/v1/tests/configuration.rs similarity index 100% rename from tests/api/tests/configuration.rs rename to tests/api/v1/tests/configuration.rs diff --git a/tests/api/tests/v1/context/auth_key.rs b/tests/api/v1/tests/context/auth_key.rs similarity index 99% rename from tests/api/tests/v1/context/auth_key.rs rename to tests/api/v1/tests/context/auth_key.rs index ee7121615..814afeacf 100644 --- a/tests/api/tests/v1/context/auth_key.rs +++ b/tests/api/v1/tests/context/auth_key.rs @@ -3,14 +3,14 @@ use std::time::Duration; use torrust_tracker::tracker::auth::Key; use torrust_tracker_test_helpers::configuration; -use crate::api::asserts::{ - assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, - assert_invalid_auth_key_param, assert_invalid_key_duration_param, assert_ok, assert_token_not_valid, assert_unauthorized, -}; -use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::force_database_error; use crate::api::test_environment::running_test_environment; +use crate::api::v1::asserts::{ + assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, + assert_invalid_auth_key_param, assert_invalid_key_duration_param, assert_ok, assert_token_not_valid, assert_unauthorized, +}; +use crate::api::v1::client::Client; #[tokio::test] async fn should_allow_generating_a_new_auth_key() { diff --git a/tests/api/tests/v1/context/mod.rs b/tests/api/v1/tests/context/mod.rs similarity index 100% rename from tests/api/tests/v1/context/mod.rs rename to tests/api/v1/tests/context/mod.rs diff --git a/tests/api/tests/v1/context/stats.rs b/tests/api/v1/tests/context/stats.rs similarity index 94% rename from tests/api/tests/v1/context/stats.rs rename to tests/api/v1/tests/context/stats.rs index 2d9423deb..6f850a62c 100644 --- a/tests/api/tests/v1/context/stats.rs +++ 
b/tests/api/v1/tests/context/stats.rs @@ -4,10 +4,10 @@ use torrust_tracker::apis::v1::context::stats::resources::Stats; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; -use crate::api::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; -use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::test_environment::running_test_environment; +use crate::api::v1::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; +use crate::api::v1::client::Client; use crate::common::fixtures::PeerBuilder; #[tokio::test] diff --git a/tests/api/tests/v1/context/torrent.rs b/tests/api/v1/tests/context/torrent.rs similarity index 97% rename from tests/api/tests/v1/context/torrent.rs rename to tests/api/v1/tests/context/torrent.rs index cbe216d6c..8c7031f0e 100644 --- a/tests/api/tests/v1/context/torrent.rs +++ b/tests/api/v1/tests/context/torrent.rs @@ -5,14 +5,14 @@ use torrust_tracker::apis::v1::context::torrent::resources::torrent::{self, Torr use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; -use crate::api::asserts::{ +use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::api::test_environment::running_test_environment; +use crate::api::v1::asserts::{ assert_bad_request, assert_invalid_infohash_param, assert_not_found, assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, }; -use crate::api::client::Client; -use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::api::test_environment::running_test_environment; -use crate::api::tests::fixtures::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; +use crate::api::v1::client::Client; +use 
crate::api::v1::tests::fixtures::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; use crate::common::fixtures::PeerBuilder; use crate::common::http::{Query, QueryParam}; diff --git a/tests/api/tests/v1/context/whitelist.rs b/tests/api/v1/tests/context/whitelist.rs similarity index 97% rename from tests/api/tests/v1/context/whitelist.rs rename to tests/api/v1/tests/context/whitelist.rs index 29ea573c0..05cc8c4a7 100644 --- a/tests/api/tests/v1/context/whitelist.rs +++ b/tests/api/v1/tests/context/whitelist.rs @@ -3,15 +3,15 @@ use std::str::FromStr; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; -use crate::api::asserts::{ - assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, - assert_invalid_infohash_param, assert_not_found, assert_ok, assert_token_not_valid, assert_unauthorized, -}; -use crate::api::client::Client; use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::api::force_database_error; use crate::api::test_environment::running_test_environment; -use crate::api::tests::fixtures::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; +use crate::api::v1::asserts::{ + assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, + assert_invalid_infohash_param, assert_not_found, assert_ok, assert_token_not_valid, assert_unauthorized, +}; +use crate::api::v1::client::Client; +use crate::api::v1::tests::fixtures::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { diff --git a/tests/api/tests/fixtures.rs b/tests/api/v1/tests/fixtures.rs similarity index 100% rename from tests/api/tests/fixtures.rs rename to tests/api/v1/tests/fixtures.rs diff --git a/tests/api/tests/mod.rs 
b/tests/api/v1/tests/mod.rs similarity index 50% rename from tests/api/tests/mod.rs rename to tests/api/v1/tests/mod.rs index c795e0032..38b4a2b37 100644 --- a/tests/api/tests/mod.rs +++ b/tests/api/v1/tests/mod.rs @@ -1,3 +1,4 @@ +pub mod authentication; pub mod configuration; +pub mod context; pub mod fixtures; -pub mod v1; From 7f4479ac9ad3a0b08df32718cdabb4f118aff5ac Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 12 Mar 2023 12:34:03 +0000 Subject: [PATCH 414/435] refactor(tests): one binary --- tests/http_tracker.rs | 1443 ----------------- tests/integration.rs | 7 + tests/{ => servers}/api/connection_info.rs | 0 tests/{ => servers}/api/mod.rs | 0 tests/{ => servers}/api/test_environment.rs | 0 tests/{ => servers}/api/v1/asserts.rs | 0 tests/{ => servers}/api/v1/client.rs | 2 +- .../api/v1/contract}/authentication.rs | 6 +- .../api/v1/contract}/configuration.rs | 2 +- .../api/v1/contract}/context/auth_key.rs | 10 +- .../api/v1/contract}/context/mod.rs | 0 .../api/v1/contract}/context/stats.rs | 8 +- .../api/v1/contract}/context/torrent.rs | 16 +- .../api/v1/contract}/context/whitelist.rs | 14 +- .../api/v1/contract}/fixtures.rs | 0 .../tests => servers/api/v1/contract}/mod.rs | 0 tests/{ => servers}/api/v1/mod.rs | 2 +- tests/{ => servers}/http/asserts.rs | 2 +- tests/{ => servers}/http/client.rs | 0 tests/{ => servers}/http/connection_info.rs | 0 tests/{ => servers}/http/mod.rs | 1 + tests/{ => servers}/http/requests/announce.rs | 2 +- tests/{ => servers}/http/requests/mod.rs | 0 tests/{ => servers}/http/requests/scrape.rs | 2 +- .../{ => servers}/http/responses/announce.rs | 0 tests/{ => servers}/http/responses/error.rs | 0 tests/{ => servers}/http/responses/mod.rs | 0 tests/{ => servers}/http/responses/scrape.rs | 2 +- tests/{ => servers}/http/test_environment.rs | 0 tests/servers/http/v1/contract.rs | 1425 ++++++++++++++++ tests/servers/http/v1/mod.rs | 1 + tests/servers/mod.rs | 5 + tests/{ => servers}/udp/asserts.rs | 0 tests/{ => 
servers}/udp/client.rs | 2 +- tests/servers/udp/contract.rs | 160 ++ tests/{ => servers}/udp/mod.rs | 1 + tests/{ => servers}/udp/test_environment.rs | 0 tests/tracker_api.rs | 7 - tests/udp_tracker.rs | 173 -- 39 files changed, 1637 insertions(+), 1656 deletions(-) delete mode 100644 tests/http_tracker.rs create mode 100644 tests/integration.rs rename tests/{ => servers}/api/connection_info.rs (100%) rename tests/{ => servers}/api/mod.rs (100%) rename tests/{ => servers}/api/test_environment.rs (100%) rename tests/{ => servers}/api/v1/asserts.rs (100%) rename tests/{ => servers}/api/v1/client.rs (98%) rename tests/{api/v1/tests => servers/api/v1/contract}/authentication.rs (92%) rename tests/{api/v1/tests => servers/api/v1/contract}/configuration.rs (86%) rename tests/{api/v1/tests => servers/api/v1/contract}/context/auth_key.rs (96%) rename tests/{api/v1/tests => servers/api/v1/contract}/context/mod.rs (100%) rename tests/{api/v1/tests => servers/api/v1/contract}/context/stats.rs (86%) rename tests/{api/v1/tests => servers/api/v1/contract}/context/torrent.rs (95%) rename tests/{api/v1/tests => servers/api/v1/contract}/context/whitelist.rs (95%) rename tests/{api/v1/tests => servers/api/v1/contract}/fixtures.rs (100%) rename tests/{api/v1/tests => servers/api/v1/contract}/mod.rs (100%) rename tests/{ => servers}/api/v1/mod.rs (64%) rename tests/{ => servers}/http/asserts.rs (99%) rename tests/{ => servers}/http/client.rs (100%) rename tests/{ => servers}/http/connection_info.rs (100%) rename tests/{ => servers}/http/mod.rs (98%) rename tests/{ => servers}/http/requests/announce.rs (99%) rename tests/{ => servers}/http/requests/mod.rs (100%) rename tests/{ => servers}/http/requests/scrape.rs (97%) rename tests/{ => servers}/http/responses/announce.rs (100%) rename tests/{ => servers}/http/responses/error.rs (100%) rename tests/{ => servers}/http/responses/mod.rs (100%) rename tests/{ => servers}/http/responses/scrape.rs (99%) rename tests/{ => 
servers}/http/test_environment.rs (100%) create mode 100644 tests/servers/http/v1/contract.rs create mode 100644 tests/servers/http/v1/mod.rs create mode 100644 tests/servers/mod.rs rename tests/{ => servers}/udp/asserts.rs (100%) rename tests/{ => servers}/udp/client.rs (98%) create mode 100644 tests/servers/udp/contract.rs rename tests/{ => servers}/udp/mod.rs (91%) rename tests/{ => servers}/udp/test_environment.rs (100%) delete mode 100644 tests/tracker_api.rs delete mode 100644 tests/udp_tracker.rs diff --git a/tests/http_tracker.rs b/tests/http_tracker.rs deleted file mode 100644 index 730da93d5..000000000 --- a/tests/http_tracker.rs +++ /dev/null @@ -1,1443 +0,0 @@ -/// Integration tests for HTTP tracker server -/// -/// ```text -/// cargo test `http_tracker_server` -- --nocapture -/// ``` -mod common; -mod http; - -pub type V1 = torrust_tracker::http::v1::launcher::Launcher; - -mod http_tracker { - - mod v1 { - - use torrust_tracker_test_helpers::configuration; - - use crate::http::test_environment::running_test_environment; - use crate::V1; - - #[tokio::test] - async fn test_environment_should_be_started_and_stopped() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - test_env.stop().await; - } - - mod for_all_config_modes { - - mod and_running_on_reverse_proxy { - use torrust_tracker_test_helpers::configuration; - - use crate::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; - use crate::http::client::Client; - use crate::http::requests::announce::QueryBuilder; - use crate::http::test_environment::running_test_environment; - use crate::V1; - - #[tokio::test] - async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { - // If the tracker is running behind a reverse proxy, the peer IP is the - // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. 
- - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; - - let params = QueryBuilder::default().query().params(); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; - - let params = QueryBuilder::default().query().params(); - - let response = Client::new(*test_env.bind_address()) - .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") - .await; - - assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; - - test_env.stop().await; - } - } - - mod receiving_an_announce_request { - - // Announce request documentation: - // - // BEP 03. The BitTorrent Protocol Specification - // https://www.bittorrent.org/beps/bep_0003.html - // - // BEP 23. 
Tracker Returns Compact Peer Lists - // https://www.bittorrent.org/beps/bep_0023.html - // - // Vuze (bittorrent client) docs: - // https://wiki.vuze.com/w/Announce - - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use std::str::FromStr; - - use local_ip_address::local_ip; - use reqwest::Response; - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; - use crate::http::asserts::{ - assert_announce_response, assert_bad_announce_request_error_response, - assert_cannot_parse_query_param_error_response, assert_cannot_parse_query_params_error_response, - assert_compact_announce_response, assert_empty_announce_response, assert_is_announce_response, - assert_missing_query_params_for_announce_request_error_response, - }; - use crate::http::client::Client; - use crate::http::requests::announce::{Compact, QueryBuilder}; - use crate::http::responses; - use crate::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; - use crate::http::test_environment::running_test_environment; - use crate::V1; - - #[tokio::test] - async fn should_respond_if_only_the_mandatory_fields_are_provided() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - params.remove_optional_params(); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_is_announce_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_url_query_component_is_empty() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let response = Client::new(*test_env.bind_address()).get("announce").await; - - assert_missing_query_params_for_announce_request_error_response(response).await; - - 
test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_url_query_parameters_are_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let invalid_query_param = "a=b=c"; - - let response = Client::new(*test_env.bind_address()) - .get(&format!("announce?{invalid_query_param}")) - .await; - - assert_cannot_parse_query_param_error_response(response, "invalid param a=b=c").await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_a_mandatory_field_is_missing() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - // Without `info_hash` param - - let mut params = QueryBuilder::default().query().params(); - - params.info_hash = None; - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_bad_announce_request_error_response(response, "missing param info_hash").await; - - // Without `peer_id` param - - let mut params = QueryBuilder::default().query().params(); - - params.peer_id = None; - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_bad_announce_request_error_response(response, "missing param peer_id").await; - - // Without `port` param - - let mut params = QueryBuilder::default().query().params(); - - params.port = None; - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_bad_announce_request_error_response(response, "missing param port").await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - for invalid_value in &invalid_info_hashes() { - params.set("info_hash", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - 
- assert_cannot_parse_query_params_error_response(response, "").await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_fail_when_the_peer_address_param_is_invalid() { - // AnnounceQuery does not even contain the `peer_addr` - // The peer IP is obtained in two ways: - // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP. - // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request HTTP header. - - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_is_announce_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_downloaded_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = ["-1", "1.1", "a"]; - - for invalid_value in invalid_values { - params.set("downloaded", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_bad_announce_request_error_response(response, "invalid param value").await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_uploaded_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = ["-1", "1.1", "a"]; - - for invalid_value in invalid_values { - params.set("uploaded", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_bad_announce_request_error_response(response, "invalid param value").await; - } - - test_env.stop().await; - 
} - - #[tokio::test] - async fn should_fail_when_the_peer_id_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = [ - "0", - "-1", - "1.1", - "a", - "-qB0000000000000000", // 19 bytes - "-qB000000000000000000", // 21 bytes - ]; - - for invalid_value in invalid_values { - params.set("peer_id", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_bad_announce_request_error_response(response, "invalid param value").await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_port_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = ["-1", "1.1", "a"]; - - for invalid_value in invalid_values { - params.set("port", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_bad_announce_request_error_response(response, "invalid param value").await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_left_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = ["-1", "1.1", "a"]; - - for invalid_value in invalid_values { - params.set("left", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_bad_announce_request_error_response(response, "invalid param value").await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_event_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = 
QueryBuilder::default().query().params(); - - let invalid_values = [ - "0", - "-1", - "1.1", - "a", - "Started", // It should be lowercase to be valid: `started` - "Stopped", // It should be lowercase to be valid: `stopped` - "Completed", // It should be lowercase to be valid: `completed` - ]; - - for invalid_value in invalid_values { - params.set("event", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_bad_announce_request_error_response(response, "invalid param value").await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_compact_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; - - let mut params = QueryBuilder::default().query().params(); - - let invalid_values = ["-1", "1.1", "a"]; - - for invalid_value in invalid_values { - params.set("compact", invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_bad_announce_request_error_response(response, "invalid param value").await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) - .query(), - ) - .await; - - assert_announce_response( - response, - &Announce { - complete: 1, // the peer for this test - incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, - peers: vec![], - }, - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_list_of_previously_announced_peers() { - let test_env = 
running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - // Peer 1 - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); - - // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; - - // Announce the new Peer 2. This new peer is non included on the response peer list - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .query(), - ) - .await; - - // It should only contain the previously announced peer - assert_announce_response( - response, - &Announce { - complete: 2, - incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, - peers: vec![DictionaryPeer::from(previously_announced_peer)], - }, - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - // Announce a peer using IPV4 - let peer_using_ipv4 = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) - .build(); - test_env.add_torrent_peer(&info_hash, &peer_using_ipv4).await; - - // Announce a peer using IPV6 - let peer_using_ipv6 = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .with_peer_addr(&SocketAddr::new( - IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), - 8080, - )) - .build(); - 
test_env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; - - // Announce the new Peer. - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000003")) - .query(), - ) - .await; - - // The newly announced peer is not included on the response peer list, - // but all the previously announced peers should be included regardless the IP version they are using. - assert_announce_response( - response, - &Announce { - complete: 3, - incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, - peers: vec![DictionaryPeer::from(peer_using_ipv4), DictionaryPeer::from(peer_using_ipv6)], - }, - ) - .await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let peer = PeerBuilder::default().build(); - - // Add a peer - test_env.add_torrent_peer(&info_hash, &peer).await; - - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer.peer_id) - .query(); - - assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); - - let response = Client::new(*test_env.bind_address()).announce(&announce_query).await; - - assert_empty_announce_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_compact_response() { - // Tracker Returns Compact Peer Lists - // https://www.bittorrent.org/beps/bep_0023.html - - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - // Peer 1 - let 
previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); - - // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; - - // Announce the new Peer 2 accepting compact responses - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .with_compact(Compact::Accepted) - .query(), - ) - .await; - - let expected_response = responses::announce::Compact { - complete: 2, - incomplete: 0, - interval: 120, - min_interval: 120, - peers: CompactPeerList::new([CompactPeer::new(&previously_announced_peer.peer_addr)].to_vec()), - }; - - assert_compact_announce_response(response, &expected_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_return_the_compact_response_by_default() { - // code-review: the HTTP tracker does not return the compact response by default if the "compact" - // param is not provided in the announce URL. The BEP 23 suggest to do so. 
- - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - // Peer 1 - let previously_announced_peer = PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .build(); - - // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; - - // Announce the new Peer 2 without passing the "compact" param - // By default it should respond with the compact peer list - // https://www.bittorrent.org/beps/bep_0023.html - let response = Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_id(&peer::Id(*b"-qB00000000000000002")) - .without_compact() - .query(), - ) - .await; - - assert!(!is_a_compact_announce_response(response).await); - - test_env.stop().await; - } - - async fn is_a_compact_announce_response(response: Response) -> bool { - let bytes = response.bytes().await.unwrap(); - let compact_announce = serde_bencode::from_bytes::(&bytes); - compact_announce.is_ok() - } - - #[tokio::test] - async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().query()) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp4_connections_handled, 1); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) - .announce(&QueryBuilder::default().query()) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp6_connections_handled, 1); - - 
drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { - // The tracker ignores the peer address in the request param. It uses the client remote ip address. - - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) - .query(), - ) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp6_connections_handled, 0); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().query()) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp4_announces_handled, 1); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) - .announce(&QueryBuilder::default().query()) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp6_announces_handled, 1); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() - { - // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - Client::new(*test_env.bind_address()) - .announce( - &QueryBuilder::default() - .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) - .query(), - ) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp6_announces_handled, 0); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let client_ip = local_ip().unwrap(); - - let client = Client::bind(*test_env.bind_address(), client_ip); - - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) - .query(); - - client.announce(&announce_query).await; - - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; - - assert_eq!(peer_addr.ip(), client_ip); - assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - - test_env.stop().await; - } - - #[tokio::test] - async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( - ) { - /* We assume that both the client and tracker share the same public IP. 
- - client <-> tracker <-> Internet - 127.0.0.1 external_ip = "2.137.87.41" - */ - - let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( - IpAddr::from_str("2.137.87.41").unwrap(), - )) - .await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); - let client_ip = loopback_ip; - - let client = Client::bind(*test_env.bind_address(), client_ip); - - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) - .query(); - - client.announce(&announce_query).await; - - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; - - assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); - assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - - test_env.stop().await; - } - - #[tokio::test] - async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( - ) { - /* We assume that both the client and tracker share the same public IP. 
- - client <-> tracker <-> Internet - ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" - */ - - let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( - IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), - )) - .await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); - let client_ip = loopback_ip; - - let client = Client::bind(*test_env.bind_address(), client_ip); - - let announce_query = QueryBuilder::default() - .with_info_hash(&info_hash) - .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) - .query(); - - client.announce(&announce_query).await; - - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; - - assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); - assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - - test_env.stop().await; - } - - #[tokio::test] - async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( - ) { - /* - client <-> http proxy <-> tracker <-> Internet - ip: header: config: peer addr: - 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 - */ - - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - let client = Client::new(*test_env.bind_address()); - - let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); - - client - .announce_with_header( - &announce_query, - "X-Forwarded-For", - "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", - ) - .await; - - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; - let peer_addr = peers[0].peer_addr; - - assert_eq!(peer_addr.ip(), 
IpAddr::from_str("150.172.238.178").unwrap()); - - test_env.stop().await; - } - } - - mod receiving_an_scrape_request { - - // Scrape documentation: - // - // BEP 48. Tracker Protocol Extension: Scrape - // https://www.bittorrent.org/beps/bep_0048.html - // - // Vuze (bittorrent client) docs: - // https://wiki.vuze.com/w/Scrape - - use std::net::IpAddr; - use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; - use crate::http::asserts::{ - assert_cannot_parse_query_params_error_response, - assert_missing_query_params_for_scrape_request_error_response, assert_scrape_response, - }; - use crate::http::client::Client; - use crate::http::requests; - use crate::http::requests::scrape::QueryBuilder; - use crate::http::responses::scrape::{self, File, ResponseBuilder}; - use crate::http::test_environment::running_test_environment; - use crate::V1; - - //#[tokio::test] - #[allow(dead_code)] - async fn should_fail_when_the_request_is_empty() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - let response = Client::new(*test_env.bind_address()).get("scrape").await; - - assert_missing_query_params_for_scrape_request_error_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let mut params = QueryBuilder::default().query().params(); - - for invalid_value in &invalid_info_hashes() { - params.set_one_info_hash_param(invalid_value); - - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; - - assert_cannot_parse_query_params_error_response(response, "").await; - } - - test_env.stop().await; - } - - #[tokio::test] - async fn 
should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 0, - downloaded: 0, - incomplete: 1, - }, - ) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() - { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_no_bytes_pending_to_download() - .build(), - ) - .await; - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 1, - downloaded: 0, - incomplete: 0, - }, - ) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let 
test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_accept_multiple_infohashes() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .add_info_hash(&info_hash1) - .add_info_hash(&info_hash2) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file(info_hash1.bytes(), File::zeroed()) - .add_file(info_hash2.bytes(), File::zeroed()) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp4_scrapes_handled, 1); - - drop(stats); - - test_env.stop().await; - } - - #[tokio::test] - async fn 
should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let stats = test_env.tracker.get_stats().await; - - assert_eq!(stats.tcp6_scrapes_handled, 1); - - drop(stats); - - test_env.stop().await; - } - } - } - - mod configured_as_whitelisted { - - mod and_receiving_an_announce_request { - use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker_test_helpers::configuration; - - use crate::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; - use crate::http::client::Client; - use crate::http::requests::announce::QueryBuilder; - use crate::http::test_environment::running_test_environment; - use crate::V1; - - #[tokio::test] - async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - let response = Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) - .await; - - assert_torrent_not_in_whitelist_error_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_allow_announcing_a_whitelisted_torrent() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .tracker - .add_torrent_to_whitelist(&info_hash) - .await - .expect("should add the torrent to the whitelist"); 
- - let response = Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) - .await; - - assert_is_announce_response(response).await; - - test_env.stop().await; - } - } - - mod receiving_an_scrape_request { - use std::str::FromStr; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::PeerBuilder; - use crate::http::asserts::assert_scrape_response; - use crate::http::client::Client; - use crate::http::requests; - use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::test_environment::running_test_environment; - use crate::V1; - - #[tokio::test] - async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - 
.with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - test_env - .tracker - .add_torrent_to_whitelist(&info_hash) - .await - .expect("should add the torrent to the whitelist"); - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 0, - downloaded: 0, - incomplete: 1, - }, - ) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - } - } - - mod configured_as_private { - - mod and_receiving_an_announce_request { - use std::str::FromStr; - use std::time::Duration; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth::Key; - use torrust_tracker_test_helpers::configuration; - - use crate::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; - use crate::http::client::Client; - use crate::http::requests::announce::QueryBuilder; - use crate::http::test_environment::running_test_environment; - use crate::V1; - - #[tokio::test] - async fn should_respond_to_authenticated_peers() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - - let response = Client::authenticated(*test_env.bind_address(), key.id()) - .announce(&QueryBuilder::default().query()) - .await; - - assert_is_announce_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - 
let response = Client::new(*test_env.bind_address()) - .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) - .await; - - assert_authentication_error_response(response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let invalid_key = "INVALID_KEY"; - - let response = Client::new(*test_env.bind_address()) - .get(&format!( - "announce/{invalid_key}?info_hash=%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0" - )) - .await; - - assert_authentication_error_response(response).await; - } - - #[tokio::test] - async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - // The tracker does not have this key - let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - - let response = Client::authenticated(*test_env.bind_address(), unregistered_key) - .announce(&QueryBuilder::default().query()) - .await; - - assert_authentication_error_response(response).await; - - test_env.stop().await; - } - } - - mod receiving_an_scrape_request { - - use std::str::FromStr; - use std::time::Duration; - - use torrust_tracker::protocol::info_hash::InfoHash; - use torrust_tracker::tracker::auth::Key; - use torrust_tracker::tracker::peer; - use torrust_tracker_test_helpers::configuration; - - use crate::common::fixtures::PeerBuilder; - use crate::http::asserts::{assert_authentication_error_response, assert_scrape_response}; - use crate::http::client::Client; - use crate::http::requests; - use crate::http::responses::scrape::{File, ResponseBuilder}; - use crate::http::test_environment::running_test_environment; - use crate::V1; - - 
#[tokio::test] - async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let invalid_key = "INVALID_KEY"; - - let response = Client::new(*test_env.bind_address()) - .get(&format!( - "scrape/{invalid_key}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" - )) - .await; - - assert_authentication_error_response(response).await; - } - - #[tokio::test] - async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - let response = Client::new(*test_env.bind_address()) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - - let response = Client::authenticated(*test_env.bind_address(), key.id()) - .scrape( - 
&requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default() - .add_file( - info_hash.bytes(), - File { - complete: 0, - downloaded: 0, - incomplete: 1, - }, - ) - .build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - - #[tokio::test] - async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { - // There is not authentication error - // code-review: should this really be this way? - - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; - - let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); - - let response = Client::authenticated(*test_env.bind_address(), false_key) - .scrape( - &requests::scrape::QueryBuilder::default() - .with_one_info_hash(&info_hash) - .query(), - ) - .await; - - let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); - - assert_scrape_response(response, &expected_scrape_response).await; - - test_env.stop().await; - } - } - } - - mod configured_as_private_and_whitelisted { - - mod and_receiving_an_announce_request {} - - mod receiving_an_scrape_request {} - } - } -} diff --git a/tests/integration.rs b/tests/integration.rs new file mode 100644 index 000000000..5d66d9074 --- /dev/null +++ b/tests/integration.rs @@ -0,0 +1,7 @@ +//! Integration tests. +//! +//! ```text +//! cargo test --test integration +//! 
``` +mod common; +mod servers; diff --git a/tests/api/connection_info.rs b/tests/servers/api/connection_info.rs similarity index 100% rename from tests/api/connection_info.rs rename to tests/servers/api/connection_info.rs diff --git a/tests/api/mod.rs b/tests/servers/api/mod.rs similarity index 100% rename from tests/api/mod.rs rename to tests/servers/api/mod.rs diff --git a/tests/api/test_environment.rs b/tests/servers/api/test_environment.rs similarity index 100% rename from tests/api/test_environment.rs rename to tests/servers/api/test_environment.rs diff --git a/tests/api/v1/asserts.rs b/tests/servers/api/v1/asserts.rs similarity index 100% rename from tests/api/v1/asserts.rs rename to tests/servers/api/v1/asserts.rs diff --git a/tests/api/v1/client.rs b/tests/servers/api/v1/client.rs similarity index 98% rename from tests/api/v1/client.rs rename to tests/servers/api/v1/client.rs index d5cdf69f6..2b6db2e77 100644 --- a/tests/api/v1/client.rs +++ b/tests/servers/api/v1/client.rs @@ -1,7 +1,7 @@ use reqwest::Response; -use crate::api::connection_info::ConnectionInfo; use crate::common::http::{Query, QueryParam, ReqwestQuery}; +use crate::servers::api::connection_info::ConnectionInfo; /// API Client pub struct Client { diff --git a/tests/api/v1/tests/authentication.rs b/tests/servers/api/v1/contract/authentication.rs similarity index 92% rename from tests/api/v1/tests/authentication.rs rename to tests/servers/api/v1/contract/authentication.rs index 5be96e078..fb8de1810 100644 --- a/tests/api/v1/tests/authentication.rs +++ b/tests/servers/api/v1/contract/authentication.rs @@ -1,9 +1,9 @@ use torrust_tracker_test_helpers::configuration; -use crate::api::test_environment::running_test_environment; -use crate::api::v1::asserts::{assert_token_not_valid, assert_unauthorized}; -use crate::api::v1::client::Client; use crate::common::http::{Query, QueryParam}; +use crate::servers::api::test_environment::running_test_environment; +use 
crate::servers::api::v1::asserts::{assert_token_not_valid, assert_unauthorized}; +use crate::servers::api::v1::client::Client; #[tokio::test] async fn should_authenticate_requests_by_using_a_token_query_param() { diff --git a/tests/api/v1/tests/configuration.rs b/tests/servers/api/v1/contract/configuration.rs similarity index 86% rename from tests/api/v1/tests/configuration.rs rename to tests/servers/api/v1/contract/configuration.rs index f81201191..e4b608607 100644 --- a/tests/api/v1/tests/configuration.rs +++ b/tests/servers/api/v1/contract/configuration.rs @@ -1,6 +1,6 @@ use torrust_tracker_test_helpers::configuration; -use crate::api::test_environment::stopped_test_environment; +use crate::servers::api::test_environment::stopped_test_environment; #[tokio::test] #[should_panic] diff --git a/tests/api/v1/tests/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs similarity index 96% rename from tests/api/v1/tests/context/auth_key.rs rename to tests/servers/api/v1/contract/context/auth_key.rs index 814afeacf..a99272e84 100644 --- a/tests/api/v1/tests/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -3,14 +3,14 @@ use std::time::Duration; use torrust_tracker::tracker::auth::Key; use torrust_tracker_test_helpers::configuration; -use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::api::force_database_error; -use crate::api::test_environment::running_test_environment; -use crate::api::v1::asserts::{ +use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::servers::api::force_database_error; +use crate::servers::api::test_environment::running_test_environment; +use crate::servers::api::v1::asserts::{ assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_invalid_auth_key_param, assert_invalid_key_duration_param, assert_ok, assert_token_not_valid, 
assert_unauthorized, }; -use crate::api::v1::client::Client; +use crate::servers::api::v1::client::Client; #[tokio::test] async fn should_allow_generating_a_new_auth_key() { diff --git a/tests/api/v1/tests/context/mod.rs b/tests/servers/api/v1/contract/context/mod.rs similarity index 100% rename from tests/api/v1/tests/context/mod.rs rename to tests/servers/api/v1/contract/context/mod.rs diff --git a/tests/api/v1/tests/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs similarity index 86% rename from tests/api/v1/tests/context/stats.rs rename to tests/servers/api/v1/contract/context/stats.rs index 6f850a62c..3929a4270 100644 --- a/tests/api/v1/tests/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -4,11 +4,11 @@ use torrust_tracker::apis::v1::context::stats::resources::Stats; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; -use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::api::test_environment::running_test_environment; -use crate::api::v1::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; -use crate::api::v1::client::Client; use crate::common::fixtures::PeerBuilder; +use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::servers::api::test_environment::running_test_environment; +use crate::servers::api::v1::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; +use crate::servers::api::v1::client::Client; #[tokio::test] async fn should_allow_getting_tracker_statistics() { diff --git a/tests/api/v1/tests/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs similarity index 95% rename from tests/api/v1/tests/context/torrent.rs rename to tests/servers/api/v1/contract/context/torrent.rs index 8c7031f0e..702a8bcd4 100644 --- a/tests/api/v1/tests/context/torrent.rs +++ 
b/tests/servers/api/v1/contract/context/torrent.rs @@ -5,16 +5,18 @@ use torrust_tracker::apis::v1::context::torrent::resources::torrent::{self, Torr use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; -use crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::api::test_environment::running_test_environment; -use crate::api::v1::asserts::{ +use crate::common::fixtures::PeerBuilder; +use crate::common::http::{Query, QueryParam}; +use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::servers::api::test_environment::running_test_environment; +use crate::servers::api::v1::asserts::{ assert_bad_request, assert_invalid_infohash_param, assert_not_found, assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, }; -use crate::api::v1::client::Client; -use crate::api::v1::tests::fixtures::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; -use crate::common::fixtures::PeerBuilder; -use crate::common::http::{Query, QueryParam}; +use crate::servers::api::v1::client::Client; +use crate::servers::api::v1::contract::fixtures::{ + invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found, +}; #[tokio::test] async fn should_allow_getting_torrents() { diff --git a/tests/api/v1/tests/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs similarity index 95% rename from tests/api/v1/tests/context/whitelist.rs rename to tests/servers/api/v1/contract/context/whitelist.rs index 05cc8c4a7..67992642f 100644 --- a/tests/api/v1/tests/context/whitelist.rs +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -3,15 +3,17 @@ use std::str::FromStr; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; -use 
crate::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::api::force_database_error; -use crate::api::test_environment::running_test_environment; -use crate::api::v1::asserts::{ +use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::servers::api::force_database_error; +use crate::servers::api::test_environment::running_test_environment; +use crate::servers::api::v1::asserts::{ assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, assert_invalid_infohash_param, assert_not_found, assert_ok, assert_token_not_valid, assert_unauthorized, }; -use crate::api::v1::client::Client; -use crate::api::v1::tests::fixtures::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; +use crate::servers::api::v1::client::Client; +use crate::servers::api::v1::contract::fixtures::{ + invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found, +}; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { diff --git a/tests/api/v1/tests/fixtures.rs b/tests/servers/api/v1/contract/fixtures.rs similarity index 100% rename from tests/api/v1/tests/fixtures.rs rename to tests/servers/api/v1/contract/fixtures.rs diff --git a/tests/api/v1/tests/mod.rs b/tests/servers/api/v1/contract/mod.rs similarity index 100% rename from tests/api/v1/tests/mod.rs rename to tests/servers/api/v1/contract/mod.rs diff --git a/tests/api/v1/mod.rs b/tests/servers/api/v1/mod.rs similarity index 64% rename from tests/api/v1/mod.rs rename to tests/servers/api/v1/mod.rs index b31e5fe49..37298b377 100644 --- a/tests/api/v1/mod.rs +++ b/tests/servers/api/v1/mod.rs @@ -1,3 +1,3 @@ pub mod asserts; pub mod client; -pub mod tests; +pub mod contract; diff --git a/tests/http/asserts.rs b/tests/servers/http/asserts.rs similarity index 99% rename from tests/http/asserts.rs rename to 
tests/servers/http/asserts.rs index 932b48be4..3a2e67bf0 100644 --- a/tests/http/asserts.rs +++ b/tests/servers/http/asserts.rs @@ -4,7 +4,7 @@ use reqwest::Response; use super::responses::announce::{Announce, Compact, DeserializedCompact}; use super::responses::scrape; -use crate::http::responses::error::Error; +use crate::servers::http::responses::error::Error; pub fn assert_bencoded_error(response_text: &String, expected_failure_reason: &str, location: &'static Location<'static>) { let error_failure_reason = serde_bencode::from_str::(response_text) diff --git a/tests/http/client.rs b/tests/servers/http/client.rs similarity index 100% rename from tests/http/client.rs rename to tests/servers/http/client.rs diff --git a/tests/http/connection_info.rs b/tests/servers/http/connection_info.rs similarity index 100% rename from tests/http/connection_info.rs rename to tests/servers/http/connection_info.rs diff --git a/tests/http/mod.rs b/tests/servers/http/mod.rs similarity index 98% rename from tests/http/mod.rs rename to tests/servers/http/mod.rs index b0d896c99..cb2885df0 100644 --- a/tests/http/mod.rs +++ b/tests/servers/http/mod.rs @@ -3,6 +3,7 @@ pub mod client; pub mod requests; pub mod responses; pub mod test_environment; +pub mod v1; use percent_encoding::NON_ALPHANUMERIC; diff --git a/tests/http/requests/announce.rs b/tests/servers/http/requests/announce.rs similarity index 99% rename from tests/http/requests/announce.rs rename to tests/servers/http/requests/announce.rs index 87aa3425f..414c118ef 100644 --- a/tests/http/requests/announce.rs +++ b/tests/servers/http/requests/announce.rs @@ -6,7 +6,7 @@ use serde_repr::Serialize_repr; use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::tracker::peer::Id; -use crate::http::{percent_encode_byte_array, ByteArray20}; +use crate::servers::http::{percent_encode_byte_array, ByteArray20}; pub struct Query { pub info_hash: ByteArray20, diff --git a/tests/http/requests/mod.rs 
b/tests/servers/http/requests/mod.rs similarity index 100% rename from tests/http/requests/mod.rs rename to tests/servers/http/requests/mod.rs diff --git a/tests/http/requests/scrape.rs b/tests/servers/http/requests/scrape.rs similarity index 97% rename from tests/http/requests/scrape.rs rename to tests/servers/http/requests/scrape.rs index 979dad540..d7f7cd581 100644 --- a/tests/http/requests/scrape.rs +++ b/tests/servers/http/requests/scrape.rs @@ -3,7 +3,7 @@ use std::str::FromStr; use torrust_tracker::protocol::info_hash::InfoHash; -use crate::http::{percent_encode_byte_array, ByteArray20}; +use crate::servers::http::{percent_encode_byte_array, ByteArray20}; pub struct Query { pub info_hash: Vec, diff --git a/tests/http/responses/announce.rs b/tests/servers/http/responses/announce.rs similarity index 100% rename from tests/http/responses/announce.rs rename to tests/servers/http/responses/announce.rs diff --git a/tests/http/responses/error.rs b/tests/servers/http/responses/error.rs similarity index 100% rename from tests/http/responses/error.rs rename to tests/servers/http/responses/error.rs diff --git a/tests/http/responses/mod.rs b/tests/servers/http/responses/mod.rs similarity index 100% rename from tests/http/responses/mod.rs rename to tests/servers/http/responses/mod.rs diff --git a/tests/http/responses/scrape.rs b/tests/servers/http/responses/scrape.rs similarity index 99% rename from tests/http/responses/scrape.rs rename to tests/servers/http/responses/scrape.rs index 1aea517cf..221ff0a38 100644 --- a/tests/http/responses/scrape.rs +++ b/tests/servers/http/responses/scrape.rs @@ -4,7 +4,7 @@ use std::str; use serde::{self, Deserialize, Serialize}; use serde_bencode::value::Value; -use crate::http::{ByteArray20, InfoHash}; +use crate::servers::http::{ByteArray20, InfoHash}; #[derive(Debug, PartialEq, Default)] pub struct Response { diff --git a/tests/http/test_environment.rs b/tests/servers/http/test_environment.rs similarity index 100% rename from 
tests/http/test_environment.rs rename to tests/servers/http/test_environment.rs diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs new file mode 100644 index 000000000..eda42f1ee --- /dev/null +++ b/tests/servers/http/v1/contract.rs @@ -0,0 +1,1425 @@ +use torrust_tracker_test_helpers::configuration; + +use crate::servers::http::test_environment::running_test_environment; + +pub type V1 = torrust_tracker::http::v1::launcher::Launcher; + +#[tokio::test] +async fn test_environment_should_be_started_and_stopped() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + test_env.stop().await; +} + +mod for_all_config_modes { + + mod and_running_on_reverse_proxy { + use torrust_tracker_test_helpers::configuration; + + use crate::servers::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; + use crate::servers::http::client::Client; + use crate::servers::http::requests::announce::QueryBuilder; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + #[tokio::test] + async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { + // If the tracker is running behind a reverse proxy, the peer IP is the + // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. 
+ + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + + let params = QueryBuilder::default().query().params(); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + + let params = QueryBuilder::default().query().params(); + + let response = Client::new(*test_env.bind_address()) + .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") + .await; + + assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; + + test_env.stop().await; + } + } + + mod receiving_an_announce_request { + + // Announce request documentation: + // + // BEP 03. The BitTorrent Protocol Specification + // https://www.bittorrent.org/beps/bep_0003.html + // + // BEP 23. 
Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Announce + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::str::FromStr; + + use local_ip_address::local_ip; + use reqwest::Response; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; + + use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; + use crate::servers::http::asserts::{ + assert_announce_response, assert_bad_announce_request_error_response, assert_cannot_parse_query_param_error_response, + assert_cannot_parse_query_params_error_response, assert_compact_announce_response, assert_empty_announce_response, + assert_is_announce_response, assert_missing_query_params_for_announce_request_error_response, + }; + use crate::servers::http::client::Client; + use crate::servers::http::requests::announce::{Compact, QueryBuilder}; + use crate::servers::http::responses; + use crate::servers::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + #[tokio::test] + async fn should_respond_if_only_the_mandatory_fields_are_provided() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + params.remove_optional_params(); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_is_announce_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_url_query_component_is_empty() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let response = Client::new(*test_env.bind_address()).get("announce").await; + + 
assert_missing_query_params_for_announce_request_error_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_url_query_parameters_are_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let invalid_query_param = "a=b=c"; + + let response = Client::new(*test_env.bind_address()) + .get(&format!("announce?{invalid_query_param}")) + .await; + + assert_cannot_parse_query_param_error_response(response, "invalid param a=b=c").await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_a_mandatory_field_is_missing() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + // Without `info_hash` param + + let mut params = QueryBuilder::default().query().params(); + + params.info_hash = None; + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "missing param info_hash").await; + + // Without `peer_id` param + + let mut params = QueryBuilder::default().query().params(); + + params.peer_id = None; + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "missing param peer_id").await; + + // Without `port` param + + let mut params = QueryBuilder::default().query().params(); + + params.port = None; + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "missing param port").await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_info_hash_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + for invalid_value in &invalid_info_hashes() { + params.set("info_hash", invalid_value); + + let 
response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_cannot_parse_query_params_error_response(response, "").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_not_fail_when_the_peer_address_param_is_invalid() { + // AnnounceQuery does not even contain the `peer_addr` + // The peer IP is obtained in two ways: + // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP. + // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request HTTP header. + + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_is_announce_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_downloaded_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("downloaded", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_uploaded_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("uploaded", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + 
assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_peer_id_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "-qB0000000000000000", // 19 bytes + "-qB000000000000000000", // 21 bytes + ]; + + for invalid_value in invalid_values { + params.set("peer_id", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_port_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("port", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_left_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("left", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_event_param_is_invalid() { + let 
test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "Started", // It should be lowercase to be valid: `started` + "Stopped", // It should be lowercase to be valid: `stopped` + "Completed", // It should be lowercase to be valid: `completed` + ]; + + for invalid_value in invalid_values { + params.set("event", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_compact_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("compact", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) + .query(), + ) + .await; + + assert_announce_response( + response, + &Announce { + complete: 1, // the peer for this test + incomplete: 0, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, + peers: vec![], + }, + ) + .await; + + test_env.stop().await; + } + + #[tokio::test] + async fn 
should_return_the_list_of_previously_announced_peers() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + + // Add the Peer 1 + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2. This new peer is non included on the response peer list + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .query(), + ) + .await; + + // It should only contain the previously announced peer + assert_announce_response( + response, + &Announce { + complete: 2, + incomplete: 0, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, + peers: vec![DictionaryPeer::from(previously_announced_peer)], + }, + ) + .await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Announce a peer using IPV4 + let peer_using_ipv4 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) + .build(); + test_env.add_torrent_peer(&info_hash, &peer_using_ipv4).await; + + // Announce a peer using IPV6 + let peer_using_ipv6 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 
0x6969, 0x6969)), + 8080, + )) + .build(); + test_env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; + + // Announce the new Peer. + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000003")) + .query(), + ) + .await; + + // The newly announced peer is not included on the response peer list, + // but all the previously announced peers should be included regardless the IP version they are using. + assert_announce_response( + response, + &Announce { + complete: 3, + incomplete: 0, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, + peers: vec![DictionaryPeer::from(peer_using_ipv4), DictionaryPeer::from(peer_using_ipv6)], + }, + ) + .await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let peer = PeerBuilder::default().build(); + + // Add a peer + test_env.add_torrent_peer(&info_hash, &peer).await; + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer.peer_id) + .query(); + + assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); + + let response = Client::new(*test_env.bind_address()).announce(&announce_query).await; + + assert_empty_announce_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_compact_response() { + // Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html + + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + + // Add the Peer 1 + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2 accepting compact responses + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_compact(Compact::Accepted) + .query(), + ) + .await; + + let expected_response = responses::announce::Compact { + complete: 2, + incomplete: 0, + interval: 120, + min_interval: 120, + peers: CompactPeerList::new([CompactPeer::new(&previously_announced_peer.peer_addr)].to_vec()), + }; + + assert_compact_announce_response(response, &expected_response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_not_return_the_compact_response_by_default() { + // code-review: the HTTP tracker does not return the compact response by default if the "compact" + // param is not provided in the announce URL. The BEP 23 suggest to do so. 
+ + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + + // Add the Peer 1 + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2 without passing the "compact" param + // By default it should respond with the compact peer list + // https://www.bittorrent.org/beps/bep_0023.html + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .without_compact() + .query(), + ) + .await; + + assert!(!is_a_compact_announce_response(response).await); + + test_env.stop().await; + } + + async fn is_a_compact_announce_response(response: Response) -> bool { + let bytes = response.bytes().await.unwrap(); + let compact_announce = serde_bencode::from_bytes::(&bytes); + compact_announce.is_ok() + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + + drop(stats); + + test_env.stop().await; + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; + + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 1); + + 
drop(stats); + + test_env.stop().await; + } + + #[tokio::test] + async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { + // The tracker ignores the peer address in the request param. It uses the client remote ip address. + + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 0); + + drop(stats); + + test_env.stop().await; + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, 1); + + drop(stats); + + test_env.stop().await; + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; + + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 1); + + drop(stats); + + test_env.stop().await; + } + + #[tokio::test] + async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { + // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
+ + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 0); + + drop(stats); + + test_env.stop().await; + } + + #[tokio::test] + async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let client_ip = local_ip().unwrap(); + + let client = Client::bind(*test_env.bind_address(), client_ip); + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), client_ip); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; + } + + #[tokio::test] + async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + /* We assume that both the client and tracker share the same public IP. 
+ + client <-> tracker <-> Internet + 127.0.0.1 external_ip = "2.137.87.41" + */ + + let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( + IpAddr::from_str("2.137.87.41").unwrap(), + )) + .await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let client = Client::bind(*test_env.bind_address(), client_ip); + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; + } + + #[tokio::test] + async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + /* We assume that both the client and tracker share the same public IP. 
+ + client <-> tracker <-> Internet + ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" + */ + + let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( + IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), + )) + .await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let client = Client::bind(*test_env.bind_address(), client_ip); + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; + } + + #[tokio::test] + async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( + ) { + /* + client <-> http proxy <-> tracker <-> Internet + ip: header: config: peer addr: + 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 + */ + + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let client = Client::new(*test_env.bind_address()); + + let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); + + client + .announce_with_header( + &announce_query, + "X-Forwarded-For", + "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", + ) + .await; + + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), 
IpAddr::from_str("150.172.238.178").unwrap()); + + test_env.stop().await; + } + } + + mod receiving_an_scrape_request { + + // Scrape documentation: + // + // BEP 48. Tracker Protocol Extension: Scrape + // https://www.bittorrent.org/beps/bep_0048.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Scrape + + use std::net::IpAddr; + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; + + use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; + use crate::servers::http::asserts::{ + assert_cannot_parse_query_params_error_response, assert_missing_query_params_for_scrape_request_error_response, + assert_scrape_response, + }; + use crate::servers::http::client::Client; + use crate::servers::http::requests; + use crate::servers::http::requests::scrape::QueryBuilder; + use crate::servers::http::responses::scrape::{self, File, ResponseBuilder}; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_request_is_empty() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let response = Client::new(*test_env.bind_address()).get("scrape").await; + + assert_missing_query_params_for_scrape_request_error_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_info_hash_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let mut params = QueryBuilder::default().query().params(); + + for invalid_value in &invalid_info_hashes() { + params.set_one_info_hash_param(invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_cannot_parse_query_params_error_response(response, 
"").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_no_bytes_pending_to_download() + .build(), + ) + .await; + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 1, + downloaded: 0, + incomplete: 0, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn 
should_return_a_file_with_zeroed_values_when_there_are_no_peers() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_accept_multiple_infohashes() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .add_info_hash(&info_hash1) + .add_info_hash(&info_hash2) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file(info_hash1.bytes(), File::zeroed()) + .add_file(info_hash2.bytes(), File::zeroed()) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp4_scrapes_handled, 1); + + drop(stats); + + test_env.stop().await; + } + + #[tokio::test] 
+ async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp6_scrapes_handled, 1); + + drop(stats); + + test_env.stop().await; + } + } +} + +mod configured_as_whitelisted { + + mod and_receiving_an_announce_request { + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; + use crate::servers::http::client::Client; + use crate::servers::http::requests::announce::QueryBuilder; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + #[tokio::test] + async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_torrent_not_in_whitelist_error_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_allow_announcing_a_whitelisted_torrent() { + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .tracker + 
.add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the torrent to the whitelist"); + + let response = Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_is_announce_response(response).await; + + test_env.stop().await; + } + } + + mod receiving_an_scrape_request { + use std::str::FromStr; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; + + use crate::common::fixtures::PeerBuilder; + use crate::servers::http::asserts::assert_scrape_response; + use crate::servers::http::client::Client; + use crate::servers::http::requests; + use crate::servers::http::responses::scrape::{File, ResponseBuilder}; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + + let 
info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + test_env + .tracker + .add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the torrent to the whitelist"); + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + } +} + +mod configured_as_private { + + mod and_receiving_an_announce_request { + use std::str::FromStr; + use std::time::Duration; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::auth::Key; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; + use crate::servers::http::client::Client; + use crate::servers::http::requests::announce::QueryBuilder; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + #[tokio::test] + async fn should_respond_to_authenticated_peers() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + + let response = Client::authenticated(*test_env.bind_address(), key.id()) + .announce(&QueryBuilder::default().query()) + .await; + + assert_is_announce_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn 
should_fail_if_the_peer_has_not_provided_the_authentication_key() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_authentication_error_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_if_the_key_query_param_cannot_be_parsed() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let invalid_key = "INVALID_KEY"; + + let response = Client::new(*test_env.bind_address()) + .get(&format!( + "announce/{invalid_key}?info_hash=%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0" + )) + .await; + + assert_authentication_error_response(response).await; + } + + #[tokio::test] + async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + // The tracker does not have this key + let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let response = Client::authenticated(*test_env.bind_address(), unregistered_key) + .announce(&QueryBuilder::default().query()) + .await; + + assert_authentication_error_response(response).await; + + test_env.stop().await; + } + } + + mod receiving_an_scrape_request { + + use std::str::FromStr; + use std::time::Duration; + + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::auth::Key; + use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; + + use crate::common::fixtures::PeerBuilder; + use 
crate::servers::http::asserts::{assert_authentication_error_response, assert_scrape_response}; + use crate::servers::http::client::Client; + use crate::servers::http::requests; + use crate::servers::http::responses::scrape::{File, ResponseBuilder}; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + #[tokio::test] + async fn should_fail_if_the_key_query_param_cannot_be_parsed() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let invalid_key = "INVALID_KEY"; + + let response = Client::new(*test_env.bind_address()) + .get(&format!( + "scrape/{invalid_key}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" + )) + .await; + + assert_authentication_error_response(response).await; + } + + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + 
.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + + let response = Client::authenticated(*test_env.bind_address(), key.id()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { + // There is not authentication error + // code-review: should this really be this way? + + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); + + let response = Client::authenticated(*test_env.bind_address(), false_key) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + } +} + +mod configured_as_private_and_whitelisted { + + mod and_receiving_an_announce_request {} + + mod receiving_an_scrape_request {} +} diff --git a/tests/servers/http/v1/mod.rs 
b/tests/servers/http/v1/mod.rs new file mode 100644 index 000000000..2943dbb50 --- /dev/null +++ b/tests/servers/http/v1/mod.rs @@ -0,0 +1 @@ +pub mod contract; diff --git a/tests/servers/mod.rs b/tests/servers/mod.rs new file mode 100644 index 000000000..c19f72020 --- /dev/null +++ b/tests/servers/mod.rs @@ -0,0 +1,5 @@ +extern crate rand; + +mod api; +mod http; +mod udp; diff --git a/tests/udp/asserts.rs b/tests/servers/udp/asserts.rs similarity index 100% rename from tests/udp/asserts.rs rename to tests/servers/udp/asserts.rs diff --git a/tests/udp/client.rs b/tests/servers/udp/client.rs similarity index 98% rename from tests/udp/client.rs rename to tests/servers/udp/client.rs index 0bec03d03..a13845b97 100644 --- a/tests/udp/client.rs +++ b/tests/servers/udp/client.rs @@ -5,7 +5,7 @@ use aquatic_udp_protocol::{Request, Response}; use tokio::net::UdpSocket; use torrust_tracker::udp::MAX_PACKET_SIZE; -use crate::udp::source_address; +use crate::servers::udp::source_address; #[allow(clippy::module_name_repetitions)] pub struct UdpClient { diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs new file mode 100644 index 000000000..311cf5e49 --- /dev/null +++ b/tests/servers/udp/contract.rs @@ -0,0 +1,160 @@ +// UDP tracker documentation: +// +// BEP 15. 
UDP Tracker Protocol for BitTorrent +// https://www.bittorrent.org/beps/bep_0015.html + +use core::panic; + +use aquatic_udp_protocol::{ConnectRequest, ConnectionId, Response, TransactionId}; +use torrust_tracker::udp::MAX_PACKET_SIZE; +use torrust_tracker_test_helpers::configuration; + +use crate::servers::udp::asserts::is_error_response; +use crate::servers::udp::client::{new_udp_client_connected, UdpTrackerClient}; +use crate::servers::udp::test_environment::running_test_environment; + +fn empty_udp_request() -> [u8; MAX_PACKET_SIZE] { + [0; MAX_PACKET_SIZE] +} + +fn empty_buffer() -> [u8; MAX_PACKET_SIZE] { + [0; MAX_PACKET_SIZE] +} + +async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { + let connect_request = ConnectRequest { transaction_id }; + + client.send(connect_request.into()).await; + + let response = client.receive().await; + + match response { + Response::Connect(connect_response) => connect_response.connection_id, + _ => panic!("error connecting to udp server {:?}", response), + } +} + +#[tokio::test] +async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let client = new_udp_client_connected(&test_env.bind_address().to_string()).await; + + client.send(&empty_udp_request()).await; + + let mut buffer = empty_buffer(); + client.receive(&mut buffer).await; + let response = Response::from_bytes(&buffer, true).unwrap(); + + assert!(is_error_response(&response, "bad request")); +} + +mod receiving_a_connection_request { + use aquatic_udp_protocol::{ConnectRequest, TransactionId}; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::udp::asserts::is_connect_response; + use crate::servers::udp::client::new_udp_tracker_client_connected; + use crate::servers::udp::test_environment::running_test_environment; + + #[tokio::test] + async fn 
should_return_a_connect_response() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; + + let connect_request = ConnectRequest { + transaction_id: TransactionId(123), + }; + + client.send(connect_request.into()).await; + + let response = client.receive().await; + + assert!(is_connect_response(&response, TransactionId(123))); + } +} + +mod receiving_an_announce_request { + use std::net::Ipv4Addr; + + use aquatic_udp_protocol::{ + AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, + TransactionId, + }; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::udp::asserts::is_ipv4_announce_response; + use crate::servers::udp::client::new_udp_tracker_client_connected; + use crate::servers::udp::contract::send_connection_request; + use crate::servers::udp::test_environment::running_test_environment; + + #[tokio::test] + async fn should_return_an_announce_response() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; + + let connection_id = send_connection_request(TransactionId(123), &client).await; + + // Send announce request + + let announce_request = AnnounceRequest { + connection_id: ConnectionId(connection_id.0), + transaction_id: TransactionId(123i32), + info_hash: InfoHash([0u8; 20]), + peer_id: PeerId([255u8; 20]), + bytes_downloaded: NumberOfBytes(0i64), + bytes_uploaded: NumberOfBytes(0i64), + bytes_left: NumberOfBytes(0i64), + event: AnnounceEvent::Started, + ip_address: Some(Ipv4Addr::new(0, 0, 0, 0)), + key: PeerKey(0u32), + peers_wanted: NumberOfPeers(1i32), + port: Port(client.udp_client.socket.local_addr().unwrap().port()), + }; + + client.send(announce_request.into()).await; + + let response = client.receive().await; + + 
assert!(is_ipv4_announce_response(&response)); + } +} + +mod receiving_an_scrape_request { + use aquatic_udp_protocol::{ConnectionId, InfoHash, ScrapeRequest, TransactionId}; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::udp::asserts::is_scrape_response; + use crate::servers::udp::client::new_udp_tracker_client_connected; + use crate::servers::udp::contract::send_connection_request; + use crate::servers::udp::test_environment::running_test_environment; + + #[tokio::test] + async fn should_return_a_scrape_response() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; + + let connection_id = send_connection_request(TransactionId(123), &client).await; + + // Send scrape request + + // Full scrapes are not allowed you need to pass an array of info hashes otherwise + // it will return "bad request" error with empty vector + let info_hashes = vec![InfoHash([0u8; 20])]; + + let scrape_request = ScrapeRequest { + connection_id: ConnectionId(connection_id.0), + transaction_id: TransactionId(123i32), + info_hashes, + }; + + client.send(scrape_request.into()).await; + + let response = client.receive().await; + + assert!(is_scrape_response(&response)); + } +} diff --git a/tests/udp/mod.rs b/tests/servers/udp/mod.rs similarity index 91% rename from tests/udp/mod.rs rename to tests/servers/udp/mod.rs index f45a4a4f9..d39c37153 100644 --- a/tests/udp/mod.rs +++ b/tests/servers/udp/mod.rs @@ -1,5 +1,6 @@ pub mod asserts; pub mod client; +pub mod contract; pub mod test_environment; /// Generates the source address for the UDP client diff --git a/tests/udp/test_environment.rs b/tests/servers/udp/test_environment.rs similarity index 100% rename from tests/udp/test_environment.rs rename to tests/servers/udp/test_environment.rs diff --git a/tests/tracker_api.rs b/tests/tracker_api.rs deleted file mode 100644 index 3219bc987..000000000 
--- a/tests/tracker_api.rs +++ /dev/null @@ -1,7 +0,0 @@ -/// Integration tests for the tracker API -/// -/// ```text -/// cargo test --test tracker_api -/// ``` -mod api; -mod common; diff --git a/tests/udp_tracker.rs b/tests/udp_tracker.rs deleted file mode 100644 index 3fe78c03d..000000000 --- a/tests/udp_tracker.rs +++ /dev/null @@ -1,173 +0,0 @@ -/// Integration tests for UDP tracker server -/// -/// ```text -/// cargo test `udp_tracker_server` -- --nocapture -/// ``` -extern crate rand; - -mod common; -mod udp; - -mod udp_tracker_server { - - // UDP tracker documentation: - // - // BEP 15. UDP Tracker Protocol for BitTorrent - // https://www.bittorrent.org/beps/bep_0015.html - - use core::panic; - - use aquatic_udp_protocol::{ConnectRequest, ConnectionId, Response, TransactionId}; - use torrust_tracker::udp::MAX_PACKET_SIZE; - use torrust_tracker_test_helpers::configuration; - - use crate::udp::asserts::is_error_response; - use crate::udp::client::{new_udp_client_connected, UdpTrackerClient}; - use crate::udp::test_environment::running_test_environment; - - fn empty_udp_request() -> [u8; MAX_PACKET_SIZE] { - [0; MAX_PACKET_SIZE] - } - - fn empty_buffer() -> [u8; MAX_PACKET_SIZE] { - [0; MAX_PACKET_SIZE] - } - - async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { - let connect_request = ConnectRequest { transaction_id }; - - client.send(connect_request.into()).await; - - let response = client.receive().await; - - match response { - Response::Connect(connect_response) => connect_response.connection_id, - _ => panic!("error connecting to udp server {:?}", response), - } - } - - #[tokio::test] - async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let client = new_udp_client_connected(&test_env.bind_address().to_string()).await; - - client.send(&empty_udp_request()).await; - - let mut buffer 
= empty_buffer(); - client.receive(&mut buffer).await; - let response = Response::from_bytes(&buffer, true).unwrap(); - - assert!(is_error_response(&response, "bad request")); - } - - mod receiving_a_connection_request { - use aquatic_udp_protocol::{ConnectRequest, TransactionId}; - use torrust_tracker_test_helpers::configuration; - - use crate::udp::asserts::is_connect_response; - use crate::udp::client::new_udp_tracker_client_connected; - use crate::udp::test_environment::running_test_environment; - - #[tokio::test] - async fn should_return_a_connect_response() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; - - let connect_request = ConnectRequest { - transaction_id: TransactionId(123), - }; - - client.send(connect_request.into()).await; - - let response = client.receive().await; - - assert!(is_connect_response(&response, TransactionId(123))); - } - } - - mod receiving_an_announce_request { - use std::net::Ipv4Addr; - - use aquatic_udp_protocol::{ - AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, - TransactionId, - }; - use torrust_tracker_test_helpers::configuration; - - use crate::udp::asserts::is_ipv4_announce_response; - use crate::udp::client::new_udp_tracker_client_connected; - use crate::udp::test_environment::running_test_environment; - use crate::udp_tracker_server::send_connection_request; - - #[tokio::test] - async fn should_return_an_announce_response() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; - - let connection_id = send_connection_request(TransactionId(123), &client).await; - - // Send announce request - - let announce_request = AnnounceRequest { - connection_id: ConnectionId(connection_id.0), - transaction_id: TransactionId(123i32), 
- info_hash: InfoHash([0u8; 20]), - peer_id: PeerId([255u8; 20]), - bytes_downloaded: NumberOfBytes(0i64), - bytes_uploaded: NumberOfBytes(0i64), - bytes_left: NumberOfBytes(0i64), - event: AnnounceEvent::Started, - ip_address: Some(Ipv4Addr::new(0, 0, 0, 0)), - key: PeerKey(0u32), - peers_wanted: NumberOfPeers(1i32), - port: Port(client.udp_client.socket.local_addr().unwrap().port()), - }; - - client.send(announce_request.into()).await; - - let response = client.receive().await; - - assert!(is_ipv4_announce_response(&response)); - } - } - - mod receiving_an_scrape_request { - use aquatic_udp_protocol::{ConnectionId, InfoHash, ScrapeRequest, TransactionId}; - use torrust_tracker_test_helpers::configuration; - - use crate::udp::asserts::is_scrape_response; - use crate::udp::client::new_udp_tracker_client_connected; - use crate::udp::test_environment::running_test_environment; - use crate::udp_tracker_server::send_connection_request; - - #[tokio::test] - async fn should_return_a_scrape_response() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; - - let connection_id = send_connection_request(TransactionId(123), &client).await; - - // Send scrape request - - // Full scrapes are not allowed you need to pass an array of info hashes otherwise - // it will return "bad request" error with empty vector - let info_hashes = vec![InfoHash([0u8; 20])]; - - let scrape_request = ScrapeRequest { - connection_id: ConnectionId(connection_id.0), - transaction_id: TransactionId(123i32), - info_hashes, - }; - - client.send(scrape_request.into()).await; - - let response = client.receive().await; - - assert!(is_scrape_response(&response)); - } - } -} From fd50bb000451864b36400f34e6625d7feaab6053 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 13 Mar 2023 19:11:56 +0000 Subject: [PATCH 415/435] refactor(tracker): use domain struts in DB trait instead of 
primitive types. --- src/apis/v1/context/auth_key/handlers.rs | 2 +- src/databases/mod.rs | 16 +++++----------- src/databases/mysql.rs | 16 +++++++++------- src/databases/sqlite.rs | 14 +++++++------- src/protocol/info_hash.rs | 12 ++++++++++++ src/tracker/mod.rs | 11 +++++------ 6 files changed, 39 insertions(+), 32 deletions(-) diff --git a/src/apis/v1/context/auth_key/handlers.rs b/src/apis/v1/context/auth_key/handlers.rs index d21f08299..cb1cd1113 100644 --- a/src/apis/v1/context/auth_key/handlers.rs +++ b/src/apis/v1/context/auth_key/handlers.rs @@ -31,7 +31,7 @@ pub async fn delete_auth_key_handler( ) -> Response { match Key::from_str(&seconds_valid_or_key.0) { Err(_) => invalid_auth_key_param_response(&seconds_valid_or_key.0), - Ok(key) => match tracker.remove_auth_key(&key.to_string()).await { + Ok(key) => match tracker.remove_auth_key(&key).await { Ok(_) => ok_response(), Err(e) => failed_to_delete_key_response(e), }, diff --git a/src/databases/mod.rs b/src/databases/mod.rs index 247f571d7..0af6f5723 100644 --- a/src/databases/mod.rs +++ b/src/databases/mod.rs @@ -9,7 +9,7 @@ use async_trait::async_trait; use self::error::Error; use crate::protocol::info_hash::InfoHash; -use crate::tracker::auth; +use crate::tracker::auth::{self, Key}; pub(self) struct Builder where @@ -63,25 +63,19 @@ pub trait Database: Sync + Send { async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error>; - // todo: replace type `&str` with `&InfoHash` - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result, Error>; + async fn get_info_hash_from_whitelist(&self, info_hash: &InfoHash) -> Result, Error>; async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; - // todo: replace type `&str` with `&Key` - async fn get_key_from_keys(&self, key: &str) -> Result, Error>; + async fn get_key_from_keys(&self, key: &Key) -> Result, Error>; 
async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result; - // todo: replace type `&str` with `&Key` - async fn remove_key_from_keys(&self, key: &str) -> Result; + async fn remove_key_from_keys(&self, key: &Key) -> Result; async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { - Ok(self - .get_info_hash_from_whitelist(&info_hash.clone().to_string()) - .await? - .is_some()) + Ok(self.get_info_hash_from_whitelist(info_hash).await?.is_some()) } } diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index f0c7ec1dd..f6918974f 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -147,12 +147,12 @@ impl Database for Mysql { Ok(conn.exec_drop(COMMAND, params! { info_hash_str, completed })?) } - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result, Error> { + async fn get_info_hash_from_whitelist(&self, info_hash: &InfoHash) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let select = conn.exec_first::( "SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", - params! { info_hash }, + params! { "info_hash" => info_hash.to_hex_string() }, )?; let info_hash = select.map(|f| InfoHash::from_str(&f).expect("Failed to decode InfoHash String from DB!")); @@ -183,11 +183,13 @@ impl Database for Mysql { Ok(1) } - async fn get_key_from_keys(&self, key: &str) -> Result, Error> { + async fn get_key_from_keys(&self, key: &Key) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; - let query = - conn.exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }); + let query = conn.exec_first::<(String, i64), _, _>( + "SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", + params! 
{ "key" => key.to_string() }, + ); let key = query?; @@ -211,10 +213,10 @@ impl Database for Mysql { Ok(1) } - async fn remove_key_from_keys(&self, key: &str) -> Result { + async fn remove_key_from_keys(&self, key: &Key) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; - conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! { key })?; + conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! { "key" => key.to_string() })?; Ok(1) } diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 4bf2931de..adb201def 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -156,12 +156,12 @@ impl Database for Sqlite { } } - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result, Error> { + async fn get_info_hash_from_whitelist(&self, info_hash: &InfoHash) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; - let mut rows = stmt.query([info_hash])?; + let mut rows = stmt.query([info_hash.to_hex_string()])?; let query = rows.next()?; @@ -200,7 +200,7 @@ impl Database for Sqlite { } } - async fn get_key_from_keys(&self, key: &str) -> Result, Error> { + async fn get_key_from_keys(&self, key: &Key) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; @@ -211,9 +211,9 @@ impl Database for Sqlite { Ok(key.map(|f| { let expiry: i64 = f.get(1).unwrap(); - let id: String = f.get(0).unwrap(); + let key: String = f.get(0).unwrap(); auth::ExpiringKey { - key: id.parse::().unwrap(), + key: key.parse::().unwrap(), valid_until: DurationSinceUnixEpoch::from_secs(expiry.unsigned_abs()), } })) @@ -237,10 +237,10 @@ impl Database for Sqlite { } } - async fn remove_key_from_keys(&self, key: &str) -> Result { + async fn remove_key_from_keys(&self, key: &Key) -> Result { let conn = self.pool.get().map_err(|e| (e, 
DRIVER))?; - let deleted = conn.execute("DELETE FROM keys WHERE key = ?", [key])?; + let deleted = conn.execute("DELETE FROM keys WHERE key = ?", [key.to_string()])?; if deleted == 1 { // should only remove a single record. diff --git a/src/protocol/info_hash.rs b/src/protocol/info_hash.rs index 320636725..fd7602cdd 100644 --- a/src/protocol/info_hash.rs +++ b/src/protocol/info_hash.rs @@ -24,6 +24,11 @@ impl InfoHash { pub fn bytes(&self) -> [u8; 20] { self.0 } + + #[must_use] + pub fn to_hex_string(&self) -> String { + self.to_string() + } } impl std::fmt::Display for InfoHash { @@ -197,6 +202,13 @@ mod tests { assert_eq!(output, "ffffffffffffffffffffffffffffffffffffffff"); } + #[test] + fn an_info_hash_should_return_its_a_40_utf8_lowercased_char_hex_representations_as_string() { + let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + + assert_eq!(info_hash.to_hex_string(), "ffffffffffffffffffffffffffffffffffffffff"); + } + #[test] fn an_info_hash_can_be_created_from_a_valid_20_byte_array_slice() { let info_hash: InfoHash = [255u8; 20].as_slice().into(); diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 326afbf00..8a9739793 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -202,10 +202,9 @@ impl Tracker { /// # Panics /// /// Will panic if key cannot be converted into a valid `Key`. 
- pub async fn remove_auth_key(&self, key: &str) -> Result<(), databases::error::Error> { - // todo: change argument `key: &str` to `key: &Key` + pub async fn remove_auth_key(&self, key: &Key) -> Result<(), databases::error::Error> { self.database.remove_key_from_keys(key).await?; - self.keys.write().await.remove(&key.parse::().unwrap()); + self.keys.write().await.remove(key); Ok(()) } @@ -1175,12 +1174,12 @@ mod tests { async fn it_should_remove_an_authentication_key() { let tracker = private_tracker(); - let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); - let result = tracker.remove_auth_key(&key.id().to_string()).await; + let result = tracker.remove_auth_key(&expiring_key.id()).await; assert!(result.is_ok()); - assert!(tracker.verify_auth_key(&key.id()).await.is_err()); + assert!(tracker.verify_auth_key(&expiring_key.id()).await.is_err()); } #[tokio::test] From 084b2acfb378c5df795644ea20a7adf30dd0c2d6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 14 Mar 2023 13:28:27 +0000 Subject: [PATCH 416/435] feat(api): [#120] use datetime ISO 8601 in auth key enpoint instead of timestamp. 
--- src/apis/v1/context/auth_key/resources.rs | 53 ++++++---- src/protocol/clock/mod.rs | 70 +++++++++++++ src/tracker/auth.rs | 117 ++++++++++++++-------- src/tracker/mod.rs | 18 ++-- tests/servers/http/v1/contract.rs | 8 +- 5 files changed, 190 insertions(+), 76 deletions(-) diff --git a/src/apis/v1/context/auth_key/resources.rs b/src/apis/v1/context/auth_key/resources.rs index 72ef32a95..cf43a6f3d 100644 --- a/src/apis/v1/context/auth_key/resources.rs +++ b/src/apis/v1/context/auth_key/resources.rs @@ -2,25 +2,21 @@ use std::convert::From; use serde::{Deserialize, Serialize}; -use crate::protocol::clock::DurationSinceUnixEpoch; +use crate::protocol::clock::convert_from_iso_8601_to_timestamp; use crate::tracker::auth::{self, Key}; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct AuthKey { - pub key: String, // todo: rename to `id` (API breaking change!) - pub valid_until: Option, // todo: `auth::ExpiringKey` has now always a value (API breaking change!) + pub key: String, + pub valid_until: u64, // todo: remove when the torrust-index-backend starts using the `expiry_time` attribute. 
+ pub expiry_time: String, } impl From for auth::ExpiringKey { fn from(auth_key_resource: AuthKey) -> Self { - let valid_until = match auth_key_resource.valid_until { - Some(valid_until) => DurationSinceUnixEpoch::from_secs(valid_until), - None => DurationSinceUnixEpoch::from_secs(0), - }; - auth::ExpiringKey { key: auth_key_resource.key.parse::().unwrap(), - valid_until, + valid_until: convert_from_iso_8601_to_timestamp(&auth_key_resource.expiry_time), } } } @@ -29,7 +25,8 @@ impl From for AuthKey { fn from(auth_key: auth::ExpiringKey) -> Self { AuthKey { key: auth_key.key.to_string(), - valid_until: Some(auth_key.valid_until.as_secs()), + valid_until: auth_key.valid_until.as_secs(), + expiry_time: auth_key.expiry_time().to_string(), } } } @@ -42,38 +39,53 @@ mod tests { use crate::protocol::clock::{Current, TimeNow}; use crate::tracker::auth::{self, Key}; + struct TestTime { + pub timestamp: u64, + pub iso_8601_v1: String, + pub iso_8601_v2: String, + } + + fn one_hour_after_unix_epoch() -> TestTime { + let timestamp = 60_u64; + let iso_8601_v1 = "1970-01-01T00:01:00.000Z".to_string(); + let iso_8601_v2 = "1970-01-01 00:01:00 UTC".to_string(); + TestTime { + timestamp, + iso_8601_v1, + iso_8601_v2, + } + } + #[test] fn it_should_be_convertible_into_an_auth_key() { - let duration_in_secs = 60; - let auth_key_resource = AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: Some(duration_in_secs), + valid_until: one_hour_after_unix_epoch().timestamp, + expiry_time: one_hour_after_unix_epoch().iso_8601_v1, }; assert_eq!( auth::ExpiringKey::from(auth_key_resource), auth::ExpiringKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line - valid_until: Current::add(&Duration::new(duration_in_secs, 0)).unwrap() + valid_until: Current::add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap() } ); } #[test] fn it_should_be_convertible_from_an_auth_key() { - let duration_in_secs = 
60; - let auth_key = auth::ExpiringKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line - valid_until: Current::add(&Duration::new(duration_in_secs, 0)).unwrap(), + valid_until: Current::add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap(), }; assert_eq!( AuthKey::from(auth_key), AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: Some(duration_in_secs) + valid_until: one_hour_after_unix_epoch().timestamp, + expiry_time: one_hour_after_unix_epoch().iso_8601_v2, } ); } @@ -83,10 +95,11 @@ mod tests { assert_eq!( serde_json::to_string(&AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: Some(60) + valid_until: one_hour_after_unix_epoch().timestamp, + expiry_time: one_hour_after_unix_epoch().iso_8601_v1, }) .unwrap(), - "{\"key\":\"IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM\",\"valid_until\":60}" // cspell:disable-line + "{\"key\":\"IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM\",\"valid_until\":60,\"expiry_time\":\"1970-01-01T00:01:00.000Z\"}" // cspell:disable-line ); } } diff --git a/src/protocol/clock/mod.rs b/src/protocol/clock/mod.rs index 7868d4c5e..73df37b58 100644 --- a/src/protocol/clock/mod.rs +++ b/src/protocol/clock/mod.rs @@ -1,6 +1,9 @@ use std::num::IntErrorKind; +use std::str::FromStr; use std::time::Duration; +use chrono::{DateTime, NaiveDateTime, Utc}; + pub type DurationSinceUnixEpoch = Duration; #[derive(Debug)] @@ -36,6 +39,40 @@ pub trait TimeNow: Time { } } +/// # Panics +/// +/// Will panic if the input time cannot be converted to `DateTime::`. +/// +#[must_use] +pub fn convert_from_iso_8601_to_timestamp(iso_8601: &str) -> DurationSinceUnixEpoch { + convert_from_datetime_utc_to_timestamp(&DateTime::::from_str(iso_8601).unwrap()) +} + +/// # Panics +/// +/// Will panic if the input time overflows the u64 type. 
+/// +#[must_use] +pub fn convert_from_datetime_utc_to_timestamp(datetime_utc: &DateTime) -> DurationSinceUnixEpoch { + DurationSinceUnixEpoch::from_secs(u64::try_from(datetime_utc.timestamp()).expect("Overflow of u64 seconds, very future!")) +} + +/// # Panics +/// +/// Will panic if the input time overflows the i64 type. +/// +#[must_use] +pub fn convert_from_timestamp_to_datetime_utc(duration: DurationSinceUnixEpoch) -> DateTime { + DateTime::::from_utc( + NaiveDateTime::from_timestamp_opt( + i64::try_from(duration.as_secs()).expect("Overflow of i64 seconds, very future!"), + duration.subsec_nanos(), + ) + .unwrap(), + Utc, + ) +} + #[cfg(test)] mod tests { use std::any::TypeId; @@ -54,6 +91,39 @@ mod tests { assert_ne!(TypeId::of::(), TypeId::of::()); assert_ne!(Stopped::now(), Working::now()); } + + mod timestamp { + use chrono::{DateTime, NaiveDateTime, Utc}; + + use crate::protocol::clock::{ + convert_from_datetime_utc_to_timestamp, convert_from_iso_8601_to_timestamp, convert_from_timestamp_to_datetime_utc, + DurationSinceUnixEpoch, + }; + + #[test] + fn should_be_converted_to_datetime_utc() { + let timestamp = DurationSinceUnixEpoch::ZERO; + assert_eq!( + convert_from_timestamp_to_datetime_utc(timestamp), + DateTime::::from_utc(NaiveDateTime::from_timestamp_opt(0, 0).unwrap(), Utc) + ); + } + + #[test] + fn should_be_converted_from_datetime_utc() { + let datetime = DateTime::::from_utc(NaiveDateTime::from_timestamp_opt(0, 0).unwrap(), Utc); + assert_eq!( + convert_from_datetime_utc_to_timestamp(&datetime), + DurationSinceUnixEpoch::ZERO + ); + } + + #[test] + fn should_be_converted_from_datetime_utc_in_iso_8601() { + let iso_8601 = "1970-01-01T00:00:00.000Z".to_string(); + assert_eq!(convert_from_iso_8601_to_timestamp(&iso_8601), DurationSinceUnixEpoch::ZERO); + } + } } mod working_clock { diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index e3c12a828..75bc543a8 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -3,7 +3,6 @@ use 
std::str::FromStr; use std::sync::Arc; use std::time::Duration; -use chrono::{DateTime, NaiveDateTime, Utc}; use derive_more::Display; use log::debug; use rand::distributions::Alphanumeric; @@ -12,7 +11,7 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use torrust_tracker_located_error::LocatedError; -use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time, TimeNow}; +use crate::protocol::clock::{convert_from_timestamp_to_datetime_utc, Current, DurationSinceUnixEpoch, Time, TimeNow}; use crate::protocol::common::AUTH_KEY_LENGTH; #[must_use] @@ -59,27 +58,28 @@ pub struct ExpiringKey { impl std::fmt::Display for ExpiringKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "key: `{}`, valid until `{}`", - self.key, - DateTime::::from_utc( - NaiveDateTime::from_timestamp_opt( - i64::try_from(self.valid_until.as_secs()).expect("Overflow of i64 seconds, very future!"), - self.valid_until.subsec_nanos(), - ) - .unwrap(), - Utc - ) - ) + write!(f, "key: `{}`, valid until `{}`", self.key, self.expiry_time()) } } impl ExpiringKey { #[must_use] - pub fn id(&self) -> Key { + pub fn key(&self) -> Key { self.key.clone() } + + /// It returns the expiry time. For example, for the starting time for Unix Epoch + /// (timestamp 0) it will return a `DateTime` whose string representation is + /// `1970-01-01 00:00:00 UTC`. + /// + /// # Panics + /// + /// Will panic when the key timestamp overflows the ui64 type. 
+ /// + #[must_use] + pub fn expiry_time(&self) -> chrono::DateTime { + convert_from_timestamp_to_datetime_utc(self.valid_until) + } } #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, Display, Hash)] @@ -126,44 +126,75 @@ impl From for Error { #[cfg(test)] mod tests { - use std::str::FromStr; - use std::time::Duration; - use crate::protocol::clock::{Current, StoppedTime}; - use crate::tracker::auth; + mod key { + use std::str::FromStr; - #[test] - fn auth_key_from_string() { - let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key = auth::Key::from_str(key_string); + use crate::tracker::auth::Key; - assert!(auth_key.is_ok()); - assert_eq!(auth_key.unwrap().to_string(), key_string); + #[test] + fn should_be_parsed_from_an_string() { + let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; + let key = Key::from_str(key_string); + + assert!(key.is_ok()); + assert_eq!(key.unwrap().to_string(), key_string); + } } - #[test] - fn generate_valid_auth_key() { - let auth_key = auth::generate(Duration::new(9999, 0)); + mod expiring_auth_key { + use std::str::FromStr; + use std::time::Duration; - assert!(auth::verify(&auth_key).is_ok()); - } + use crate::protocol::clock::{Current, StoppedTime}; + use crate::tracker::auth; + + #[test] + fn should_be_parsed_from_an_string() { + let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; + let auth_key = auth::Key::from_str(key_string); - #[test] - fn generate_and_check_expired_auth_key() { - // Set the time to the current time. - Current::local_set_to_system_time_now(); + assert!(auth_key.is_ok()); + assert_eq!(auth_key.unwrap().to_string(), key_string); + } - // Make key that is valid for 19 seconds. - let auth_key = auth::generate(Duration::from_secs(19)); + #[test] + fn should_be_displayed() { + // Set the time to the current time. + Current::local_set_to_unix_epoch(); - // Mock the time has passed 10 sec. 
- Current::local_add(&Duration::from_secs(10)).unwrap(); + let expiring_key = auth::generate(Duration::from_secs(0)); - assert!(auth::verify(&auth_key).is_ok()); + assert_eq!( + expiring_key.to_string(), + format!("key: `{}`, valid until `1970-01-01 00:00:00 UTC`", expiring_key.key) // cspell:disable-line + ); + } - // Mock the time has passed another 10 sec. - Current::local_add(&Duration::from_secs(10)).unwrap(); + #[test] + fn should_be_generated_with_a_expiration_time() { + let expiring_key = auth::generate(Duration::new(9999, 0)); - assert!(auth::verify(&auth_key).is_err()); + assert!(auth::verify(&expiring_key).is_ok()); + } + + #[test] + fn should_be_generate_and_verified() { + // Set the time to the current time. + Current::local_set_to_system_time_now(); + + // Make key that is valid for 19 seconds. + let expiring_key = auth::generate(Duration::from_secs(19)); + + // Mock the time has passed 10 sec. + Current::local_add(&Duration::from_secs(10)).unwrap(); + + assert!(auth::verify(&expiring_key).is_ok()); + + // Mock the time has passed another 10 sec. + Current::local_add(&Duration::from_secs(10)).unwrap(); + + assert!(auth::verify(&expiring_key).is_err()); + } } } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 8a9739793..71bb41f90 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -1132,9 +1132,9 @@ mod tests { async fn it_should_authenticate_a_peer_by_using_a_key() { let tracker = private_tracker(); - let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); - let result = tracker.authenticate(&key.id()).await; + let result = tracker.authenticate(&expiring_key.key()).await; assert!(result.is_ok()); } @@ -1156,9 +1156,9 @@ mod tests { // `verify_auth_key` should be a private method. 
let tracker = private_tracker(); - let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); - assert!(tracker.verify_auth_key(&key.id()).await.is_ok()); + assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_ok()); } #[tokio::test] @@ -1176,25 +1176,25 @@ mod tests { let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); - let result = tracker.remove_auth_key(&expiring_key.id()).await; + let result = tracker.remove_auth_key(&expiring_key.key()).await; assert!(result.is_ok()); - assert!(tracker.verify_auth_key(&expiring_key.id()).await.is_err()); + assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_err()); } #[tokio::test] async fn it_should_load_authentication_keys_from_the_database() { let tracker = private_tracker(); - let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); // Remove the newly generated key in memory - tracker.keys.write().await.remove(&key.id()); + tracker.keys.write().await.remove(&expiring_key.key()); let result = tracker.load_keys_from_database().await; assert!(result.is_ok()); - assert!(tracker.verify_auth_key(&key.id()).await.is_ok()); + assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_ok()); } } diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index eda42f1ee..501c0f6fa 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -1215,9 +1215,9 @@ mod configured_as_private { async fn should_respond_to_authenticated_peers() { let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; - let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + let expiring_key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - 
let response = Client::authenticated(*test_env.bind_address(), key.id()) + let response = Client::authenticated(*test_env.bind_address(), expiring_key.key()) .announce(&QueryBuilder::default().query()) .await; @@ -1353,9 +1353,9 @@ mod configured_as_private { ) .await; - let key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + let expiring_key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); - let response = Client::authenticated(*test_env.bind_address(), key.id()) + let response = Client::authenticated(*test_env.bind_address(), expiring_key.key()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) From d51aae049260af54e1a5253a5f8dc53a277e2a31 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 16 Mar 2023 15:45:21 +0000 Subject: [PATCH 417/435] feat(tracker): [#164] add prefix 0x to peer ID hex string --- src/tracker/peer.rs | 38 +++++++++++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 015af12a3..3012770bb 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -169,8 +169,13 @@ impl Id { pub fn to_hex_string(&self) -> Option { let buff_size = self.0.len() * 2; let mut tmp: Vec = vec![0; buff_size]; + binascii::bin2hex(&self.0, &mut tmp).unwrap(); - std::str::from_utf8(&tmp).ok().map(std::string::ToString::to_string) + + match std::str::from_utf8(&tmp) { + Ok(hex) => Some(format!("0x{hex}")), + Err(_) => None, + } } #[must_use] @@ -360,23 +365,23 @@ mod test { #[test] fn should_be_converted_to_hex_string() { let id = peer::Id(*b"-qB00000000000000000"); - assert_eq!(id.to_hex_string().unwrap(), "2d71423030303030303030303030303030303030"); + assert_eq!(id.to_hex_string().unwrap(), "0x2d71423030303030303030303030303030303030"); let id = peer::Id([ 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, ]); - 
assert_eq!(id.to_hex_string().unwrap(), "009f9296009f9296009f9296009f9296009f9296"); + assert_eq!(id.to_hex_string().unwrap(), "0x009f9296009f9296009f9296009f9296009f9296"); } #[test] fn should_be_converted_into_string_type_using_the_hex_string_format() { let id = peer::Id(*b"-qB00000000000000000"); - assert_eq!(id.to_string(), "2d71423030303030303030303030303030303030"); + assert_eq!(id.to_string(), "0x2d71423030303030303030303030303030303030"); let id = peer::Id([ 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, ]); - assert_eq!(id.to_string(), "009f9296009f9296009f9296009f9296009f9296"); + assert_eq!(id.to_string(), "0x009f9296009f9296009f9296009f9296009f9296"); } #[test] @@ -390,6 +395,7 @@ mod test { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use serde_json::Value; use crate::protocol::clock::{Current, Time}; use crate::tracker::peer::{self, Peer}; @@ -406,12 +412,26 @@ mod test { event: AnnounceEvent::Started, }; - let json_serialized_value = serde_json::to_string(&torrent_peer).unwrap(); + let raw_json = serde_json::to_string(&torrent_peer).unwrap(); + + let expected_raw_json = r#" + { + "peer_id": { + "id": "0x2d71423030303030303030303030303030303030", + "client": "qBittorrent" + }, + "peer_addr":"126.0.0.1:8080", + "updated":0, + "uploaded":0, + "downloaded":0, + "left":0, + "event":"Started" + } + "#; assert_eq!( - json_serialized_value, - // todo: compare using pretty json format to improve readability - r#"{"peer_id":{"id":"2d71423030303030303030303030303030303030","client":"qBittorrent"},"peer_addr":"126.0.0.1:8080","updated":0,"uploaded":0,"downloaded":0,"left":0,"event":"Started"}"# + serde_json::from_str::(&raw_json).unwrap(), + serde_json::from_str::(expected_raw_json).unwrap() ); } } From 46f49005f722c98401689cec5160131acba2eaa4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 07:45:14 +0000 Subject: [PATCH 418/435] refactor: 
add servers modules Move apps into a "servers" module. --- src/jobs/http_tracker.rs | 4 +- src/jobs/tracker_apis.rs | 2 +- src/jobs/udp_tracker.rs | 2 +- src/lib.rs | 4 +- src/{ => servers}/apis/mod.rs | 0 src/{ => servers}/apis/routes.rs | 0 src/{ => servers}/apis/server.rs | 2 +- .../apis/v1/context/auth_key/handlers.rs | 4 +- .../apis/v1/context/auth_key/mod.rs | 0 .../apis/v1/context/auth_key/resources.rs | 0 .../apis/v1/context/auth_key/responses.rs | 4 +- .../apis/v1/context/auth_key/routes.rs | 0 src/{ => servers}/apis/v1/context/mod.rs | 0 .../apis/v1/context/stats/handlers.rs | 0 .../apis/v1/context/stats/mod.rs | 0 .../apis/v1/context/stats/resources.rs | 0 .../apis/v1/context/stats/responses.rs | 0 .../apis/v1/context/stats/routes.rs | 0 .../apis/v1/context/torrent/handlers.rs | 4 +- .../apis/v1/context/torrent/mod.rs | 0 .../apis/v1/context/torrent/resources/mod.rs | 0 .../apis/v1/context/torrent/resources/peer.rs | 0 .../v1/context/torrent/resources/torrent.rs | 4 +- .../apis/v1/context/torrent/responses.rs | 0 .../apis/v1/context/torrent/routes.rs | 0 .../apis/v1/context/whitelist/handlers.rs | 4 +- .../apis/v1/context/whitelist/mod.rs | 0 .../apis/v1/context/whitelist/responses.rs | 2 +- .../apis/v1/context/whitelist/routes.rs | 0 src/{ => servers}/apis/v1/middlewares/auth.rs | 2 +- src/{ => servers}/apis/v1/middlewares/mod.rs | 0 src/{ => servers}/apis/v1/mod.rs | 0 src/{ => servers}/apis/v1/responses.rs | 0 src/{ => servers}/apis/v1/routes.rs | 0 src/{ => servers}/http/mod.rs | 0 src/{ => servers}/http/percent_encoding.rs | 2 +- src/{ => servers}/http/server.rs | 0 .../http/v1/extractors/announce_request.rs | 10 +-- .../http/v1/extractors/authentication_key.rs | 6 +- .../http/v1/extractors/client_ip_sources.rs | 2 +- src/{ => servers}/http/v1/extractors/mod.rs | 0 .../http/v1/extractors/scrape_request.rs | 10 +-- .../http/v1/handlers/announce.rs | 42 +++++----- .../http/v1/handlers/common/auth.rs | 2 +- .../http/v1/handlers/common/mod.rs | 0 
.../http/v1/handlers/common/peer_ip.rs | 8 +- src/{ => servers}/http/v1/handlers/mod.rs | 0 src/{ => servers}/http/v1/handlers/scrape.rs | 34 ++++---- src/{ => servers}/http/v1/launcher.rs | 2 +- src/{ => servers}/http/v1/mod.rs | 0 src/{ => servers}/http/v1/query.rs | 8 +- .../http/v1/requests/announce.rs | 16 ++-- src/{ => servers}/http/v1/requests/mod.rs | 0 src/{ => servers}/http/v1/requests/scrape.rs | 14 ++-- .../http/v1/responses/announce.rs | 4 +- src/{ => servers}/http/v1/responses/error.rs | 0 src/{ => servers}/http/v1/responses/mod.rs | 0 src/{ => servers}/http/v1/responses/scrape.rs | 2 +- src/{ => servers}/http/v1/routes.rs | 0 .../http/v1/services/announce.rs | 4 +- src/{ => servers}/http/v1/services/mod.rs | 0 .../http/v1/services/peer_ip_resolver.rs | 4 +- src/{ => servers}/http/v1/services/scrape.rs | 12 ++- src/servers/mod.rs | 3 + src/{ => servers}/udp/connection_cookie.rs | 2 +- src/{ => servers}/udp/error.rs | 0 src/{ => servers}/udp/handlers.rs | 82 +++++++++---------- src/{ => servers}/udp/mod.rs | 0 src/{ => servers}/udp/peer_builder.rs | 0 src/{ => servers}/udp/request.rs | 0 src/{ => servers}/udp/server.rs | 4 +- src/setup.rs | 2 +- tests/servers/api/test_environment.rs | 2 +- tests/servers/api/v1/asserts.rs | 6 +- .../servers/api/v1/contract/context/stats.rs | 2 +- .../api/v1/contract/context/torrent.rs | 4 +- tests/servers/http/test_environment.rs | 2 +- tests/servers/http/v1/contract.rs | 2 +- tests/servers/udp/client.rs | 2 +- tests/servers/udp/contract.rs | 2 +- tests/servers/udp/test_environment.rs | 2 +- 81 files changed, 171 insertions(+), 166 deletions(-) rename src/{ => servers}/apis/mod.rs (100%) rename src/{ => servers}/apis/routes.rs (100%) rename src/{ => servers}/apis/server.rs (99%) rename src/{ => servers}/apis/v1/context/auth_key/handlers.rs (90%) rename src/{ => servers}/apis/v1/context/auth_key/mod.rs (100%) rename src/{ => servers}/apis/v1/context/auth_key/resources.rs (100%) rename src/{ => 
servers}/apis/v1/context/auth_key/responses.rs (86%) rename src/{ => servers}/apis/v1/context/auth_key/routes.rs (100%) rename src/{ => servers}/apis/v1/context/mod.rs (100%) rename src/{ => servers}/apis/v1/context/stats/handlers.rs (100%) rename src/{ => servers}/apis/v1/context/stats/mod.rs (100%) rename src/{ => servers}/apis/v1/context/stats/resources.rs (100%) rename src/{ => servers}/apis/v1/context/stats/responses.rs (100%) rename src/{ => servers}/apis/v1/context/stats/routes.rs (100%) rename src/{ => servers}/apis/v1/context/torrent/handlers.rs (94%) rename src/{ => servers}/apis/v1/context/torrent/mod.rs (100%) rename src/{ => servers}/apis/v1/context/torrent/resources/mod.rs (100%) rename src/{ => servers}/apis/v1/context/torrent/resources/peer.rs (100%) rename src/{ => servers}/apis/v1/context/torrent/resources/torrent.rs (96%) rename src/{ => servers}/apis/v1/context/torrent/responses.rs (100%) rename src/{ => servers}/apis/v1/context/torrent/routes.rs (100%) rename src/{ => servers}/apis/v1/context/whitelist/handlers.rs (92%) rename src/{ => servers}/apis/v1/context/whitelist/mod.rs (100%) rename src/{ => servers}/apis/v1/context/whitelist/responses.rs (89%) rename src/{ => servers}/apis/v1/context/whitelist/routes.rs (100%) rename src/{ => servers}/apis/v1/middlewares/auth.rs (95%) rename src/{ => servers}/apis/v1/middlewares/mod.rs (100%) rename src/{ => servers}/apis/v1/mod.rs (100%) rename src/{ => servers}/apis/v1/responses.rs (100%) rename src/{ => servers}/apis/v1/routes.rs (100%) rename src/{ => servers}/http/mod.rs (100%) rename src/{ => servers}/http/percent_encoding.rs (95%) rename src/{ => servers}/http/server.rs (100%) rename src/{ => servers}/http/v1/extractors/announce_request.rs (91%) rename src/{ => servers}/http/v1/extractors/authentication_key.rs (95%) rename src/{ => servers}/http/v1/extractors/client_ip_sources.rs (93%) rename src/{ => servers}/http/v1/extractors/mod.rs (100%) rename src/{ => 
servers}/http/v1/extractors/scrape_request.rs (93%) rename src/{ => servers}/http/v1/handlers/announce.rs (86%) rename src/{ => servers}/http/v1/handlers/common/auth.rs (96%) rename src/{ => servers}/http/v1/handlers/common/mod.rs (100%) rename src/{ => servers}/http/v1/handlers/common/peer_ip.rs (77%) rename src/{ => servers}/http/v1/handlers/mod.rs (100%) rename src/{ => servers}/http/v1/handlers/scrape.rs (86%) rename src/{ => servers}/http/v1/launcher.rs (98%) rename src/{ => servers}/http/v1/mod.rs (100%) rename src/{ => servers}/http/v1/query.rs (97%) rename src/{ => servers}/http/v1/requests/announce.rs (97%) rename src/{ => servers}/http/v1/requests/mod.rs (100%) rename src/{ => servers}/http/v1/requests/scrape.rs (89%) rename src/{ => servers}/http/v1/responses/announce.rs (98%) rename src/{ => servers}/http/v1/responses/error.rs (100%) rename src/{ => servers}/http/v1/responses/mod.rs (100%) rename src/{ => servers}/http/v1/responses/scrape.rs (97%) rename src/{ => servers}/http/v1/routes.rs (100%) rename src/{ => servers}/http/v1/services/announce.rs (97%) rename src/{ => servers}/http/v1/services/mod.rs (100%) rename src/{ => servers}/http/v1/services/peer_ip_resolver.rs (95%) rename src/{ => servers}/http/v1/services/scrape.rs (95%) create mode 100644 src/servers/mod.rs rename src/{ => servers}/udp/connection_cookie.rs (99%) rename src/{ => servers}/udp/error.rs (100%) rename src/{ => servers}/udp/handlers.rs (94%) rename src/{ => servers}/udp/mod.rs (100%) rename src/{ => servers}/udp/peer_builder.rs (100%) rename src/{ => servers}/udp/request.rs (100%) rename src/{ => servers}/udp/server.rs (98%) diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index e0091958b..43bd0076f 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -6,8 +6,8 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker_configuration::HttpTracker; -use crate::http::v1::launcher; -use crate::http::Version; +use 
crate::servers::http::v1::launcher; +use crate::servers::http::Version; use crate::tracker; #[derive(Debug)] diff --git a/src/jobs/tracker_apis.rs b/src/jobs/tracker_apis.rs index 939b58638..cdebc21a8 100644 --- a/src/jobs/tracker_apis.rs +++ b/src/jobs/tracker_apis.rs @@ -6,7 +6,7 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker_configuration::HttpApi; -use crate::apis::server; +use crate::servers::apis::server; use crate::tracker; #[derive(Debug)] diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index 57232855b..138222daf 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -4,8 +4,8 @@ use log::{error, info, warn}; use tokio::task::JoinHandle; use torrust_tracker_configuration::UdpTracker; +use crate::servers::udp::server::Udp; use crate::tracker; -use crate::udp::server::Udp; #[must_use] pub fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHandle<()> { diff --git a/src/lib.rs b/src/lib.rs index f01ff0468..6c0ae464f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,14 +1,12 @@ -pub mod apis; pub mod databases; -pub mod http; pub mod jobs; pub mod logging; pub mod protocol; +pub mod servers; pub mod setup; pub mod signals; pub mod stats; pub mod tracker; -pub mod udp; #[macro_use] extern crate lazy_static; diff --git a/src/apis/mod.rs b/src/servers/apis/mod.rs similarity index 100% rename from src/apis/mod.rs rename to src/servers/apis/mod.rs diff --git a/src/apis/routes.rs b/src/servers/apis/routes.rs similarity index 100% rename from src/apis/routes.rs rename to src/servers/apis/routes.rs diff --git a/src/apis/server.rs b/src/servers/apis/server.rs similarity index 99% rename from src/apis/server.rs rename to src/servers/apis/server.rs index daac35999..002babbfb 100644 --- a/src/apis/server.rs +++ b/src/servers/apis/server.rs @@ -219,7 +219,7 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; - use crate::apis::server::ApiServer; + use 
crate::servers::apis::server::ApiServer; use crate::tracker; use crate::tracker::statistics; diff --git a/src/apis/v1/context/auth_key/handlers.rs b/src/servers/apis/v1/context/auth_key/handlers.rs similarity index 90% rename from src/apis/v1/context/auth_key/handlers.rs rename to src/servers/apis/v1/context/auth_key/handlers.rs index cb1cd1113..d2e633206 100644 --- a/src/apis/v1/context/auth_key/handlers.rs +++ b/src/servers/apis/v1/context/auth_key/handlers.rs @@ -9,8 +9,8 @@ use serde::Deserialize; use super::responses::{ auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, failed_to_reload_keys_response, }; -use crate::apis::v1::context::auth_key::resources::AuthKey; -use crate::apis::v1::responses::{invalid_auth_key_param_response, ok_response}; +use crate::servers::apis::v1::context::auth_key::resources::AuthKey; +use crate::servers::apis::v1::responses::{invalid_auth_key_param_response, ok_response}; use crate::tracker::auth::Key; use crate::tracker::Tracker; diff --git a/src/apis/v1/context/auth_key/mod.rs b/src/servers/apis/v1/context/auth_key/mod.rs similarity index 100% rename from src/apis/v1/context/auth_key/mod.rs rename to src/servers/apis/v1/context/auth_key/mod.rs diff --git a/src/apis/v1/context/auth_key/resources.rs b/src/servers/apis/v1/context/auth_key/resources.rs similarity index 100% rename from src/apis/v1/context/auth_key/resources.rs rename to src/servers/apis/v1/context/auth_key/resources.rs diff --git a/src/apis/v1/context/auth_key/responses.rs b/src/servers/apis/v1/context/auth_key/responses.rs similarity index 86% rename from src/apis/v1/context/auth_key/responses.rs rename to src/servers/apis/v1/context/auth_key/responses.rs index 9b8fcebe2..4e3b0c711 100644 --- a/src/apis/v1/context/auth_key/responses.rs +++ b/src/servers/apis/v1/context/auth_key/responses.rs @@ -3,8 +3,8 @@ use std::error::Error; use axum::http::{header, StatusCode}; use axum::response::{IntoResponse, Response}; -use 
crate::apis::v1::context::auth_key::resources::AuthKey; -use crate::apis::v1::responses::unhandled_rejection_response; +use crate::servers::apis::v1::context::auth_key::resources::AuthKey; +use crate::servers::apis::v1::responses::unhandled_rejection_response; /// # Panics /// diff --git a/src/apis/v1/context/auth_key/routes.rs b/src/servers/apis/v1/context/auth_key/routes.rs similarity index 100% rename from src/apis/v1/context/auth_key/routes.rs rename to src/servers/apis/v1/context/auth_key/routes.rs diff --git a/src/apis/v1/context/mod.rs b/src/servers/apis/v1/context/mod.rs similarity index 100% rename from src/apis/v1/context/mod.rs rename to src/servers/apis/v1/context/mod.rs diff --git a/src/apis/v1/context/stats/handlers.rs b/src/servers/apis/v1/context/stats/handlers.rs similarity index 100% rename from src/apis/v1/context/stats/handlers.rs rename to src/servers/apis/v1/context/stats/handlers.rs diff --git a/src/apis/v1/context/stats/mod.rs b/src/servers/apis/v1/context/stats/mod.rs similarity index 100% rename from src/apis/v1/context/stats/mod.rs rename to src/servers/apis/v1/context/stats/mod.rs diff --git a/src/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs similarity index 100% rename from src/apis/v1/context/stats/resources.rs rename to src/servers/apis/v1/context/stats/resources.rs diff --git a/src/apis/v1/context/stats/responses.rs b/src/servers/apis/v1/context/stats/responses.rs similarity index 100% rename from src/apis/v1/context/stats/responses.rs rename to src/servers/apis/v1/context/stats/responses.rs diff --git a/src/apis/v1/context/stats/routes.rs b/src/servers/apis/v1/context/stats/routes.rs similarity index 100% rename from src/apis/v1/context/stats/routes.rs rename to src/servers/apis/v1/context/stats/routes.rs diff --git a/src/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs similarity index 94% rename from src/apis/v1/context/torrent/handlers.rs rename to 
src/servers/apis/v1/context/torrent/handlers.rs index fc816cdbf..45ffbcf22 100644 --- a/src/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -8,9 +8,9 @@ use serde::{de, Deserialize, Deserializer}; use super::resources::torrent::ListItem; use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; -use crate::apis::v1::responses::invalid_info_hash_param_response; -use crate::apis::InfoHashParam; use crate::protocol::info_hash::InfoHash; +use crate::servers::apis::v1::responses::invalid_info_hash_param_response; +use crate::servers::apis::InfoHashParam; use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; use crate::tracker::Tracker; diff --git a/src/apis/v1/context/torrent/mod.rs b/src/servers/apis/v1/context/torrent/mod.rs similarity index 100% rename from src/apis/v1/context/torrent/mod.rs rename to src/servers/apis/v1/context/torrent/mod.rs diff --git a/src/apis/v1/context/torrent/resources/mod.rs b/src/servers/apis/v1/context/torrent/resources/mod.rs similarity index 100% rename from src/apis/v1/context/torrent/resources/mod.rs rename to src/servers/apis/v1/context/torrent/resources/mod.rs diff --git a/src/apis/v1/context/torrent/resources/peer.rs b/src/servers/apis/v1/context/torrent/resources/peer.rs similarity index 100% rename from src/apis/v1/context/torrent/resources/peer.rs rename to src/servers/apis/v1/context/torrent/resources/peer.rs diff --git a/src/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs similarity index 96% rename from src/apis/v1/context/torrent/resources/torrent.rs rename to src/servers/apis/v1/context/torrent/resources/torrent.rs index 48f4c58f0..577ac279c 100644 --- a/src/apis/v1/context/torrent/resources/torrent.rs +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -75,10 +75,10 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use 
super::Torrent; - use crate::apis::v1::context::torrent::resources::peer::Peer; - use crate::apis::v1::context::torrent::resources::torrent::ListItem; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; + use crate::servers::apis::v1::context::torrent::resources::peer::Peer; + use crate::servers::apis::v1::context::torrent::resources::torrent::ListItem; use crate::tracker::peer; use crate::tracker::services::torrent::{BasicInfo, Info}; diff --git a/src/apis/v1/context/torrent/responses.rs b/src/servers/apis/v1/context/torrent/responses.rs similarity index 100% rename from src/apis/v1/context/torrent/responses.rs rename to src/servers/apis/v1/context/torrent/responses.rs diff --git a/src/apis/v1/context/torrent/routes.rs b/src/servers/apis/v1/context/torrent/routes.rs similarity index 100% rename from src/apis/v1/context/torrent/routes.rs rename to src/servers/apis/v1/context/torrent/routes.rs diff --git a/src/apis/v1/context/whitelist/handlers.rs b/src/servers/apis/v1/context/whitelist/handlers.rs similarity index 92% rename from src/apis/v1/context/whitelist/handlers.rs rename to src/servers/apis/v1/context/whitelist/handlers.rs index 325f20e26..2ca70cba7 100644 --- a/src/apis/v1/context/whitelist/handlers.rs +++ b/src/servers/apis/v1/context/whitelist/handlers.rs @@ -7,9 +7,9 @@ use axum::response::Response; use super::responses::{ failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, }; -use crate::apis::v1::responses::{invalid_info_hash_param_response, ok_response}; -use crate::apis::InfoHashParam; use crate::protocol::info_hash::InfoHash; +use crate::servers::apis::v1::responses::{invalid_info_hash_param_response, ok_response}; +use crate::servers::apis::InfoHashParam; use crate::tracker::Tracker; pub async fn add_torrent_to_whitelist_handler( diff --git a/src/apis/v1/context/whitelist/mod.rs b/src/servers/apis/v1/context/whitelist/mod.rs similarity 
index 100% rename from src/apis/v1/context/whitelist/mod.rs rename to src/servers/apis/v1/context/whitelist/mod.rs diff --git a/src/apis/v1/context/whitelist/responses.rs b/src/servers/apis/v1/context/whitelist/responses.rs similarity index 89% rename from src/apis/v1/context/whitelist/responses.rs rename to src/servers/apis/v1/context/whitelist/responses.rs index 197d4c90b..06d4a9448 100644 --- a/src/apis/v1/context/whitelist/responses.rs +++ b/src/servers/apis/v1/context/whitelist/responses.rs @@ -2,7 +2,7 @@ use std::error::Error; use axum::response::Response; -use crate::apis::v1::responses::unhandled_rejection_response; +use crate::servers::apis::v1::responses::unhandled_rejection_response; #[must_use] pub fn failed_to_remove_torrent_from_whitelist_response(e: E) -> Response { diff --git a/src/apis/v1/context/whitelist/routes.rs b/src/servers/apis/v1/context/whitelist/routes.rs similarity index 100% rename from src/apis/v1/context/whitelist/routes.rs rename to src/servers/apis/v1/context/whitelist/routes.rs diff --git a/src/apis/v1/middlewares/auth.rs b/src/servers/apis/v1/middlewares/auth.rs similarity index 95% rename from src/apis/v1/middlewares/auth.rs rename to src/servers/apis/v1/middlewares/auth.rs index e729072b6..f0c63250b 100644 --- a/src/apis/v1/middlewares/auth.rs +++ b/src/servers/apis/v1/middlewares/auth.rs @@ -7,7 +7,7 @@ use axum::response::{IntoResponse, Response}; use serde::Deserialize; use torrust_tracker_configuration::{Configuration, HttpApi}; -use crate::apis::v1::responses::unhandled_rejection_response; +use crate::servers::apis::v1::responses::unhandled_rejection_response; #[derive(Deserialize, Debug)] pub struct QueryParams { diff --git a/src/apis/v1/middlewares/mod.rs b/src/servers/apis/v1/middlewares/mod.rs similarity index 100% rename from src/apis/v1/middlewares/mod.rs rename to src/servers/apis/v1/middlewares/mod.rs diff --git a/src/apis/v1/mod.rs b/src/servers/apis/v1/mod.rs similarity index 100% rename from src/apis/v1/mod.rs 
rename to src/servers/apis/v1/mod.rs diff --git a/src/apis/v1/responses.rs b/src/servers/apis/v1/responses.rs similarity index 100% rename from src/apis/v1/responses.rs rename to src/servers/apis/v1/responses.rs diff --git a/src/apis/v1/routes.rs b/src/servers/apis/v1/routes.rs similarity index 100% rename from src/apis/v1/routes.rs rename to src/servers/apis/v1/routes.rs diff --git a/src/http/mod.rs b/src/servers/http/mod.rs similarity index 100% rename from src/http/mod.rs rename to src/servers/http/mod.rs diff --git a/src/http/percent_encoding.rs b/src/servers/http/percent_encoding.rs similarity index 95% rename from src/http/percent_encoding.rs rename to src/servers/http/percent_encoding.rs index 3774519fb..c824c8df7 100644 --- a/src/http/percent_encoding.rs +++ b/src/servers/http/percent_encoding.rs @@ -21,8 +21,8 @@ pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result responses::error::Error { mod tests { use super::parse_key; - use crate::http::v1::responses::error::Error; + use crate::servers::http::v1::responses::error::Error; fn assert_error_response(error: &Error, error_message: &str) { assert!( diff --git a/src/http/v1/extractors/client_ip_sources.rs b/src/servers/http/v1/extractors/client_ip_sources.rs similarity index 93% rename from src/http/v1/extractors/client_ip_sources.rs rename to src/servers/http/v1/extractors/client_ip_sources.rs index c8b3659f3..b291eba12 100644 --- a/src/http/v1/extractors/client_ip_sources.rs +++ b/src/servers/http/v1/extractors/client_ip_sources.rs @@ -8,7 +8,7 @@ use axum::http::request::Parts; use axum::response::Response; use axum_client_ip::RightmostXForwardedFor; -use crate::http::v1::services::peer_ip_resolver::ClientIpSources; +use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; pub struct Extract(pub ClientIpSources); diff --git a/src/http/v1/extractors/mod.rs b/src/servers/http/v1/extractors/mod.rs similarity index 100% rename from src/http/v1/extractors/mod.rs rename to 
src/servers/http/v1/extractors/mod.rs diff --git a/src/http/v1/extractors/scrape_request.rs b/src/servers/http/v1/extractors/scrape_request.rs similarity index 93% rename from src/http/v1/extractors/scrape_request.rs rename to src/servers/http/v1/extractors/scrape_request.rs index d63470897..65a40bff2 100644 --- a/src/http/v1/extractors/scrape_request.rs +++ b/src/servers/http/v1/extractors/scrape_request.rs @@ -5,9 +5,9 @@ use axum::extract::FromRequestParts; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; -use crate::http::v1::query::Query; -use crate::http::v1::requests::scrape::{ParseScrapeQueryError, Scrape}; -use crate::http::v1::responses; +use crate::servers::http::v1::query::Query; +use crate::servers::http::v1::requests::scrape::{ParseScrapeQueryError, Scrape}; +use crate::servers::http::v1::responses; pub struct ExtractRequest(pub Scrape); @@ -53,9 +53,9 @@ mod tests { use std::str::FromStr; use super::extract_scrape_from; - use crate::http::v1::requests::scrape::Scrape; - use crate::http::v1::responses::error::Error; use crate::protocol::info_hash::InfoHash; + use crate::servers::http::v1::requests::scrape::Scrape; + use crate::servers::http::v1::responses::error::Error; struct TestInfoHash { pub bencoded: String, diff --git a/src/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs similarity index 86% rename from src/http/v1/handlers/announce.rs rename to src/servers/http/v1/handlers/announce.rs index 1f10c3fa4..af8a4115e 100644 --- a/src/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -7,15 +7,15 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; -use crate::http::v1::extractors::announce_request::ExtractRequest; -use crate::http::v1::extractors::authentication_key::Extract as ExtractKey; -use crate::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; -use crate::http::v1::handlers::common::auth; -use 
crate::http::v1::requests::announce::{Announce, Compact, Event}; -use crate::http::v1::responses::{self, announce}; -use crate::http::v1::services::peer_ip_resolver::ClientIpSources; -use crate::http::v1::services::{self, peer_ip_resolver}; use crate::protocol::clock::{Current, Time}; +use crate::servers::http::v1::extractors::announce_request::ExtractRequest; +use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; +use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; +use crate::servers::http::v1::handlers::common::auth; +use crate::servers::http::v1::requests::announce::{Announce, Compact, Event}; +use crate::servers::http::v1::responses::{self, announce}; +use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; +use crate::servers::http::v1::services::{self, peer_ip_resolver}; use crate::tracker::auth::Key; use crate::tracker::peer::Peer; use crate::tracker::{AnnounceData, Tracker}; @@ -141,10 +141,10 @@ mod tests { use torrust_tracker_test_helpers::configuration; - use crate::http::v1::requests::announce::Announce; - use crate::http::v1::responses; - use crate::http::v1::services::peer_ip_resolver::ClientIpSources; use crate::protocol::info_hash::InfoHash; + use crate::servers::http::v1::requests::announce::Announce; + use crate::servers::http::v1::responses; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; use crate::tracker::services::common::tracker_factory; use crate::tracker::{peer, Tracker}; @@ -197,8 +197,8 @@ mod tests { use std::sync::Arc; use super::{private_tracker, sample_announce_request, sample_client_ip_sources}; - use crate::http::v1::handlers::announce::handle_announce; - use crate::http::v1::handlers::announce::tests::assert_error_response; + use crate::servers::http::v1::handlers::announce::handle_announce; + use crate::servers::http::v1::handlers::announce::tests::assert_error_response; use crate::tracker::auth; 
#[tokio::test] @@ -238,8 +238,8 @@ mod tests { use std::sync::Arc; use super::{sample_announce_request, sample_client_ip_sources, whitelisted_tracker}; - use crate::http::v1::handlers::announce::handle_announce; - use crate::http::v1::handlers::announce::tests::assert_error_response; + use crate::servers::http::v1::handlers::announce::handle_announce; + use crate::servers::http::v1::handlers::announce::tests::assert_error_response; #[tokio::test] async fn it_should_fail_when_the_announced_torrent_is_not_whitelisted() { @@ -266,9 +266,9 @@ mod tests { use std::sync::Arc; use super::{sample_announce_request, tracker_on_reverse_proxy}; - use crate::http::v1::handlers::announce::handle_announce; - use crate::http::v1::handlers::announce::tests::assert_error_response; - use crate::http::v1::services::peer_ip_resolver::ClientIpSources; + use crate::servers::http::v1::handlers::announce::handle_announce; + use crate::servers::http::v1::handlers::announce::tests::assert_error_response; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { @@ -295,9 +295,9 @@ mod tests { use std::sync::Arc; use super::{sample_announce_request, tracker_not_on_reverse_proxy}; - use crate::http::v1::handlers::announce::handle_announce; - use crate::http::v1::handlers::announce::tests::assert_error_response; - use crate::http::v1::services::peer_ip_resolver::ClientIpSources; + use crate::servers::http::v1::handlers::announce::handle_announce; + use crate::servers::http::v1::handlers::announce::tests::assert_error_response; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { diff --git a/src/http/v1/handlers/common/auth.rs b/src/servers/http/v1/handlers/common/auth.rs similarity index 96% rename from src/http/v1/handlers/common/auth.rs rename 
to src/servers/http/v1/handlers/common/auth.rs index 938fc3f01..644556e95 100644 --- a/src/http/v1/handlers/common/auth.rs +++ b/src/servers/http/v1/handlers/common/auth.rs @@ -2,7 +2,7 @@ use std::panic::Location; use thiserror::Error; -use crate::http::v1::responses; +use crate::servers::http::v1::responses; use crate::tracker::auth; #[derive(Debug, Error)] diff --git a/src/http/v1/handlers/common/mod.rs b/src/servers/http/v1/handlers/common/mod.rs similarity index 100% rename from src/http/v1/handlers/common/mod.rs rename to src/servers/http/v1/handlers/common/mod.rs diff --git a/src/http/v1/handlers/common/peer_ip.rs b/src/servers/http/v1/handlers/common/peer_ip.rs similarity index 77% rename from src/http/v1/handlers/common/peer_ip.rs rename to src/servers/http/v1/handlers/common/peer_ip.rs index e182c716b..685324b4a 100644 --- a/src/http/v1/handlers/common/peer_ip.rs +++ b/src/servers/http/v1/handlers/common/peer_ip.rs @@ -1,5 +1,5 @@ -use crate::http::v1::responses; -use crate::http::v1::services::peer_ip_resolver::PeerIpResolutionError; +use crate::servers::http::v1::responses; +use crate::servers::http::v1::services::peer_ip_resolver::PeerIpResolutionError; impl From for responses::error::Error { fn from(err: PeerIpResolutionError) -> Self { @@ -13,8 +13,8 @@ impl From for responses::error::Error { mod tests { use std::panic::Location; - use crate::http::v1::responses; - use crate::http::v1::services::peer_ip_resolver::PeerIpResolutionError; + use crate::servers::http::v1::responses; + use crate::servers::http::v1::services::peer_ip_resolver::PeerIpResolutionError; fn assert_error_response(error: &responses::error::Error, error_message: &str) { assert!( diff --git a/src/http/v1/handlers/mod.rs b/src/servers/http/v1/handlers/mod.rs similarity index 100% rename from src/http/v1/handlers/mod.rs rename to src/servers/http/v1/handlers/mod.rs diff --git a/src/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs similarity index 86% rename from 
src/http/v1/handlers/scrape.rs rename to src/servers/http/v1/handlers/scrape.rs index 50f92cd36..75c5717de 100644 --- a/src/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -4,12 +4,12 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use log::debug; -use crate::http::v1::extractors::authentication_key::Extract as ExtractKey; -use crate::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; -use crate::http::v1::extractors::scrape_request::ExtractRequest; -use crate::http::v1::requests::scrape::Scrape; -use crate::http::v1::services::peer_ip_resolver::{self, ClientIpSources}; -use crate::http::v1::{responses, services}; +use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; +use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; +use crate::servers::http::v1::extractors::scrape_request::ExtractRequest; +use crate::servers::http::v1::requests::scrape::Scrape; +use crate::servers::http::v1::services::peer_ip_resolver::{self, ClientIpSources}; +use crate::servers::http::v1::{responses, services}; use crate::tracker::auth::Key; use crate::tracker::{ScrapeData, Tracker}; @@ -99,10 +99,10 @@ mod tests { use torrust_tracker_test_helpers::configuration; - use crate::http::v1::requests::scrape::Scrape; - use crate::http::v1::responses; - use crate::http::v1::services::peer_ip_resolver::ClientIpSources; use crate::protocol::info_hash::InfoHash; + use crate::servers::http::v1::requests::scrape::Scrape; + use crate::servers::http::v1::responses; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; use crate::tracker::services::common::tracker_factory; use crate::tracker::Tracker; @@ -147,7 +147,7 @@ mod tests { use std::sync::Arc; use super::{private_tracker, sample_client_ip_sources, sample_scrape_request}; - use crate::http::v1::handlers::scrape::handle_scrape; + use 
crate::servers::http::v1::handlers::scrape::handle_scrape; use crate::tracker::{auth, ScrapeData}; #[tokio::test] @@ -189,7 +189,7 @@ mod tests { use std::sync::Arc; use super::{sample_client_ip_sources, sample_scrape_request, whitelisted_tracker}; - use crate::http::v1::handlers::scrape::handle_scrape; + use crate::servers::http::v1::handlers::scrape::handle_scrape; use crate::tracker::ScrapeData; #[tokio::test] @@ -212,9 +212,9 @@ mod tests { use std::sync::Arc; use super::{sample_scrape_request, tracker_on_reverse_proxy}; - use crate::http::v1::handlers::scrape::handle_scrape; - use crate::http::v1::handlers::scrape::tests::assert_error_response; - use crate::http::v1::services::peer_ip_resolver::ClientIpSources; + use crate::servers::http::v1::handlers::scrape::handle_scrape; + use crate::servers::http::v1::handlers::scrape::tests::assert_error_response; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { @@ -240,9 +240,9 @@ mod tests { use std::sync::Arc; use super::{sample_scrape_request, tracker_not_on_reverse_proxy}; - use crate::http::v1::handlers::scrape::handle_scrape; - use crate::http::v1::handlers::scrape::tests::assert_error_response; - use crate::http::v1::services::peer_ip_resolver::ClientIpSources; + use crate::servers::http::v1::handlers::scrape::handle_scrape; + use crate::servers::http::v1::handlers::scrape::tests::assert_error_response; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { diff --git a/src/http/v1/launcher.rs b/src/servers/http/v1/launcher.rs similarity index 98% rename from src/http/v1/launcher.rs rename to src/servers/http/v1/launcher.rs index 45bc54664..4cfa4295d 100644 --- a/src/http/v1/launcher.rs +++ b/src/servers/http/v1/launcher.rs @@ -10,7 +10,7 @@ use 
futures::future::BoxFuture; use log::info; use super::routes::router; -use crate::http::server::HttpServerLauncher; +use crate::servers::http::server::HttpServerLauncher; use crate::tracker::Tracker; #[derive(Debug)] diff --git a/src/http/v1/mod.rs b/src/servers/http/v1/mod.rs similarity index 100% rename from src/http/v1/mod.rs rename to src/servers/http/v1/mod.rs diff --git a/src/http/v1/query.rs b/src/servers/http/v1/query.rs similarity index 97% rename from src/http/v1/query.rs rename to src/servers/http/v1/query.rs index 45484ea38..c40e7949f 100644 --- a/src/http/v1/query.rs +++ b/src/servers/http/v1/query.rs @@ -174,7 +174,7 @@ impl std::fmt::Display for FieldValuePairSet { mod tests { mod url_query { - use crate::http::v1::query::Query; + use crate::servers::http::v1::query::Query; #[test] fn should_parse_the_query_params_from_an_url_query_string() { @@ -227,7 +227,7 @@ mod tests { } mod should_allow_more_than_one_value_for_the_same_param { - use crate::http::v1::query::Query; + use crate::servers::http::v1::query::Query; #[test] fn instantiated_from_a_vector() { @@ -249,7 +249,7 @@ mod tests { } mod should_be_displayed { - use crate::http::v1::query::Query; + use crate::servers::http::v1::query::Query; #[test] fn with_one_param() { @@ -270,7 +270,7 @@ mod tests { } mod param_name_value_pair { - use crate::http::v1::query::NameValuePair; + use crate::servers::http::v1::query::NameValuePair; #[test] fn should_parse_a_single_query_param() { diff --git a/src/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs similarity index 97% rename from src/http/v1/requests/announce.rs rename to src/servers/http/v1/requests/announce.rs index eeab97d5f..3b1e55cb9 100644 --- a/src/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -5,10 +5,10 @@ use std::str::FromStr; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; -use crate::http::percent_encoding::{percent_decode_info_hash, 
percent_decode_peer_id}; -use crate::http::v1::query::{ParseQueryError, Query}; -use crate::http::v1::responses; use crate::protocol::info_hash::{ConversionError, InfoHash}; +use crate::servers::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; +use crate::servers::http::v1::query::{ParseQueryError, Query}; +use crate::servers::http::v1::responses; use crate::tracker::peer::{self, IdConversionError}; pub type NumberOfBytes = i64; @@ -280,11 +280,11 @@ mod tests { mod announce_request { - use crate::http::v1::query::Query; - use crate::http::v1::requests::announce::{ + use crate::protocol::info_hash::InfoHash; + use crate::servers::http::v1::query::Query; + use crate::servers::http::v1::requests::announce::{ Announce, Compact, Event, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, }; - use crate::protocol::info_hash::InfoHash; use crate::tracker::peer; #[test] @@ -350,8 +350,8 @@ mod tests { mod when_it_is_instantiated_from_the_url_query_params { - use crate::http::v1::query::Query; - use crate::http::v1::requests::announce::{ + use crate::servers::http::v1::query::Query; + use crate::servers::http::v1::requests::announce::{ Announce, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, }; diff --git a/src/http/v1/requests/mod.rs b/src/servers/http/v1/requests/mod.rs similarity index 100% rename from src/http/v1/requests/mod.rs rename to src/servers/http/v1/requests/mod.rs diff --git a/src/http/v1/requests/scrape.rs b/src/servers/http/v1/requests/scrape.rs similarity index 89% rename from src/http/v1/requests/scrape.rs rename to src/servers/http/v1/requests/scrape.rs index 6257f0733..e50895c20 100644 --- a/src/http/v1/requests/scrape.rs +++ b/src/servers/http/v1/requests/scrape.rs @@ -3,10 +3,10 @@ use std::panic::Location; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; -use crate::http::percent_encoding::percent_decode_info_hash; -use crate::http::v1::query::Query; 
-use crate::http::v1::responses; use crate::protocol::info_hash::{ConversionError, InfoHash}; +use crate::servers::http::percent_encoding::percent_decode_info_hash; +use crate::servers::http::v1::query::Query; +use crate::servers::http::v1::responses; pub type NumberOfBytes = i64; @@ -85,9 +85,9 @@ mod tests { mod scrape_request { - use crate::http::v1::query::Query; - use crate::http::v1::requests::scrape::{Scrape, INFO_HASH}; use crate::protocol::info_hash::InfoHash; + use crate::servers::http::v1::query::Query; + use crate::servers::http::v1::requests::scrape::{Scrape, INFO_HASH}; #[test] fn should_be_instantiated_from_the_url_query_with_only_one_infohash() { @@ -107,8 +107,8 @@ mod tests { mod when_it_is_instantiated_from_the_url_query_params { - use crate::http::v1::query::Query; - use crate::http::v1::requests::scrape::{Scrape, INFO_HASH}; + use crate::servers::http::v1::query::Query; + use crate::servers::http::v1::requests::scrape::{Scrape, INFO_HASH}; #[test] fn it_should_fail_if_the_query_does_not_include_the_info_hash_param() { diff --git a/src/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs similarity index 98% rename from src/http/v1/responses/announce.rs rename to src/servers/http/v1/responses/announce.rs index 8b178ff7e..4902e0d62 100644 --- a/src/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -8,7 +8,7 @@ use bip_bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut} use serde::{self, Deserialize, Serialize}; use thiserror::Error; -use crate::http::v1::responses; +use crate::servers::http::v1::responses; use crate::tracker::{self, AnnounceData}; /// Normal (non compact) "announce" response @@ -250,7 +250,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use super::{NonCompact, Peer}; - use crate::http::v1::responses::announce::{Compact, CompactPeer}; + use crate::servers::http::v1::responses::announce::{Compact, CompactPeer}; // Some ascii values used in tests: 
// diff --git a/src/http/v1/responses/error.rs b/src/servers/http/v1/responses/error.rs similarity index 100% rename from src/http/v1/responses/error.rs rename to src/servers/http/v1/responses/error.rs diff --git a/src/http/v1/responses/mod.rs b/src/servers/http/v1/responses/mod.rs similarity index 100% rename from src/http/v1/responses/mod.rs rename to src/servers/http/v1/responses/mod.rs diff --git a/src/http/v1/responses/scrape.rs b/src/servers/http/v1/responses/scrape.rs similarity index 97% rename from src/http/v1/responses/scrape.rs rename to src/servers/http/v1/responses/scrape.rs index 5cbe6502e..7d9e169c8 100644 --- a/src/http/v1/responses/scrape.rs +++ b/src/servers/http/v1/responses/scrape.rs @@ -55,8 +55,8 @@ impl IntoResponse for Bencoded { mod tests { mod scrape_response { - use crate::http::v1::responses::scrape::Bencoded; use crate::protocol::info_hash::InfoHash; + use crate::servers::http::v1::responses::scrape::Bencoded; use crate::tracker::torrent::SwarmMetadata; use crate::tracker::ScrapeData; diff --git a/src/http/v1/routes.rs b/src/servers/http/v1/routes.rs similarity index 100% rename from src/http/v1/routes.rs rename to src/servers/http/v1/routes.rs diff --git a/src/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs similarity index 97% rename from src/http/v1/services/announce.rs rename to src/servers/http/v1/services/announce.rs index a8b9f0d06..3f8c5a839 100644 --- a/src/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -77,8 +77,8 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; - use crate::http::v1::services::announce::invoke; - use crate::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; + use crate::servers::http::v1::services::announce::invoke; + use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; use 
crate::tracker::peer::Peer; use crate::tracker::torrent::SwarmStats; use crate::tracker::{statistics, AnnounceData, Tracker}; diff --git a/src/http/v1/services/mod.rs b/src/servers/http/v1/services/mod.rs similarity index 100% rename from src/http/v1/services/mod.rs rename to src/servers/http/v1/services/mod.rs diff --git a/src/http/v1/services/peer_ip_resolver.rs b/src/servers/http/v1/services/peer_ip_resolver.rs similarity index 95% rename from src/http/v1/services/peer_ip_resolver.rs rename to src/servers/http/v1/services/peer_ip_resolver.rs index c7bc183b4..ac5b8c79f 100644 --- a/src/http/v1/services/peer_ip_resolver.rs +++ b/src/servers/http/v1/services/peer_ip_resolver.rs @@ -73,7 +73,7 @@ mod tests { use std::str::FromStr; use super::invoke; - use crate::http::v1::services::peer_ip_resolver::{ClientIpSources, PeerIpResolutionError}; + use crate::servers::http::v1::services::peer_ip_resolver::{ClientIpSources, PeerIpResolutionError}; #[test] fn it_should_get_the_peer_ip_from_the_connection_info() { @@ -112,7 +112,7 @@ mod tests { use std::net::IpAddr; use std::str::FromStr; - use crate::http::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; + use crate::servers::http::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; #[test] fn it_should_get_the_peer_ip_from_the_right_most_ip_in_the_x_forwarded_for_header() { diff --git a/src/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs similarity index 95% rename from src/http/v1/services/scrape.rs rename to src/servers/http/v1/services/scrape.rs index b6f319375..1044634ad 100644 --- a/src/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -77,8 +77,10 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_test_helpers::configuration; - use crate::http::v1::services::scrape::invoke; - use crate::http::v1::services::scrape::tests::{public_tracker, sample_info_hash, sample_info_hashes, sample_peer}; + use 
crate::servers::http::v1::services::scrape::invoke; + use crate::servers::http::v1::services::scrape::tests::{ + public_tracker, sample_info_hash, sample_info_hashes, sample_peer, + }; use crate::tracker::torrent::SwarmMetadata; use crate::tracker::{statistics, ScrapeData, Tracker}; @@ -167,8 +169,10 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_test_helpers::configuration; - use crate::http::v1::services::scrape::fake; - use crate::http::v1::services::scrape::tests::{public_tracker, sample_info_hash, sample_info_hashes, sample_peer}; + use crate::servers::http::v1::services::scrape::fake; + use crate::servers::http::v1::services::scrape::tests::{ + public_tracker, sample_info_hash, sample_info_hashes, sample_peer, + }; use crate::tracker::{statistics, ScrapeData, Tracker}; #[tokio::test] diff --git a/src/servers/mod.rs b/src/servers/mod.rs new file mode 100644 index 000000000..17005b56d --- /dev/null +++ b/src/servers/mod.rs @@ -0,0 +1,3 @@ +pub mod apis; +pub mod http; +pub mod udp; diff --git a/src/udp/connection_cookie.rs b/src/servers/udp/connection_cookie.rs similarity index 99% rename from src/udp/connection_cookie.rs rename to src/servers/udp/connection_cookie.rs index ef2a8b219..a55d40801 100644 --- a/src/udp/connection_cookie.rs +++ b/src/servers/udp/connection_cookie.rs @@ -92,7 +92,7 @@ mod tests { use super::cookie_builder::{self}; use crate::protocol::clock::time_extent::{self, Extent}; use crate::protocol::clock::{Stopped, StoppedTime}; - use crate::udp::connection_cookie::{check, make, Cookie, COOKIE_LIFETIME}; + use crate::servers::udp::connection_cookie::{check, make, Cookie, COOKIE_LIFETIME}; // #![feature(const_socketaddr)] // const REMOTE_ADDRESS_IPV4_ZERO: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); diff --git a/src/udp/error.rs b/src/servers/udp/error.rs similarity index 100% rename from src/udp/error.rs rename to src/servers/udp/error.rs diff --git a/src/udp/handlers.rs b/src/servers/udp/handlers.rs 
similarity index 94% rename from src/udp/handlers.rs rename to src/servers/udp/handlers.rs index 41b1184dc..1544e13cc 100644 --- a/src/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -11,10 +11,10 @@ use log::debug; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; use crate::protocol::common::MAX_SCRAPE_TORRENTS; use crate::protocol::info_hash::InfoHash; +use crate::servers::udp::error::Error; +use crate::servers::udp::peer_builder; +use crate::servers::udp::request::AnnounceWrapper; use crate::tracker::{statistics, Tracker}; -use crate::udp::error::Error; -use crate::udp::peer_builder; -use crate::udp::request::AnnounceWrapper; pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: &Tracker) -> Response { match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|e| Error::InternalServer { @@ -354,10 +354,10 @@ mod tests { use mockall::predicate::eq; use super::{sample_ipv4_socket_address, sample_ipv6_remote_addr, tracker_configuration}; + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::handle_connect; + use crate::servers::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; use crate::tracker::{self, statistics}; - use crate::udp::connection_cookie::{into_connection_id, make}; - use crate::udp::handlers::handle_connect; - use crate::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; fn sample_connect_request() -> ConnectRequest { ConnectRequest { @@ -451,8 +451,8 @@ mod tests { TransactionId, }; - use crate::udp::connection_cookie::{into_connection_id, make}; - use crate::udp::handlers::tests::sample_ipv4_remote_addr; + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::tests::sample_ipv4_remote_addr; struct AnnounceRequestBuilder { request: AnnounceRequest, @@ -525,13 +525,13 @@ mod tests { }; use mockall::predicate::eq; - use 
crate::tracker::{self, peer, statistics}; - use crate::udp::connection_cookie::{into_connection_id, make}; - use crate::udp::handlers::handle_announce; - use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::udp::handlers::tests::{ + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::handle_announce; + use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::{ public_tracker, sample_ipv4_socket_address, tracker_configuration, TorrentPeerBuilder, }; + use crate::tracker::{self, peer, statistics}; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -692,11 +692,11 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::handle_announce; + use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::{public_tracker, TorrentPeerBuilder}; use crate::tracker::peer; - use crate::udp::connection_cookie::{into_connection_id, make}; - use crate::udp::handlers::handle_announce; - use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::udp::handlers::tests::{public_tracker, TorrentPeerBuilder}; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { @@ -746,13 +746,13 @@ mod tests { }; use mockall::predicate::eq; - use crate::tracker::{self, peer, statistics}; - use crate::udp::connection_cookie::{into_connection_id, make}; - use crate::udp::handlers::handle_announce; - use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::udp::handlers::tests::{ + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use 
crate::servers::udp::handlers::handle_announce; + use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::{ public_tracker, sample_ipv6_remote_addr, tracker_configuration, TorrentPeerBuilder, }; + use crate::tracker::{self, peer, statistics}; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -921,12 +921,12 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::handle_announce; + use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::TrackerConfigurationBuilder; use crate::tracker; use crate::tracker::statistics::Keeper; - use crate::udp::connection_cookie::{into_connection_id, make}; - use crate::udp::handlers::handle_announce; - use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::udp::handlers::tests::TrackerConfigurationBuilder; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { @@ -983,10 +983,10 @@ mod tests { }; use super::TorrentPeerBuilder; + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; use crate::tracker::{self, peer}; - use crate::udp::connection_cookie::{into_connection_id, make}; - use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { TorrentScrapeStatistics { @@ -1067,8 +1067,8 @@ mod tests { mod with_a_public_tracker { use aquatic_udp_protocol::{NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; - use crate::udp::handlers::tests::public_tracker; 
- use crate::udp::handlers::tests::scrape_request::{add_a_sample_seeder_and_scrape, match_scrape_response}; + use crate::servers::udp::handlers::tests::public_tracker; + use crate::servers::udp::handlers::tests::scrape_request::{add_a_sample_seeder_and_scrape, match_scrape_response}; #[tokio::test] async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { @@ -1090,11 +1090,11 @@ mod tests { use aquatic_udp_protocol::InfoHash; - use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::scrape_request::{ + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::scrape_request::{ add_a_sample_seeder_and_scrape, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; - use crate::udp::handlers::tests::{private_tracker, sample_ipv4_remote_addr}; + use crate::servers::udp::handlers::tests::{private_tracker, sample_ipv4_remote_addr}; #[tokio::test] async fn should_return_zeroed_statistics_when_the_tracker_does_not_have_the_requested_torrent() { @@ -1128,11 +1128,11 @@ mod tests { mod with_a_whitelisted_tracker { use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; - use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::scrape_request::{ + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::scrape_request::{ add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; - use crate::udp::handlers::tests::{sample_ipv4_remote_addr, whitelisted_tracker}; + use crate::servers::udp::handlers::tests::{sample_ipv4_remote_addr, whitelisted_tracker}; #[tokio::test] async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { @@ -1195,9 +1195,9 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; + use crate::servers::udp::handlers::handle_scrape; + use 
crate::servers::udp::handlers::tests::{sample_ipv4_remote_addr, tracker_configuration}; use crate::tracker::{self, statistics}; - use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{sample_ipv4_remote_addr, tracker_configuration}; #[tokio::test] async fn should_send_the_upd4_scrape_event() { @@ -1227,9 +1227,9 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::{sample_ipv6_remote_addr, tracker_configuration}; use crate::tracker::{self, statistics}; - use crate::udp::handlers::handle_scrape; - use crate::udp::handlers::tests::{sample_ipv6_remote_addr, tracker_configuration}; #[tokio::test] async fn should_send_the_upd6_scrape_event() { diff --git a/src/udp/mod.rs b/src/servers/udp/mod.rs similarity index 100% rename from src/udp/mod.rs rename to src/servers/udp/mod.rs diff --git a/src/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs similarity index 100% rename from src/udp/peer_builder.rs rename to src/servers/udp/peer_builder.rs diff --git a/src/udp/request.rs b/src/servers/udp/request.rs similarity index 100% rename from src/udp/request.rs rename to src/servers/udp/request.rs diff --git a/src/udp/server.rs b/src/servers/udp/server.rs similarity index 98% rename from src/udp/server.rs rename to src/servers/udp/server.rs index e52b8fd52..f44f37f5f 100644 --- a/src/udp/server.rs +++ b/src/servers/udp/server.rs @@ -9,10 +9,10 @@ use log::{debug, error, info}; use tokio::net::UdpSocket; use tokio::task::JoinHandle; +use crate::servers::udp::handlers::handle_packet; +use crate::servers::udp::MAX_PACKET_SIZE; use crate::signals::shutdown_signal; use crate::tracker::Tracker; -use crate::udp::handlers::handle_packet; -use crate::udp::MAX_PACKET_SIZE; #[derive(Debug)] pub enum Error { diff --git a/src/setup.rs b/src/setup.rs index 86de0723c..c8e5e4113 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -4,8 +4,8 @@ use 
log::warn; use tokio::task::JoinHandle; use torrust_tracker_configuration::Configuration; -use crate::http::Version; use crate::jobs::{http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; +use crate::servers::http::Version; use crate::tracker; /// # Panics diff --git a/tests/servers/api/test_environment.rs b/tests/servers/api/test_environment.rs index b6f5ca990..07ba46fff 100644 --- a/tests/servers/api/test_environment.rs +++ b/tests/servers/api/test_environment.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use torrust_tracker::apis::server::{ApiServer, RunningApiServer, StoppedApiServer}; use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::servers::apis::server::{ApiServer, RunningApiServer, StoppedApiServer}; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; diff --git a/tests/servers/api/v1/asserts.rs b/tests/servers/api/v1/asserts.rs index d37bcdbb4..1b1f204a2 100644 --- a/tests/servers/api/v1/asserts.rs +++ b/tests/servers/api/v1/asserts.rs @@ -1,9 +1,9 @@ // code-review: should we use macros to return the exact line where the assert fails? 
use reqwest::Response; -use torrust_tracker::apis::v1::context::auth_key::resources::AuthKey; -use torrust_tracker::apis::v1::context::stats::resources::Stats; -use torrust_tracker::apis::v1::context::torrent::resources::torrent::{ListItem, Torrent}; +use torrust_tracker::servers::apis::v1::context::auth_key::resources::AuthKey; +use torrust_tracker::servers::apis::v1::context::stats::resources::Stats; +use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{ListItem, Torrent}; // Resource responses diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index 3929a4270..038272963 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -1,7 +1,7 @@ use std::str::FromStr; -use torrust_tracker::apis::v1::context::stats::resources::Stats; use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::servers::apis::v1::context::stats::resources::Stats; use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::PeerBuilder; diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index 702a8bcd4..69d600a8b 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -1,8 +1,8 @@ use std::str::FromStr; -use torrust_tracker::apis::v1::context::torrent::resources::peer::Peer; -use torrust_tracker::apis::v1::context::torrent::resources::torrent::{self, Torrent}; use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::servers::apis::v1::context::torrent::resources::peer::Peer; +use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{self, Torrent}; use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::PeerBuilder; diff --git a/tests/servers/http/test_environment.rs b/tests/servers/http/test_environment.rs index 459c2fbe6..aa414d62e 
100644 --- a/tests/servers/http/test_environment.rs +++ b/tests/servers/http/test_environment.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use torrust_tracker::http::server::{HttpServer, HttpServerLauncher, RunningHttpServer, StoppedHttpServer}; use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::servers::http::server::{HttpServer, HttpServerLauncher, RunningHttpServer, StoppedHttpServer}; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 501c0f6fa..f67b0c5e5 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -2,7 +2,7 @@ use torrust_tracker_test_helpers::configuration; use crate::servers::http::test_environment::running_test_environment; -pub type V1 = torrust_tracker::http::v1::launcher::Launcher; +pub type V1 = torrust_tracker::servers::http::v1::launcher::Launcher; #[tokio::test] async fn test_environment_should_be_started_and_stopped() { diff --git a/tests/servers/udp/client.rs b/tests/servers/udp/client.rs index a13845b97..75467055e 100644 --- a/tests/servers/udp/client.rs +++ b/tests/servers/udp/client.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use aquatic_udp_protocol::{Request, Response}; use tokio::net::UdpSocket; -use torrust_tracker::udp::MAX_PACKET_SIZE; +use torrust_tracker::servers::udp::MAX_PACKET_SIZE; use crate::servers::udp::source_address; diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index 311cf5e49..3187d9871 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -6,7 +6,7 @@ use core::panic; use aquatic_udp_protocol::{ConnectRequest, ConnectionId, Response, TransactionId}; -use torrust_tracker::udp::MAX_PACKET_SIZE; +use torrust_tracker::servers::udp::MAX_PACKET_SIZE; use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_error_response; diff --git a/tests/servers/udp/test_environment.rs 
b/tests/servers/udp/test_environment.rs index f729777a1..11a2cf6cd 100644 --- a/tests/servers/udp/test_environment.rs +++ b/tests/servers/udp/test_environment.rs @@ -2,9 +2,9 @@ use std::net::SocketAddr; use std::sync::Arc; use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::servers::udp::server::{RunningUdpServer, StoppedUdpServer, UdpServer}; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; -use torrust_tracker::udp::server::{RunningUdpServer, StoppedUdpServer, UdpServer}; use crate::common::tracker::new_tracker; From 389771b5b221a37908e7f0a584da5864d855f501 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 08:22:25 +0000 Subject: [PATCH 419/435] refactor: move databases mod to tracker mod Database is only use inisde the `tracker` mod. --- src/lib.rs | 1 - src/{ => tracker}/databases/driver.rs | 0 src/{ => tracker}/databases/error.rs | 0 src/{ => tracker}/databases/mod.rs | 0 src/{ => tracker}/databases/mysql.rs | 2 +- src/{ => tracker}/databases/sqlite.rs | 2 +- src/tracker/mod.rs | 3 ++- 7 files changed, 4 insertions(+), 4 deletions(-) rename src/{ => tracker}/databases/driver.rs (100%) rename src/{ => tracker}/databases/error.rs (100%) rename src/{ => tracker}/databases/mod.rs (100%) rename src/{ => tracker}/databases/mysql.rs (99%) rename src/{ => tracker}/databases/sqlite.rs (99%) diff --git a/src/lib.rs b/src/lib.rs index 6c0ae464f..442b687fa 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,3 @@ -pub mod databases; pub mod jobs; pub mod logging; pub mod protocol; diff --git a/src/databases/driver.rs b/src/tracker/databases/driver.rs similarity index 100% rename from src/databases/driver.rs rename to src/tracker/databases/driver.rs diff --git a/src/databases/error.rs b/src/tracker/databases/error.rs similarity index 100% rename from src/databases/error.rs rename to src/tracker/databases/error.rs diff --git a/src/databases/mod.rs b/src/tracker/databases/mod.rs similarity index 100% 
rename from src/databases/mod.rs rename to src/tracker/databases/mod.rs diff --git a/src/databases/mysql.rs b/src/tracker/databases/mysql.rs similarity index 99% rename from src/databases/mysql.rs rename to src/tracker/databases/mysql.rs index f6918974f..ded9e1617 100644 --- a/src/databases/mysql.rs +++ b/src/tracker/databases/mysql.rs @@ -9,7 +9,7 @@ use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MySqlConnectionManager; use torrust_tracker_primitives::DatabaseDriver; -use crate::databases::{Database, Error}; +use super::{Database, Error}; use crate::protocol::common::AUTH_KEY_LENGTH; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth::{self, Key}; diff --git a/src/databases/sqlite.rs b/src/tracker/databases/sqlite.rs similarity index 99% rename from src/databases/sqlite.rs rename to src/tracker/databases/sqlite.rs index adb201def..e230ac18f 100644 --- a/src/databases/sqlite.rs +++ b/src/tracker/databases/sqlite.rs @@ -6,7 +6,7 @@ use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; use torrust_tracker_primitives::DatabaseDriver; -use crate::databases::{Database, Error}; +use super::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; use crate::protocol::info_hash::InfoHash; use crate::tracker::auth::{self, Key}; diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 71bb41f90..9ed28b684 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -1,4 +1,5 @@ pub mod auth; +pub mod databases; pub mod error; pub mod peer; pub mod services; @@ -21,8 +22,8 @@ use self::auth::Key; use self::error::Error; use self::peer::Peer; use self::torrent::{SwarmMetadata, SwarmStats}; -use crate::databases::{self, Database}; use crate::protocol::info_hash::InfoHash; +use crate::tracker::databases::Database; pub struct Tracker { pub config: Arc, From 06ea911ef3a5da68d7d53624eb6b83b54ad5ab49 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 08:48:01 +0000 Subject: [PATCH 420/435] refactor: move signals 
to servers mod --- src/lib.rs | 1 - src/servers/apis/server.rs | 2 +- src/servers/http/server.rs | 2 +- src/servers/mod.rs | 1 + src/{ => servers}/signals.rs | 0 src/servers/udp/server.rs | 2 +- 6 files changed, 4 insertions(+), 4 deletions(-) rename src/{ => servers}/signals.rs (100%) diff --git a/src/lib.rs b/src/lib.rs index 442b687fa..1eb54edbd 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,7 +3,6 @@ pub mod logging; pub mod protocol; pub mod servers; pub mod setup; -pub mod signals; pub mod stats; pub mod tracker; diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 002babbfb..e4714cd9a 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -9,7 +9,7 @@ use futures::Future; use log::info; use super::routes::router; -use crate::signals::shutdown_signal; +use crate::servers::signals::shutdown_signal; use crate::tracker::Tracker; #[derive(Debug)] diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 98160777c..510c685d4 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use futures::future::BoxFuture; -use crate::signals::shutdown_signal; +use crate::servers::signals::shutdown_signal; use crate::tracker::Tracker; /// Trait to be implemented by a http server launcher for the tracker. 
diff --git a/src/servers/mod.rs b/src/servers/mod.rs index 17005b56d..a71b3f029 100644 --- a/src/servers/mod.rs +++ b/src/servers/mod.rs @@ -1,3 +1,4 @@ pub mod apis; pub mod http; +pub mod signals; pub mod udp; diff --git a/src/signals.rs b/src/servers/signals.rs similarity index 100% rename from src/signals.rs rename to src/servers/signals.rs diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs index f44f37f5f..9eb9836fe 100644 --- a/src/servers/udp/server.rs +++ b/src/servers/udp/server.rs @@ -9,9 +9,9 @@ use log::{debug, error, info}; use tokio::net::UdpSocket; use tokio::task::JoinHandle; +use crate::servers::signals::shutdown_signal; use crate::servers::udp::handlers::handle_packet; use crate::servers::udp::MAX_PACKET_SIZE; -use crate::signals::shutdown_signal; use crate::tracker::Tracker; #[derive(Debug)] From 5dab523174894e195e6e3905e0f2ed818baf952a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 09:40:48 +0000 Subject: [PATCH 421/435] refactor: extract bootstrap mod --- src/{ => bootstrap}/jobs/http_tracker.rs | 0 src/{setup.rs => bootstrap/jobs/mod.rs} | 6 +++++- src/{ => bootstrap}/jobs/torrent_cleanup.rs | 0 src/{ => bootstrap}/jobs/tracker_apis.rs | 0 src/{ => bootstrap}/jobs/udp_tracker.rs | 0 src/{ => bootstrap}/logging.rs | 0 src/bootstrap/mod.rs | 3 +++ src/{ => bootstrap}/stats.rs | 8 ++++---- src/jobs/mod.rs | 4 ---- src/lib.rs | 5 +---- src/main.rs | 10 +++++----- tests/common/tracker.rs | 4 ++-- 12 files changed, 20 insertions(+), 20 deletions(-) rename src/{ => bootstrap}/jobs/http_tracker.rs (100%) rename src/{setup.rs => bootstrap/jobs/mod.rs} (95%) rename src/{ => bootstrap}/jobs/torrent_cleanup.rs (100%) rename src/{ => bootstrap}/jobs/tracker_apis.rs (100%) rename src/{ => bootstrap}/jobs/udp_tracker.rs (100%) rename src/{ => bootstrap}/logging.rs (100%) create mode 100644 src/bootstrap/mod.rs rename src/{ => bootstrap}/stats.rs (66%) delete mode 100644 src/jobs/mod.rs diff --git a/src/jobs/http_tracker.rs 
b/src/bootstrap/jobs/http_tracker.rs similarity index 100% rename from src/jobs/http_tracker.rs rename to src/bootstrap/jobs/http_tracker.rs diff --git a/src/setup.rs b/src/bootstrap/jobs/mod.rs similarity index 95% rename from src/setup.rs rename to src/bootstrap/jobs/mod.rs index c8e5e4113..cbe3f509c 100644 --- a/src/setup.rs +++ b/src/bootstrap/jobs/mod.rs @@ -1,10 +1,14 @@ +pub mod http_tracker; +pub mod torrent_cleanup; +pub mod tracker_apis; +pub mod udp_tracker; + use std::sync::Arc; use log::warn; use tokio::task::JoinHandle; use torrust_tracker_configuration::Configuration; -use crate::jobs::{http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; use crate::servers::http::Version; use crate::tracker; diff --git a/src/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs similarity index 100% rename from src/jobs/torrent_cleanup.rs rename to src/bootstrap/jobs/torrent_cleanup.rs diff --git a/src/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs similarity index 100% rename from src/jobs/tracker_apis.rs rename to src/bootstrap/jobs/tracker_apis.rs diff --git a/src/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs similarity index 100% rename from src/jobs/udp_tracker.rs rename to src/bootstrap/jobs/udp_tracker.rs diff --git a/src/logging.rs b/src/bootstrap/logging.rs similarity index 100% rename from src/logging.rs rename to src/bootstrap/logging.rs diff --git a/src/bootstrap/mod.rs b/src/bootstrap/mod.rs new file mode 100644 index 000000000..fd51d9b90 --- /dev/null +++ b/src/bootstrap/mod.rs @@ -0,0 +1,3 @@ +pub mod jobs; +pub mod logging; +pub mod stats; diff --git a/src/stats.rs b/src/bootstrap/stats.rs similarity index 66% rename from src/stats.rs rename to src/bootstrap/stats.rs index 8f87c01a3..3b109f297 100644 --- a/src/stats.rs +++ b/src/bootstrap/stats.rs @@ -1,7 +1,7 @@ use crate::tracker::statistics; #[must_use] -pub fn setup_statistics(tracker_usage_statistics: bool) -> (Option>, statistics::Repo) { +pub fn 
setup(tracker_usage_statistics: bool) -> (Option>, statistics::Repo) { let mut stats_event_sender = None; let mut stats_tracker = statistics::Keeper::new(); @@ -15,13 +15,13 @@ pub fn setup_statistics(tracker_usage_statistics: bool) -> (Option Date: Fri, 17 Mar 2023 10:18:35 +0000 Subject: [PATCH 422/435] refactor: extract app setup and app start mods --- src/app.rs | 68 +++++++++++++++++++++++++++ src/bootstrap/app.rs | 49 +++++++++++++++++++ src/bootstrap/jobs/mod.rs | 68 --------------------------- src/bootstrap/jobs/torrent_cleanup.rs | 2 +- src/bootstrap/mod.rs | 1 + src/lib.rs | 1 + src/main.rs | 44 ++--------------- 7 files changed, 124 insertions(+), 109 deletions(-) create mode 100644 src/app.rs create mode 100644 src/bootstrap/app.rs diff --git a/src/app.rs b/src/app.rs new file mode 100644 index 000000000..5f75449ca --- /dev/null +++ b/src/app.rs @@ -0,0 +1,68 @@ +use std::sync::Arc; + +use log::warn; +use tokio::task::JoinHandle; +use torrust_tracker_configuration::Configuration; + +use crate::bootstrap::jobs::{http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; +use crate::servers::http::Version; +use crate::tracker; + +/// # Panics +/// +/// Will panic if the socket address for API can't be parsed. +pub async fn start(config: Arc, tracker: Arc) -> Vec> { + let mut jobs: Vec> = Vec::new(); + + // Load peer keys + if tracker.is_private() { + tracker + .load_keys_from_database() + .await + .expect("Could not retrieve keys from database."); + } + + // Load whitelisted torrents + if tracker.is_whitelisted() { + tracker + .load_whitelist_from_database() + .await + .expect("Could not load whitelist from database."); + } + + // Start the UDP blocks + for udp_tracker_config in &config.udp_trackers { + if !udp_tracker_config.enabled { + continue; + } + + if tracker.is_private() { + warn!( + "Could not start UDP tracker on: {} while in {:?}. 
UDP is not safe for private trackers!", + udp_tracker_config.bind_address, config.mode + ); + } else { + jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone())); + } + } + + // Start the HTTP blocks + for http_tracker_config in &config.http_trackers { + if !http_tracker_config.enabled { + continue; + } + jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone(), Version::V1).await); + } + + // Start HTTP API + if config.http_api.enabled { + jobs.push(tracker_apis::start_job(&config.http_api, tracker.clone()).await); + } + + // Remove torrents without peers, every interval + if config.inactive_peer_cleanup_interval > 0 { + jobs.push(torrent_cleanup::start_job(&config, &tracker)); + } + + jobs +} diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs new file mode 100644 index 000000000..557203b0e --- /dev/null +++ b/src/bootstrap/app.rs @@ -0,0 +1,49 @@ +use std::env; +use std::sync::Arc; + +use torrust_tracker_configuration::Configuration; + +use crate::bootstrap::stats; +use crate::tracker::Tracker; +use crate::{bootstrap, ephemeral_instance_keys, static_time, tracker}; + +/// # Panics +/// +/// Will panic if it can't load the configuration from either +/// `./config.toml` file or env var `TORRUST_TRACKER_CONFIG`. 
+#[must_use] +pub fn setup() -> (Arc, Arc) { + const CONFIG_PATH: &str = "./config.toml"; + const CONFIG_ENV_VAR_NAME: &str = "TORRUST_TRACKER_CONFIG"; + + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + + // Initialize Torrust config + let config = if env::var(CONFIG_ENV_VAR_NAME).is_ok() { + println!("Loading configuration from env var {CONFIG_ENV_VAR_NAME}"); + Arc::new(Configuration::load_from_env_var(CONFIG_ENV_VAR_NAME).unwrap()) + } else { + println!("Loading configuration from config file {CONFIG_PATH}"); + Arc::new(Configuration::load_from_file(CONFIG_PATH).unwrap()) + }; + + // Initialize statistics + let (stats_event_sender, stats_repository) = stats::setup(config.tracker_usage_statistics); + + // Initialize Torrust tracker + let tracker = match tracker::Tracker::new(config.clone(), stats_event_sender, stats_repository) { + Ok(tracker) => Arc::new(tracker), + Err(error) => { + panic!("{}", error) + } + }; + + // Initialize logging + bootstrap::logging::setup(&config); + + (config, tracker) +} diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index cbe3f509c..ba44a56ad 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -2,71 +2,3 @@ pub mod http_tracker; pub mod torrent_cleanup; pub mod tracker_apis; pub mod udp_tracker; - -use std::sync::Arc; - -use log::warn; -use tokio::task::JoinHandle; -use torrust_tracker_configuration::Configuration; - -use crate::servers::http::Version; -use crate::tracker; - -/// # Panics -/// -/// Will panic if the socket address for API can't be parsed. 
-pub async fn setup(config: &Configuration, tracker: Arc) -> Vec> { - let mut jobs: Vec> = Vec::new(); - - // Load peer keys - if tracker.is_private() { - tracker - .load_keys_from_database() - .await - .expect("Could not retrieve keys from database."); - } - - // Load whitelisted torrents - if tracker.is_whitelisted() { - tracker - .load_whitelist_from_database() - .await - .expect("Could not load whitelist from database."); - } - - // Start the UDP blocks - for udp_tracker_config in &config.udp_trackers { - if !udp_tracker_config.enabled { - continue; - } - - if tracker.is_private() { - warn!( - "Could not start UDP tracker on: {} while in {:?}. UDP is not safe for private trackers!", - udp_tracker_config.bind_address, config.mode - ); - } else { - jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone())); - } - } - - // Start the HTTP blocks - for http_tracker_config in &config.http_trackers { - if !http_tracker_config.enabled { - continue; - } - jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone(), Version::V1).await); - } - - // Start HTTP API - if config.http_api.enabled { - jobs.push(tracker_apis::start_job(&config.http_api, tracker.clone()).await); - } - - // Remove torrents without peers, every interval - if config.inactive_peer_cleanup_interval > 0 { - jobs.push(torrent_cleanup::start_job(config, &tracker)); - } - - jobs -} diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index 4c4ed1f53..64240bffe 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -8,7 +8,7 @@ use torrust_tracker_configuration::Configuration; use crate::tracker; #[must_use] -pub fn start_job(config: &Configuration, tracker: &Arc) -> JoinHandle<()> { +pub fn start_job(config: &Arc, tracker: &Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(tracker); let interval = config.inactive_peer_cleanup_interval; diff --git a/src/bootstrap/mod.rs 
b/src/bootstrap/mod.rs index fd51d9b90..b4ee0558e 100644 --- a/src/bootstrap/mod.rs +++ b/src/bootstrap/mod.rs @@ -1,3 +1,4 @@ +pub mod app; pub mod jobs; pub mod logging; pub mod stats; diff --git a/src/lib.rs b/src/lib.rs index 5cf4ab8eb..70f0c4a4f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,3 +1,4 @@ +pub mod app; pub mod bootstrap; pub mod protocol; pub mod servers; diff --git a/src/main.rs b/src/main.rs index f8f218fca..87c0fc367 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,49 +1,13 @@ -use std::env; -use std::sync::Arc; - use log::info; -use torrust_tracker::bootstrap::stats::setup; -use torrust_tracker::{bootstrap, ephemeral_instance_keys, static_time, tracker}; -use torrust_tracker_configuration::Configuration; +use torrust_tracker::{app, bootstrap}; #[tokio::main] async fn main() { - const CONFIG_PATH: &str = "./config.toml"; - const CONFIG_ENV_VAR_NAME: &str = "TORRUST_TRACKER_CONFIG"; - - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - - // Initialize Torrust config - let config = if env::var(CONFIG_ENV_VAR_NAME).is_ok() { - println!("Loading configuration from env var {CONFIG_ENV_VAR_NAME}"); - Arc::new(Configuration::load_from_env_var(CONFIG_ENV_VAR_NAME).unwrap()) - } else { - println!("Loading configuration from config file {CONFIG_PATH}"); - Arc::new(Configuration::load_from_file(CONFIG_PATH).unwrap()) - }; - - // Initialize statistics - let (stats_event_sender, stats_repository) = setup(config.tracker_usage_statistics); - - // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(config.clone(), stats_event_sender, stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; - - // Initialize logging - bootstrap::logging::setup(&config); + let (config, tracker) = bootstrap::app::setup(); - // Run jobs - let jobs 
= bootstrap::jobs::setup(&config, tracker.clone()).await; + let jobs = app::start(config.clone(), tracker.clone()).await; - // handle the signals here + // handle the signals tokio::select! { _ = tokio::signal::ctrl_c() => { info!("Torrust shutting down.."); From 9ffcd6bec0c111c896bb4ab3280c71df7b2d7190 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 11:21:49 +0000 Subject: [PATCH 423/435] refactor: rename mod and extract bit_torrent mod `protocol` nod contains logic not only for BitTorrent protocol. --- src/lib.rs | 2 +- src/protocol/mod.rs | 5 ----- src/servers/apis/v1/context/auth_key/resources.rs | 4 ++-- src/servers/apis/v1/context/torrent/handlers.rs | 2 +- .../apis/v1/context/torrent/resources/torrent.rs | 4 ++-- src/servers/apis/v1/context/whitelist/handlers.rs | 2 +- src/servers/http/percent_encoding.rs | 4 ++-- .../http/v1/extractors/announce_request.rs | 2 +- src/servers/http/v1/extractors/scrape_request.rs | 2 +- src/servers/http/v1/handlers/announce.rs | 4 ++-- src/servers/http/v1/handlers/scrape.rs | 2 +- src/servers/http/v1/requests/announce.rs | 4 ++-- src/servers/http/v1/requests/scrape.rs | 4 ++-- src/servers/http/v1/responses/scrape.rs | 2 +- src/servers/http/v1/services/announce.rs | 6 +++--- src/servers/http/v1/services/scrape.rs | 6 +++--- src/servers/udp/connection_cookie.rs | 10 +++++----- src/servers/udp/handlers.rs | 6 +++--- src/servers/udp/peer_builder.rs | 2 +- src/servers/udp/request.rs | 2 +- src/{protocol => shared/bit_torrent}/common.rs | 0 src/{protocol => shared/bit_torrent}/info_hash.rs | 0 src/shared/bit_torrent/mod.rs | 2 ++ src/{protocol => shared}/clock/mod.rs | 15 ++++++++------- src/{protocol => shared}/clock/time_extent.rs | 4 ++-- src/{protocol => shared/clock}/utils.rs | 2 +- src/{protocol => shared}/crypto.rs | 4 ++-- src/shared/mod.rs | 3 +++ src/tracker/auth.rs | 6 +++--- src/tracker/databases/mod.rs | 2 +- src/tracker/databases/mysql.rs | 4 ++-- src/tracker/databases/sqlite.rs | 4 ++-- 
src/tracker/error.rs | 2 +- src/tracker/mod.rs | 10 +++++----- src/tracker/peer.rs | 10 ++++------ src/tracker/services/torrent.rs | 8 ++++---- src/tracker/torrent.rs | 6 +++--- tests/common/fixtures.rs | 2 +- tests/servers/api/test_environment.rs | 2 +- tests/servers/api/v1/contract/context/stats.rs | 2 +- tests/servers/api/v1/contract/context/torrent.rs | 2 +- .../servers/api/v1/contract/context/whitelist.rs | 2 +- tests/servers/http/requests/announce.rs | 2 +- tests/servers/http/requests/scrape.rs | 2 +- tests/servers/http/test_environment.rs | 2 +- tests/servers/http/v1/contract.rs | 12 ++++++------ tests/servers/udp/test_environment.rs | 2 +- 47 files changed, 93 insertions(+), 94 deletions(-) delete mode 100644 src/protocol/mod.rs rename src/{protocol => shared/bit_torrent}/common.rs (100%) rename src/{protocol => shared/bit_torrent}/info_hash.rs (100%) create mode 100644 src/shared/bit_torrent/mod.rs rename src/{protocol => shared}/clock/mod.rs (96%) rename src/{protocol => shared}/clock/time_extent.rs (99%) rename src/{protocol => shared/clock}/utils.rs (89%) rename src/{protocol => shared}/crypto.rs (94%) create mode 100644 src/shared/mod.rs diff --git a/src/lib.rs b/src/lib.rs index 70f0c4a4f..cf4439c3e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,7 +1,7 @@ pub mod app; pub mod bootstrap; -pub mod protocol; pub mod servers; +pub mod shared; pub mod tracker; #[macro_use] diff --git a/src/protocol/mod.rs b/src/protocol/mod.rs deleted file mode 100644 index bd4310dcf..000000000 --- a/src/protocol/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub mod clock; -pub mod common; -pub mod crypto; -pub mod info_hash; -pub mod utils; diff --git a/src/servers/apis/v1/context/auth_key/resources.rs b/src/servers/apis/v1/context/auth_key/resources.rs index cf43a6f3d..400b34eb7 100644 --- a/src/servers/apis/v1/context/auth_key/resources.rs +++ b/src/servers/apis/v1/context/auth_key/resources.rs @@ -2,7 +2,7 @@ use std::convert::From; use serde::{Deserialize, Serialize}; -use 
crate::protocol::clock::convert_from_iso_8601_to_timestamp; +use crate::shared::clock::convert_from_iso_8601_to_timestamp; use crate::tracker::auth::{self, Key}; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -36,7 +36,7 @@ mod tests { use std::time::Duration; use super::AuthKey; - use crate::protocol::clock::{Current, TimeNow}; + use crate::shared::clock::{Current, TimeNow}; use crate::tracker::auth::{self, Key}; struct TestTime { diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs index 45ffbcf22..4032f2e9a 100644 --- a/src/servers/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -8,9 +8,9 @@ use serde::{de, Deserialize, Deserializer}; use super::resources::torrent::ListItem; use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; -use crate::protocol::info_hash::InfoHash; use crate::servers::apis::v1::responses::invalid_info_hash_param_response; use crate::servers::apis::InfoHashParam; +use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; use crate::tracker::Tracker; diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs index 577ac279c..e328f80c4 100644 --- a/src/servers/apis/v1/context/torrent/resources/torrent.rs +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -75,10 +75,10 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use super::Torrent; - use crate::protocol::clock::DurationSinceUnixEpoch; - use crate::protocol::info_hash::InfoHash; use crate::servers::apis::v1::context::torrent::resources::peer::Peer; use crate::servers::apis::v1::context::torrent::resources::torrent::ListItem; + use crate::shared::bit_torrent::info_hash::InfoHash; + use crate::shared::clock::DurationSinceUnixEpoch; use 
crate::tracker::peer; use crate::tracker::services::torrent::{BasicInfo, Info}; diff --git a/src/servers/apis/v1/context/whitelist/handlers.rs b/src/servers/apis/v1/context/whitelist/handlers.rs index 2ca70cba7..25e285c0b 100644 --- a/src/servers/apis/v1/context/whitelist/handlers.rs +++ b/src/servers/apis/v1/context/whitelist/handlers.rs @@ -7,9 +7,9 @@ use axum::response::Response; use super::responses::{ failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, }; -use crate::protocol::info_hash::InfoHash; use crate::servers::apis::v1::responses::{invalid_info_hash_param_response, ok_response}; use crate::servers::apis::InfoHashParam; +use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::Tracker; pub async fn add_torrent_to_whitelist_handler( diff --git a/src/servers/http/percent_encoding.rs b/src/servers/http/percent_encoding.rs index c824c8df7..019735e0f 100644 --- a/src/servers/http/percent_encoding.rs +++ b/src/servers/http/percent_encoding.rs @@ -1,4 +1,4 @@ -use crate::protocol::info_hash::{ConversionError, InfoHash}; +use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; use crate::tracker::peer::{self, IdConversionError}; /// # Errors @@ -21,8 +21,8 @@ pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { @@ -39,8 +39,8 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; - use crate::protocol::clock::DurationSinceUnixEpoch; - use crate::protocol::info_hash::InfoHash; + use crate::shared::bit_torrent::info_hash::InfoHash; + use crate::shared::clock::DurationSinceUnixEpoch; use crate::tracker::services::common::tracker_factory; use crate::tracker::{peer, Tracker}; diff --git a/src/servers/udp/connection_cookie.rs b/src/servers/udp/connection_cookie.rs index a55d40801..4a75145c1 100644 --- 
a/src/servers/udp/connection_cookie.rs +++ b/src/servers/udp/connection_cookie.rs @@ -4,7 +4,7 @@ use std::panic::Location; use aquatic_udp_protocol::ConnectionId; use super::error::Error; -use crate::protocol::clock::time_extent::{Extent, TimeExtent}; +use crate::shared::clock::time_extent::{Extent, TimeExtent}; pub type Cookie = [u8; 8]; @@ -61,8 +61,8 @@ mod cookie_builder { use std::net::SocketAddr; use super::{Cookie, SinceUnixEpochTimeExtent, COOKIE_LIFETIME}; - use crate::protocol::clock::time_extent::{DefaultTimeExtentMaker, Extent, Make, TimeExtent}; - use crate::protocol::crypto::keys::seeds::{Current, Keeper}; + use crate::shared::clock::time_extent::{DefaultTimeExtentMaker, Extent, Make, TimeExtent}; + use crate::shared::crypto::keys::seeds::{Current, Keeper}; pub(super) fn get_last_time_extent() -> SinceUnixEpochTimeExtent { DefaultTimeExtentMaker::now(&COOKIE_LIFETIME.increment) @@ -90,9 +90,9 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use super::cookie_builder::{self}; - use crate::protocol::clock::time_extent::{self, Extent}; - use crate::protocol::clock::{Stopped, StoppedTime}; use crate::servers::udp::connection_cookie::{check, make, Cookie, COOKIE_LIFETIME}; + use crate::shared::clock::time_extent::{self, Extent}; + use crate::shared::clock::{Stopped, StoppedTime}; // #![feature(const_socketaddr)] // const REMOTE_ADDRESS_IPV4_ZERO: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 1544e13cc..7eb971d05 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -9,11 +9,11 @@ use aquatic_udp_protocol::{ use log::debug; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; -use crate::protocol::common::MAX_SCRAPE_TORRENTS; -use crate::protocol::info_hash::InfoHash; use crate::servers::udp::error::Error; use crate::servers::udp::peer_builder; use 
crate::servers::udp::request::AnnounceWrapper; +use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; +use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::{statistics, Tracker}; pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: &Tracker) -> Response { @@ -241,7 +241,7 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; - use crate::protocol::clock::{Current, Time}; + use crate::shared::clock::{Current, Time}; use crate::tracker::services::common::tracker_factory; use crate::tracker::{self, peer}; diff --git a/src/servers/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs index 84eae64f9..8d8852dc7 100644 --- a/src/servers/udp/peer_builder.rs +++ b/src/servers/udp/peer_builder.rs @@ -1,7 +1,7 @@ use std::net::{IpAddr, SocketAddr}; use super::request::AnnounceWrapper; -use crate::protocol::clock::{Current, Time}; +use crate::shared::clock::{Current, Time}; use crate::tracker::peer::{Id, Peer}; #[must_use] diff --git a/src/servers/udp/request.rs b/src/servers/udp/request.rs index 28d75f860..4be99e6d0 100644 --- a/src/servers/udp/request.rs +++ b/src/servers/udp/request.rs @@ -1,6 +1,6 @@ use aquatic_udp_protocol::AnnounceRequest; -use crate::protocol::info_hash::InfoHash; +use crate::shared::bit_torrent::info_hash::InfoHash; pub struct AnnounceWrapper { pub announce_request: AnnounceRequest, diff --git a/src/protocol/common.rs b/src/shared/bit_torrent/common.rs similarity index 100% rename from src/protocol/common.rs rename to src/shared/bit_torrent/common.rs diff --git a/src/protocol/info_hash.rs b/src/shared/bit_torrent/info_hash.rs similarity index 100% rename from src/protocol/info_hash.rs rename to src/shared/bit_torrent/info_hash.rs diff --git a/src/shared/bit_torrent/mod.rs b/src/shared/bit_torrent/mod.rs new file mode 100644 index 000000000..7579a0780 --- /dev/null +++ b/src/shared/bit_torrent/mod.rs @@ -0,0 +1,2 @@ +pub mod common; +pub mod 
info_hash; diff --git a/src/protocol/clock/mod.rs b/src/shared/clock/mod.rs similarity index 96% rename from src/protocol/clock/mod.rs rename to src/shared/clock/mod.rs index 73df37b58..399fb6b9b 100644 --- a/src/protocol/clock/mod.rs +++ b/src/shared/clock/mod.rs @@ -1,3 +1,6 @@ +pub mod time_extent; +pub mod utils; + use std::num::IntErrorKind; use std::str::FromStr; use std::time::Duration; @@ -77,7 +80,7 @@ pub fn convert_from_timestamp_to_datetime_utc(duration: DurationSinceUnixEpoch) mod tests { use std::any::TypeId; - use crate::protocol::clock::{Current, Stopped, Time, Working}; + use crate::shared::clock::{Current, Stopped, Time, Working}; #[test] fn it_should_be_the_stopped_clock_as_default_when_testing() { @@ -95,7 +98,7 @@ mod tests { mod timestamp { use chrono::{DateTime, NaiveDateTime, Utc}; - use crate::protocol::clock::{ + use crate::shared::clock::{ convert_from_datetime_utc_to_timestamp, convert_from_iso_8601_to_timestamp, convert_from_timestamp_to_datetime_utc, DurationSinceUnixEpoch, }; @@ -227,7 +230,7 @@ mod stopped_clock { use std::thread; use std::time::Duration; - use crate::protocol::clock::{DurationSinceUnixEpoch, Stopped, StoppedTime, Time, TimeNow, Working}; + use crate::shared::clock::{DurationSinceUnixEpoch, Stopped, StoppedTime, Time, TimeNow, Working}; #[test] fn it_should_default_to_zero_when_testing() { @@ -286,7 +289,7 @@ mod stopped_clock { use std::cell::RefCell; use std::time::SystemTime; - use crate::protocol::clock::DurationSinceUnixEpoch; + use crate::shared::clock::DurationSinceUnixEpoch; use crate::static_time; pub fn get_app_start_time() -> DurationSinceUnixEpoch { @@ -311,7 +314,7 @@ mod stopped_clock { mod tests { use std::time::Duration; - use crate::protocol::clock::stopped_clock::detail::{get_app_start_time, get_default_fixed_time}; + use crate::shared::clock::stopped_clock::detail::{get_app_start_time, get_default_fixed_time}; #[test] fn it_should_get_the_zero_start_time_when_testing() { @@ -326,5 +329,3 @@ mod 
stopped_clock { } } } - -pub mod time_extent; diff --git a/src/protocol/clock/time_extent.rs b/src/shared/clock/time_extent.rs similarity index 99% rename from src/protocol/clock/time_extent.rs rename to src/shared/clock/time_extent.rs index b4c20cd70..64142c404 100644 --- a/src/protocol/clock/time_extent.rs +++ b/src/shared/clock/time_extent.rs @@ -176,10 +176,10 @@ pub type DefaultTimeExtentMaker = StoppedTimeExtentMaker; #[cfg(test)] mod test { - use crate::protocol::clock::time_extent::{ + use crate::shared::clock::time_extent::{ checked_duration_from_nanos, Base, DefaultTimeExtentMaker, Extent, Make, Multiplier, Product, TimeExtent, MAX, ZERO, }; - use crate::protocol::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; + use crate::shared::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; const TIME_EXTENT_VAL: TimeExtent = TimeExtent::from_sec(2, &239_812_388_723); diff --git a/src/protocol/utils.rs b/src/shared/clock/utils.rs similarity index 89% rename from src/protocol/utils.rs rename to src/shared/clock/utils.rs index cec02ceaf..9127f97b1 100644 --- a/src/protocol/utils.rs +++ b/src/shared/clock/utils.rs @@ -1,4 +1,4 @@ -use super::clock::DurationSinceUnixEpoch; +use super::DurationSinceUnixEpoch; /// # Errors /// diff --git a/src/protocol/crypto.rs b/src/shared/crypto.rs similarity index 94% rename from src/protocol/crypto.rs rename to src/shared/crypto.rs index a335e2dba..848dcd36b 100644 --- a/src/protocol/crypto.rs +++ b/src/shared/crypto.rs @@ -74,8 +74,8 @@ pub mod keys { use std::convert::TryInto; use crate::ephemeral_instance_keys::RANDOM_SEED; - use crate::protocol::crypto::keys::seeds::detail::ZEROED_TEST_SEED; - use crate::protocol::crypto::keys::seeds::CURRENT_SEED; + use crate::shared::crypto::keys::seeds::detail::ZEROED_TEST_SEED; + use crate::shared::crypto::keys::seeds::CURRENT_SEED; #[test] fn it_should_have_a_zero_test_seed() { diff --git a/src/shared/mod.rs b/src/shared/mod.rs new file mode 100644 index 000000000..4b0d9138e --- 
/dev/null +++ b/src/shared/mod.rs @@ -0,0 +1,3 @@ +pub mod bit_torrent; +pub mod clock; +pub mod crypto; diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 75bc543a8..31e1f50e4 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -11,8 +11,8 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use torrust_tracker_located_error::LocatedError; -use crate::protocol::clock::{convert_from_timestamp_to_datetime_utc, Current, DurationSinceUnixEpoch, Time, TimeNow}; -use crate::protocol::common::AUTH_KEY_LENGTH; +use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; +use crate::shared::clock::{convert_from_timestamp_to_datetime_utc, Current, DurationSinceUnixEpoch, Time, TimeNow}; #[must_use] /// # Panics @@ -146,7 +146,7 @@ mod tests { use std::str::FromStr; use std::time::Duration; - use crate::protocol::clock::{Current, StoppedTime}; + use crate::shared::clock::{Current, StoppedTime}; use crate::tracker::auth; #[test] diff --git a/src/tracker/databases/mod.rs b/src/tracker/databases/mod.rs index 0af6f5723..f68288bbe 100644 --- a/src/tracker/databases/mod.rs +++ b/src/tracker/databases/mod.rs @@ -8,7 +8,7 @@ use std::marker::PhantomData; use async_trait::async_trait; use self::error::Error; -use crate::protocol::info_hash::InfoHash; +use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::auth::{self, Key}; pub(self) struct Builder diff --git a/src/tracker/databases/mysql.rs b/src/tracker/databases/mysql.rs index ded9e1617..7e4aab99e 100644 --- a/src/tracker/databases/mysql.rs +++ b/src/tracker/databases/mysql.rs @@ -10,8 +10,8 @@ use r2d2_mysql::MySqlConnectionManager; use torrust_tracker_primitives::DatabaseDriver; use super::{Database, Error}; -use crate::protocol::common::AUTH_KEY_LENGTH; -use crate::protocol::info_hash::InfoHash; +use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; +use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::auth::{self, Key}; const DRIVER: DatabaseDriver = 
DatabaseDriver::MySQL; diff --git a/src/tracker/databases/sqlite.rs b/src/tracker/databases/sqlite.rs index e230ac18f..931289183 100644 --- a/src/tracker/databases/sqlite.rs +++ b/src/tracker/databases/sqlite.rs @@ -7,8 +7,8 @@ use r2d2_sqlite::SqliteConnectionManager; use torrust_tracker_primitives::DatabaseDriver; use super::{Database, Error}; -use crate::protocol::clock::DurationSinceUnixEpoch; -use crate::protocol::info_hash::InfoHash; +use crate::shared::bit_torrent::info_hash::InfoHash; +use crate::shared::clock::DurationSinceUnixEpoch; use crate::tracker::auth::{self, Key}; const DRIVER: DatabaseDriver = DatabaseDriver::Sqlite3; diff --git a/src/tracker/error.rs b/src/tracker/error.rs index 10ca5ec19..aaf755e0d 100644 --- a/src/tracker/error.rs +++ b/src/tracker/error.rs @@ -16,7 +16,7 @@ pub enum Error { // Authorization errors #[error("The torrent: {info_hash}, is not whitelisted, {location}")] TorrentNotWhitelisted { - info_hash: crate::protocol::info_hash::InfoHash, + info_hash: crate::shared::bit_torrent::info_hash::InfoHash, location: &'static Location<'static>, }, } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 9ed28b684..6b8c27076 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -22,7 +22,7 @@ use self::auth::Key; use self::error::Error; use self::peer::Peer; use self::torrent::{SwarmMetadata, SwarmStats}; -use crate::protocol::info_hash::InfoHash; +use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::databases::Database; pub struct Tracker { @@ -557,8 +557,8 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; - use crate::protocol::clock::DurationSinceUnixEpoch; - use crate::protocol::info_hash::InfoHash; + use crate::shared::bit_torrent::info_hash::InfoHash; + use crate::shared::clock::DurationSinceUnixEpoch; use crate::tracker::peer::{self, Peer}; use crate::tracker::services::common::tracker_factory; use 
crate::tracker::{TorrentsMetrics, Tracker}; @@ -911,7 +911,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr}; - use crate::protocol::info_hash::InfoHash; + use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::tests::the_tracker::{complete_peer, incomplete_peer, public_tracker}; use crate::tracker::{ScrapeData, SwarmMetadata}; @@ -1068,7 +1068,7 @@ mod tests { mod handling_an_scrape_request { - use crate::protocol::info_hash::InfoHash; + use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::tests::the_tracker::{ complete_peer, incomplete_peer, peer_ip, sample_info_hash, whitelisted_tracker, }; diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 3012770bb..6a298c9df 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -6,9 +6,9 @@ use serde; use serde::Serialize; use thiserror::Error; -use crate::protocol::clock::DurationSinceUnixEpoch; -use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; -use crate::protocol::utils::ser_unix_time_value; +use crate::shared::bit_torrent::common::{AnnounceEventDef, NumberOfBytesDef}; +use crate::shared::clock::utils::ser_unix_time_value; +use crate::shared::clock::DurationSinceUnixEpoch; #[derive(PartialEq, Eq, Debug)] pub enum IPVersion { @@ -28,8 +28,6 @@ pub struct Peer { pub downloaded: NumberOfBytes, #[serde(with = "NumberOfBytesDef")] pub left: NumberOfBytes, // The number of bytes this peer still has to download - // code-review: aquatic_udp_protocol::request::AnnounceEvent is used also for the HTTP tracker. - // Maybe we should use our own enum and use this one only for the UDP tracker. 
#[serde(with = "AnnounceEventDef")] pub event: AnnounceEvent, } @@ -397,7 +395,7 @@ mod test { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde_json::Value; - use crate::protocol::clock::{Current, Time}; + use crate::shared::clock::{Current, Time}; use crate::tracker::peer::{self, Peer}; #[test] diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index b04b4e1dc..e9e254582 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use serde::Deserialize; -use crate::protocol::info_hash::InfoHash; +use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::peer::Peer; use crate::tracker::Tracker; @@ -117,7 +117,7 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::protocol::clock::DurationSinceUnixEpoch; + use crate::shared::clock::DurationSinceUnixEpoch; use crate::tracker::peer; fn sample_peer() -> peer::Peer { @@ -140,7 +140,7 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; - use crate::protocol::info_hash::InfoHash; + use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; use crate::tracker::services::torrent::tests::sample_peer; use crate::tracker::services::torrent::{get_torrent_info, Info}; @@ -195,7 +195,7 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; - use crate::protocol::info_hash::InfoHash; + use crate::shared::bit_torrent::info_hash::InfoHash; use crate::tracker::services::common::tracker_factory; use crate::tracker::services::torrent::tests::sample_peer; use crate::tracker::services::torrent::{get_torrents, BasicInfo, Pagination}; diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 4a871aa89..882e52ff1 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -4,8 +4,8 @@ use 
aquatic_udp_protocol::AnnounceEvent; use serde::{Deserialize, Serialize}; use super::peer::{self, Peer}; -use crate::protocol::clock::{Current, TimeNow}; -use crate::protocol::common::MAX_SCRAPE_TORRENTS; +use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; +use crate::shared::clock::{Current, TimeNow}; #[derive(Serialize, Deserialize, Clone, Debug)] pub struct Entry { @@ -134,7 +134,7 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; + use crate::shared::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; use crate::tracker::peer; use crate::tracker::torrent::Entry; diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index d4b3e9812..7062c8376 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -1,7 +1,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; +use torrust_tracker::shared::clock::DurationSinceUnixEpoch; use torrust_tracker::tracker::peer::{self, Id, Peer}; pub struct PeerBuilder { diff --git a/tests/servers/api/test_environment.rs b/tests/servers/api/test_environment.rs index 07ba46fff..c9b693320 100644 --- a/tests/servers/api/test_environment.rs +++ b/tests/servers/api/test_environment.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::servers::apis::server::{ApiServer, RunningApiServer, StoppedApiServer}; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index 038272963..45f7e604a 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ 
b/tests/servers/api/v1/contract/context/stats.rs @@ -1,7 +1,7 @@ use std::str::FromStr; -use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::servers::apis::v1::context::stats::resources::Stats; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::PeerBuilder; diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index 69d600a8b..ab497787f 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -1,8 +1,8 @@ use std::str::FromStr; -use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::servers::apis::v1::context::torrent::resources::peer::Peer; use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{self, Torrent}; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::common::fixtures::PeerBuilder; diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs index 67992642f..60ab4c901 100644 --- a/tests/servers/api/v1/contract/context/whitelist.rs +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -1,6 +1,6 @@ use std::str::FromStr; -use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; diff --git a/tests/servers/http/requests/announce.rs b/tests/servers/http/requests/announce.rs index 414c118ef..20c5ddaa7 100644 --- a/tests/servers/http/requests/announce.rs +++ b/tests/servers/http/requests/announce.rs @@ -3,7 +3,7 @@ use std::net::{IpAddr, Ipv4Addr}; use std::str::FromStr; use serde_repr::Serialize_repr; -use torrust_tracker::protocol::info_hash::InfoHash; 
+use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer::Id; use crate::servers::http::{percent_encode_byte_array, ByteArray20}; diff --git a/tests/servers/http/requests/scrape.rs b/tests/servers/http/requests/scrape.rs index d7f7cd581..9e4257d6c 100644 --- a/tests/servers/http/requests/scrape.rs +++ b/tests/servers/http/requests/scrape.rs @@ -1,7 +1,7 @@ use std::fmt; use std::str::FromStr; -use torrust_tracker::protocol::info_hash::InfoHash; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use crate::servers::http::{percent_encode_byte_array, ByteArray20}; diff --git a/tests/servers/http/test_environment.rs b/tests/servers/http/test_environment.rs index aa414d62e..e6013540d 100644 --- a/tests/servers/http/test_environment.rs +++ b/tests/servers/http/test_environment.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::servers::http::server::{HttpServer, HttpServerLauncher, RunningHttpServer, StoppedHttpServer}; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index f67b0c5e5..b508dfc39 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -72,7 +72,7 @@ mod for_all_config_modes { use local_ip_address::local_ip; use reqwest::Response; - use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer; use torrust_tracker_test_helpers::configuration; @@ -833,7 +833,7 @@ mod for_all_config_modes { use std::net::IpAddr; use std::str::FromStr; - use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer; use torrust_tracker_test_helpers::configuration; @@ -1055,7 
+1055,7 @@ mod configured_as_whitelisted { mod and_receiving_an_announce_request { use std::str::FromStr; - use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::servers::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; @@ -1104,7 +1104,7 @@ mod configured_as_whitelisted { mod receiving_an_scrape_request { use std::str::FromStr; - use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer; use torrust_tracker_test_helpers::configuration; @@ -1201,7 +1201,7 @@ mod configured_as_private { use std::str::FromStr; use std::time::Duration; - use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::auth::Key; use torrust_tracker_test_helpers::configuration; @@ -1278,7 +1278,7 @@ mod configured_as_private { use std::str::FromStr; use std::time::Duration; - use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::auth::Key; use torrust_tracker::tracker::peer; use torrust_tracker_test_helpers::configuration; diff --git a/tests/servers/udp/test_environment.rs b/tests/servers/udp/test_environment.rs index 11a2cf6cd..c9f67c987 100644 --- a/tests/servers/udp/test_environment.rs +++ b/tests/servers/udp/test_environment.rs @@ -1,8 +1,8 @@ use std::net::SocketAddr; use std::sync::Arc; -use torrust_tracker::protocol::info_hash::InfoHash; use torrust_tracker::servers::udp::server::{RunningUdpServer, StoppedUdpServer, UdpServer}; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; From 2d99866931a6dfc8bf3ad65e2d438145cafd2b1c Mon Sep 17 00:00:00 2001 From: Jose 
Celano Date: Fri, 17 Mar 2023 11:55:51 +0000 Subject: [PATCH 424/435] refactor: move static vars --- src/bootstrap/app.rs | 4 +++- src/lib.rs | 19 ------------------- src/shared/clock/mod.rs | 4 ++-- src/shared/clock/static_time.rs | 5 +++++ src/shared/crypto.rs | 21 ++++++++++++++++----- tests/common/tracker.rs | 4 +++- 6 files changed, 29 insertions(+), 28 deletions(-) create mode 100644 src/shared/clock/static_time.rs diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 557203b0e..7fb1bf7ca 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -4,8 +4,10 @@ use std::sync::Arc; use torrust_tracker_configuration::Configuration; use crate::bootstrap::stats; +use crate::shared::clock::static_time; +use crate::shared::crypto::ephemeral_instance_keys; use crate::tracker::Tracker; -use crate::{bootstrap, ephemeral_instance_keys, static_time, tracker}; +use crate::{bootstrap, tracker}; /// # Panics /// diff --git a/src/lib.rs b/src/lib.rs index cf4439c3e..bd775f8cf 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -6,22 +6,3 @@ pub mod tracker; #[macro_use] extern crate lazy_static; - -pub mod static_time { - use std::time::SystemTime; - - lazy_static! { - pub static ref TIME_AT_APP_START: SystemTime = SystemTime::now(); - } -} - -pub mod ephemeral_instance_keys { - use rand::rngs::ThreadRng; - use rand::Rng; - - pub type Seed = [u8; 32]; - - lazy_static! 
{ - pub static ref RANDOM_SEED: Seed = Rng::gen(&mut ThreadRng::default()); - } -} diff --git a/src/shared/clock/mod.rs b/src/shared/clock/mod.rs index 399fb6b9b..b5001e10e 100644 --- a/src/shared/clock/mod.rs +++ b/src/shared/clock/mod.rs @@ -1,3 +1,4 @@ +pub mod static_time; pub mod time_extent; pub mod utils; @@ -289,8 +290,7 @@ mod stopped_clock { use std::cell::RefCell; use std::time::SystemTime; - use crate::shared::clock::DurationSinceUnixEpoch; - use crate::static_time; + use crate::shared::clock::{static_time, DurationSinceUnixEpoch}; pub fn get_app_start_time() -> DurationSinceUnixEpoch { (*static_time::TIME_AT_APP_START) diff --git a/src/shared/clock/static_time.rs b/src/shared/clock/static_time.rs new file mode 100644 index 000000000..f916cec9c --- /dev/null +++ b/src/shared/clock/static_time.rs @@ -0,0 +1,5 @@ +use std::time::SystemTime; + +lazy_static! { + pub static ref TIME_AT_APP_START: SystemTime = SystemTime::now(); +} diff --git a/src/shared/crypto.rs b/src/shared/crypto.rs index 848dcd36b..c10a417c1 100644 --- a/src/shared/crypto.rs +++ b/src/shared/crypto.rs @@ -1,8 +1,19 @@ +pub mod ephemeral_instance_keys { + use rand::rngs::ThreadRng; + use rand::Rng; + + pub type Seed = [u8; 32]; + + lazy_static! 
{ + pub static ref RANDOM_SEED: Seed = Rng::gen(&mut ThreadRng::default()); + } +} + pub mod keys { pub mod seeds { use self::detail::CURRENT_SEED; - use crate::ephemeral_instance_keys::{Seed, RANDOM_SEED}; + use crate::shared::crypto::ephemeral_instance_keys::{Seed, RANDOM_SEED}; pub trait Keeper { type Seed: Sized + Default + AsMut<[u8]>; @@ -33,7 +44,7 @@ pub mod keys { mod tests { use super::detail::ZEROED_TEST_SEED; use super::{Current, Instance, Keeper}; - use crate::ephemeral_instance_keys::Seed; + use crate::shared::crypto::ephemeral_instance_keys::Seed; pub struct ZeroedTestSeed; @@ -58,7 +69,7 @@ pub mod keys { } mod detail { - use crate::ephemeral_instance_keys::Seed; + use crate::shared::crypto::ephemeral_instance_keys::Seed; #[allow(dead_code)] pub const ZEROED_TEST_SEED: &Seed = &[0u8; 32]; @@ -67,13 +78,13 @@ pub mod keys { pub use ZEROED_TEST_SEED as CURRENT_SEED; #[cfg(not(test))] - pub use crate::ephemeral_instance_keys::RANDOM_SEED as CURRENT_SEED; + pub use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as CURRENT_SEED; #[cfg(test)] mod tests { use std::convert::TryInto; - use crate::ephemeral_instance_keys::RANDOM_SEED; + use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED; use crate::shared::crypto::keys::seeds::detail::ZEROED_TEST_SEED; use crate::shared::crypto::keys::seeds::CURRENT_SEED; diff --git a/tests/common/tracker.rs b/tests/common/tracker.rs index 1d06009ba..92c1df7bf 100644 --- a/tests/common/tracker.rs +++ b/tests/common/tracker.rs @@ -1,8 +1,10 @@ use std::sync::Arc; +use torrust_tracker::bootstrap; +use torrust_tracker::shared::clock::static_time; +use torrust_tracker::shared::crypto::ephemeral_instance_keys; use torrust_tracker::tracker::services::common::tracker_factory; use torrust_tracker::tracker::Tracker; -use torrust_tracker::{bootstrap, ephemeral_instance_keys, static_time}; // TODO: Move to test-helpers crate once `Tracker` is isolated. 
#[allow(clippy::module_name_repetitions)] From e2553b85b4ea4e18ea44417410fb6bbfd90f5437 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 12:11:52 +0000 Subject: [PATCH 425/435] refactor: extract mod into file --- src/shared/crypto.rs | 109 ------------------- src/shared/crypto/ephemeral_instance_keys.rs | 8 ++ src/shared/crypto/keys.rs | 95 ++++++++++++++++ src/shared/crypto/mod.rs | 2 + 4 files changed, 105 insertions(+), 109 deletions(-) delete mode 100644 src/shared/crypto.rs create mode 100644 src/shared/crypto/ephemeral_instance_keys.rs create mode 100644 src/shared/crypto/keys.rs create mode 100644 src/shared/crypto/mod.rs diff --git a/src/shared/crypto.rs b/src/shared/crypto.rs deleted file mode 100644 index c10a417c1..000000000 --- a/src/shared/crypto.rs +++ /dev/null @@ -1,109 +0,0 @@ -pub mod ephemeral_instance_keys { - use rand::rngs::ThreadRng; - use rand::Rng; - - pub type Seed = [u8; 32]; - - lazy_static! { - pub static ref RANDOM_SEED: Seed = Rng::gen(&mut ThreadRng::default()); - } -} - -pub mod keys { - - pub mod seeds { - use self::detail::CURRENT_SEED; - use crate::shared::crypto::ephemeral_instance_keys::{Seed, RANDOM_SEED}; - - pub trait Keeper { - type Seed: Sized + Default + AsMut<[u8]>; - fn get_seed() -> &'static Self::Seed; - } - - pub struct Instance; - pub struct Current; - - impl Keeper for Instance { - type Seed = Seed; - - fn get_seed() -> &'static Self::Seed { - &RANDOM_SEED - } - } - - impl Keeper for Current { - type Seed = Seed; - - #[allow(clippy::needless_borrow)] - fn get_seed() -> &'static Self::Seed { - &CURRENT_SEED - } - } - - #[cfg(test)] - mod tests { - use super::detail::ZEROED_TEST_SEED; - use super::{Current, Instance, Keeper}; - use crate::shared::crypto::ephemeral_instance_keys::Seed; - - pub struct ZeroedTestSeed; - - impl Keeper for ZeroedTestSeed { - type Seed = Seed; - - #[allow(clippy::needless_borrow)] - fn get_seed() -> &'static Self::Seed { - &ZEROED_TEST_SEED - } - } - - #[test] - fn 
the_default_seed_and_the_zeroed_seed_should_be_the_same_when_testing() { - assert_eq!(Current::get_seed(), ZeroedTestSeed::get_seed()); - } - - #[test] - fn the_default_seed_and_the_instance_seed_should_be_different_when_testing() { - assert_ne!(Current::get_seed(), Instance::get_seed()); - } - } - - mod detail { - use crate::shared::crypto::ephemeral_instance_keys::Seed; - - #[allow(dead_code)] - pub const ZEROED_TEST_SEED: &Seed = &[0u8; 32]; - - #[cfg(test)] - pub use ZEROED_TEST_SEED as CURRENT_SEED; - - #[cfg(not(test))] - pub use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as CURRENT_SEED; - - #[cfg(test)] - mod tests { - use std::convert::TryInto; - - use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED; - use crate::shared::crypto::keys::seeds::detail::ZEROED_TEST_SEED; - use crate::shared::crypto::keys::seeds::CURRENT_SEED; - - #[test] - fn it_should_have_a_zero_test_seed() { - assert_eq!(*ZEROED_TEST_SEED, [0u8; 32]); - } - - #[test] - fn it_should_default_to_zeroed_seed_when_testing() { - assert_eq!(*CURRENT_SEED, *ZEROED_TEST_SEED); - } - - #[test] - fn it_should_have_a_large_random_seed() { - assert!(u128::from_ne_bytes((*RANDOM_SEED)[..16].try_into().unwrap()) > u128::from(u64::MAX)); - assert!(u128::from_ne_bytes((*RANDOM_SEED)[16..].try_into().unwrap()) > u128::from(u64::MAX)); - } - } - } - } -} diff --git a/src/shared/crypto/ephemeral_instance_keys.rs b/src/shared/crypto/ephemeral_instance_keys.rs new file mode 100644 index 000000000..635d10fbd --- /dev/null +++ b/src/shared/crypto/ephemeral_instance_keys.rs @@ -0,0 +1,8 @@ +use rand::rngs::ThreadRng; +use rand::Rng; + +pub type Seed = [u8; 32]; + +lazy_static! 
{ + pub static ref RANDOM_SEED: Seed = Rng::gen(&mut ThreadRng::default()); +} diff --git a/src/shared/crypto/keys.rs b/src/shared/crypto/keys.rs new file mode 100644 index 000000000..5e04eb551 --- /dev/null +++ b/src/shared/crypto/keys.rs @@ -0,0 +1,95 @@ +pub mod seeds { + use self::detail::CURRENT_SEED; + use crate::shared::crypto::ephemeral_instance_keys::{Seed, RANDOM_SEED}; + + pub trait Keeper { + type Seed: Sized + Default + AsMut<[u8]>; + fn get_seed() -> &'static Self::Seed; + } + + pub struct Instance; + pub struct Current; + + impl Keeper for Instance { + type Seed = Seed; + + fn get_seed() -> &'static Self::Seed { + &RANDOM_SEED + } + } + + impl Keeper for Current { + type Seed = Seed; + + #[allow(clippy::needless_borrow)] + fn get_seed() -> &'static Self::Seed { + &CURRENT_SEED + } + } + + #[cfg(test)] + mod tests { + use super::detail::ZEROED_TEST_SEED; + use super::{Current, Instance, Keeper}; + use crate::shared::crypto::ephemeral_instance_keys::Seed; + + pub struct ZeroedTestSeed; + + impl Keeper for ZeroedTestSeed { + type Seed = Seed; + + #[allow(clippy::needless_borrow)] + fn get_seed() -> &'static Self::Seed { + &ZEROED_TEST_SEED + } + } + + #[test] + fn the_default_seed_and_the_zeroed_seed_should_be_the_same_when_testing() { + assert_eq!(Current::get_seed(), ZeroedTestSeed::get_seed()); + } + + #[test] + fn the_default_seed_and_the_instance_seed_should_be_different_when_testing() { + assert_ne!(Current::get_seed(), Instance::get_seed()); + } + } + + mod detail { + use crate::shared::crypto::ephemeral_instance_keys::Seed; + + #[allow(dead_code)] + pub const ZEROED_TEST_SEED: &Seed = &[0u8; 32]; + + #[cfg(test)] + pub use ZEROED_TEST_SEED as CURRENT_SEED; + + #[cfg(not(test))] + pub use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as CURRENT_SEED; + + #[cfg(test)] + mod tests { + use std::convert::TryInto; + + use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED; + use 
crate::shared::crypto::keys::seeds::detail::ZEROED_TEST_SEED; + use crate::shared::crypto::keys::seeds::CURRENT_SEED; + + #[test] + fn it_should_have_a_zero_test_seed() { + assert_eq!(*ZEROED_TEST_SEED, [0u8; 32]); + } + + #[test] + fn it_should_default_to_zeroed_seed_when_testing() { + assert_eq!(*CURRENT_SEED, *ZEROED_TEST_SEED); + } + + #[test] + fn it_should_have_a_large_random_seed() { + assert!(u128::from_ne_bytes((*RANDOM_SEED)[..16].try_into().unwrap()) > u128::from(u64::MAX)); + assert!(u128::from_ne_bytes((*RANDOM_SEED)[16..].try_into().unwrap()) > u128::from(u64::MAX)); + } + } + } +} diff --git a/src/shared/crypto/mod.rs b/src/shared/crypto/mod.rs new file mode 100644 index 000000000..066eb0f46 --- /dev/null +++ b/src/shared/crypto/mod.rs @@ -0,0 +1,2 @@ +pub mod ephemeral_instance_keys; +pub mod keys; From 84c45827cb498cc962e91c6ec88ecd7a3eb732a5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 12:21:16 +0000 Subject: [PATCH 426/435] refactor: remove duplicate code for static vars initialization --- src/bootstrap/app.rs | 14 +++++++++----- tests/common/tracker.rs | 8 +------- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 7fb1bf7ca..8fa6194f2 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -9,6 +9,14 @@ use crate::shared::crypto::ephemeral_instance_keys; use crate::tracker::Tracker; use crate::{bootstrap, tracker}; +pub fn initialize_static() { + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); +} + /// # Panics /// /// Will panic if it can't load the configuration from either @@ -18,11 +26,7 @@ pub fn setup() -> (Arc, Arc) { const CONFIG_PATH: &str = "./config.toml"; const CONFIG_ENV_VAR_NAME: &str = "TORRUST_TRACKER_CONFIG"; - // Set the time of Torrust app starting - 
lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + initialize_static(); // Initialize Torrust config let config = if env::var(CONFIG_ENV_VAR_NAME).is_ok() { diff --git a/tests/common/tracker.rs b/tests/common/tracker.rs index 92c1df7bf..8579609d9 100644 --- a/tests/common/tracker.rs +++ b/tests/common/tracker.rs @@ -1,19 +1,13 @@ use std::sync::Arc; use torrust_tracker::bootstrap; -use torrust_tracker::shared::clock::static_time; -use torrust_tracker::shared::crypto::ephemeral_instance_keys; use torrust_tracker::tracker::services::common::tracker_factory; use torrust_tracker::tracker::Tracker; // TODO: Move to test-helpers crate once `Tracker` is isolated. #[allow(clippy::module_name_repetitions)] pub fn new_tracker(configuration: Arc) -> Arc { - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + bootstrap::app::initialize_static(); // Initialize logging bootstrap::logging::setup(&configuration); From fe392c792e28728341a1d1410991bbc155db09e2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 12:51:46 +0000 Subject: [PATCH 427/435] refactor: reorganize tracker factory mod --- src/bootstrap/app.rs | 1 - src/servers/http/v1/handlers/announce.rs | 2 +- src/servers/http/v1/handlers/scrape.rs | 2 +- src/servers/http/v1/services/announce.rs | 2 +- src/servers/http/v1/services/scrape.rs | 2 +- src/servers/udp/handlers.rs | 12 +++++------ src/tracker/mod.rs | 2 +- src/tracker/services/common.rs | 25 ---------------------- src/tracker/services/mod.rs | 27 +++++++++++++++++++++++- src/tracker/services/statistics.rs | 2 +- src/tracker/services/torrent.rs | 4 ++-- tests/common/tracker.rs | 5 +---- 12 files changed, 41 insertions(+), 45 deletions(-) delete mode 100644 
src/tracker/services/common.rs diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 8fa6194f2..b76641ecd 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -48,7 +48,6 @@ pub fn setup() -> (Arc, Arc) { } }; - // Initialize logging bootstrap::logging::setup(&config); (config, tracker) diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index a93dccabb..db41388ab 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -145,7 +145,7 @@ mod tests { use crate::servers::http::v1::responses; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::tracker::services::common::tracker_factory; + use crate::tracker::services::tracker_factory; use crate::tracker::{peer, Tracker}; fn private_tracker() -> Tracker { diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 9912723b8..f55194810 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -103,7 +103,7 @@ mod tests { use crate::servers::http::v1::responses; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::tracker::services::common::tracker_factory; + use crate::tracker::services::tracker_factory; use crate::tracker::Tracker; fn private_tracker() -> Tracker { diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 58867634f..116dc1e95 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -32,7 +32,7 @@ mod tests { use crate::shared::bit_torrent::info_hash::InfoHash; use crate::shared::clock::DurationSinceUnixEpoch; - use crate::tracker::services::common::tracker_factory; + use crate::tracker::services::tracker_factory; use crate::tracker::{peer, Tracker}; 
fn public_tracker() -> Tracker { diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 120a5068d..82ecc72e0 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -41,7 +41,7 @@ mod tests { use crate::shared::bit_torrent::info_hash::InfoHash; use crate::shared::clock::DurationSinceUnixEpoch; - use crate::tracker::services::common::tracker_factory; + use crate::tracker::services::tracker_factory; use crate::tracker::{peer, Tracker}; fn public_tracker() -> Tracker { diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 7eb971d05..e00203cfc 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -242,8 +242,8 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::shared::clock::{Current, Time}; - use crate::tracker::services::common::tracker_factory; - use crate::tracker::{self, peer}; + use crate::tracker::services::tracker_factory; + use crate::tracker::{peer, Tracker}; fn tracker_configuration() -> Arc { Arc::new(default_testing_tracker_configuration()) @@ -253,19 +253,19 @@ mod tests { configuration::ephemeral() } - fn public_tracker() -> Arc { + fn public_tracker() -> Arc { initialized_tracker(configuration::ephemeral_mode_public().into()) } - fn private_tracker() -> Arc { + fn private_tracker() -> Arc { initialized_tracker(configuration::ephemeral_mode_private().into()) } - fn whitelisted_tracker() -> Arc { + fn whitelisted_tracker() -> Arc { initialized_tracker(configuration::ephemeral_mode_whitelisted().into()) } - fn initialized_tracker(configuration: Arc) -> Arc { + fn initialized_tracker(configuration: Arc) -> Arc { tracker_factory(configuration).into() } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index 6b8c27076..a89d6df2c 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -560,7 +560,7 @@ mod tests { use crate::shared::bit_torrent::info_hash::InfoHash; use 
crate::shared::clock::DurationSinceUnixEpoch; use crate::tracker::peer::{self, Peer}; - use crate::tracker::services::common::tracker_factory; + use crate::tracker::services::tracker_factory; use crate::tracker::{TorrentsMetrics, Tracker}; fn public_tracker() -> Tracker { diff --git a/src/tracker/services/common.rs b/src/tracker/services/common.rs deleted file mode 100644 index 757725263..000000000 --- a/src/tracker/services/common.rs +++ /dev/null @@ -1,25 +0,0 @@ -use std::sync::Arc; - -use torrust_tracker_configuration::Configuration; - -use crate::tracker::statistics::Keeper; -use crate::tracker::Tracker; - -/// # Panics -/// -/// Will panic if tracker cannot be instantiated. -#[must_use] -pub fn tracker_factory(configuration: Arc) -> Tracker { - // todo: the tracker initialization is duplicated in many places. - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - - // Initialize Torrust tracker - match Tracker::new(configuration, Some(stats_event_sender), stats_repository) { - Ok(tracker) => tracker, - Err(error) => { - panic!("{}", error) - } - } -} diff --git a/src/tracker/services/mod.rs b/src/tracker/services/mod.rs index 2fd557d54..69b0320e8 100644 --- a/src/tracker/services/mod.rs +++ b/src/tracker/services/mod.rs @@ -1,3 +1,28 @@ -pub mod common; pub mod statistics; pub mod torrent; + +use std::sync::Arc; + +use torrust_tracker_configuration::Configuration; + +use crate::tracker::statistics::Keeper; +use crate::tracker::Tracker; + +/// # Panics +/// +/// Will panic if tracker cannot be instantiated. +#[must_use] +pub fn tracker_factory(configuration: Arc) -> Tracker { + // todo: the tracker initialization is duplicated in many places. 
+ + // Initialize stats tracker + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + + // Initialize Torrust tracker + match Tracker::new(configuration, Some(stats_event_sender), stats_repository) { + Ok(tracker) => tracker, + Err(error) => { + panic!("{}", error) + } + } +} diff --git a/src/tracker/services/statistics.rs b/src/tracker/services/statistics.rs index 28cd0b962..cf05fcf90 100644 --- a/src/tracker/services/statistics.rs +++ b/src/tracker/services/statistics.rs @@ -40,8 +40,8 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::tracker; - use crate::tracker::services::common::tracker_factory; use crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; + use crate::tracker::services::tracker_factory; pub fn tracker_configuration() -> Arc { Arc::new(configuration::ephemeral()) diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs index e9e254582..30d24eb00 100644 --- a/src/tracker/services/torrent.rs +++ b/src/tracker/services/torrent.rs @@ -141,9 +141,9 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::tracker::services::common::tracker_factory; use crate::tracker::services::torrent::tests::sample_peer; use crate::tracker::services::torrent::{get_torrent_info, Info}; + use crate::tracker::services::tracker_factory; pub fn tracker_configuration() -> Arc { Arc::new(configuration::ephemeral()) @@ -196,9 +196,9 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::tracker::services::common::tracker_factory; use crate::tracker::services::torrent::tests::sample_peer; use crate::tracker::services::torrent::{get_torrents, BasicInfo, Pagination}; + use crate::tracker::services::tracker_factory; pub fn tracker_configuration() -> Arc { Arc::new(configuration::ephemeral()) diff --git a/tests/common/tracker.rs 
b/tests/common/tracker.rs index 8579609d9..d95573702 100644 --- a/tests/common/tracker.rs +++ b/tests/common/tracker.rs @@ -1,15 +1,12 @@ use std::sync::Arc; use torrust_tracker::bootstrap; -use torrust_tracker::tracker::services::common::tracker_factory; +use torrust_tracker::tracker::services::tracker_factory; use torrust_tracker::tracker::Tracker; -// TODO: Move to test-helpers crate once `Tracker` is isolated. -#[allow(clippy::module_name_repetitions)] pub fn new_tracker(configuration: Arc) -> Arc { bootstrap::app::initialize_static(); - // Initialize logging bootstrap::logging::setup(&configuration); Arc::new(tracker_factory(configuration)) From cbca065142781f47db827f24f94f9b4566094a19 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 12:56:36 +0000 Subject: [PATCH 428/435] refactor: rename mod and function To follow production code conventions. --- tests/common/{tracker.rs => app.rs} | 2 +- tests/common/mod.rs | 2 +- tests/servers/api/test_environment.rs | 4 ++-- tests/servers/http/test_environment.rs | 4 ++-- tests/servers/udp/test_environment.rs | 4 ++-- 5 files changed, 8 insertions(+), 8 deletions(-) rename tests/common/{tracker.rs => app.rs} (72%) diff --git a/tests/common/tracker.rs b/tests/common/app.rs similarity index 72% rename from tests/common/tracker.rs rename to tests/common/app.rs index d95573702..132faeb06 100644 --- a/tests/common/tracker.rs +++ b/tests/common/app.rs @@ -4,7 +4,7 @@ use torrust_tracker::bootstrap; use torrust_tracker::tracker::services::tracker_factory; use torrust_tracker::tracker::Tracker; -pub fn new_tracker(configuration: Arc) -> Arc { +pub fn setup_with_config(configuration: Arc) -> Arc { bootstrap::app::initialize_static(); bootstrap::logging::setup(&configuration); diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 9452cc111..51a8a5b03 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,4 +1,4 @@ +pub mod app; pub mod fixtures; pub mod http; -pub mod tracker; pub mod udp; diff 
--git a/tests/servers/api/test_environment.rs b/tests/servers/api/test_environment.rs index c9b693320..be767f05e 100644 --- a/tests/servers/api/test_environment.rs +++ b/tests/servers/api/test_environment.rs @@ -6,7 +6,7 @@ use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; use super::connection_info::ConnectionInfo; -use crate::common::tracker::new_tracker; +use crate::common::app::setup_with_config; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment; @@ -39,7 +39,7 @@ impl TestEnvironment { pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { let cfg = Arc::new(cfg); - let tracker = new_tracker(cfg.clone()); + let tracker = setup_with_config(cfg.clone()); let api_server = api_server(cfg.http_api.clone()); diff --git a/tests/servers/http/test_environment.rs b/tests/servers/http/test_environment.rs index e6013540d..a1b3444dd 100644 --- a/tests/servers/http/test_environment.rs +++ b/tests/servers/http/test_environment.rs @@ -5,7 +5,7 @@ use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; -use crate::common::tracker::new_tracker; +use crate::common::app::setup_with_config; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment>; @@ -39,7 +39,7 @@ impl TestEnvironment> { pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { let cfg = Arc::new(cfg); - let tracker = new_tracker(cfg.clone()); + let tracker = setup_with_config(cfg.clone()); let http_server = http_server(cfg.http_trackers[0].clone()); diff --git a/tests/servers/udp/test_environment.rs b/tests/servers/udp/test_environment.rs index c9f67c987..fdbb9036d 100644 --- a/tests/servers/udp/test_environment.rs +++ b/tests/servers/udp/test_environment.rs @@ -6,7 +6,7 @@ use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use 
torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; -use crate::common::tracker::new_tracker; +use crate::common::app::setup_with_config; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment; @@ -41,7 +41,7 @@ impl TestEnvironment { pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { let cfg = Arc::new(cfg); - let tracker = new_tracker(cfg.clone()); + let tracker = setup_with_config(cfg.clone()); let udp_server = udp_server(cfg.udp_trackers[0].clone()); From de5775759e0c5abdd615e5b4faceb5abd9031c28 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 13:48:27 +0000 Subject: [PATCH 429/435] refactor: remove duplicate app initialization code There was a lot of duplicate code for app initialization between prod and testing code. --- src/bootstrap/app.rs | 54 ++++++++++--------- src/bootstrap/mod.rs | 1 - src/tracker/services/mod.rs | 11 ++-- .../{statistics.rs => statistics/mod.rs} | 2 + .../services/statistics/setup.rs} | 8 +-- tests/common/app.rs | 9 +--- tests/servers/api/test_environment.rs | 4 +- tests/servers/http/test_environment.rs | 4 +- tests/servers/udp/test_environment.rs | 4 +- 9 files changed, 48 insertions(+), 49 deletions(-) rename src/tracker/services/{statistics.rs => statistics/mod.rs} (99%) rename src/{bootstrap/stats.rs => tracker/services/statistics/setup.rs} (69%) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index b76641ecd..e845feac0 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -3,11 +3,26 @@ use std::sync::Arc; use torrust_tracker_configuration::Configuration; -use crate::bootstrap::stats; +use crate::bootstrap; use crate::shared::clock::static_time; use crate::shared::crypto::ephemeral_instance_keys; +use crate::tracker::services::tracker_factory; use crate::tracker::Tracker; -use crate::{bootstrap, tracker}; + +#[must_use] +pub fn setup() -> (Arc, Arc) { + let configuration = 
Arc::new(initialize_configuration()); + let tracker = initialize_with_configuration(&configuration); + + (configuration, tracker) +} + +#[must_use] +pub fn initialize_with_configuration(configuration: &Arc) -> Arc { + initialize_static(); + initialize_logging(configuration); + Arc::new(initialize_tracker(configuration)) +} pub fn initialize_static() { // Set the time of Torrust app starting @@ -20,35 +35,26 @@ pub fn initialize_static() { /// # Panics /// /// Will panic if it can't load the configuration from either -/// `./config.toml` file or env var `TORRUST_TRACKER_CONFIG`. +/// `./config.toml` file or the env var `TORRUST_TRACKER_CONFIG`. #[must_use] -pub fn setup() -> (Arc, Arc) { +fn initialize_configuration() -> Configuration { const CONFIG_PATH: &str = "./config.toml"; const CONFIG_ENV_VAR_NAME: &str = "TORRUST_TRACKER_CONFIG"; - initialize_static(); - - // Initialize Torrust config - let config = if env::var(CONFIG_ENV_VAR_NAME).is_ok() { + if env::var(CONFIG_ENV_VAR_NAME).is_ok() { println!("Loading configuration from env var {CONFIG_ENV_VAR_NAME}"); - Arc::new(Configuration::load_from_env_var(CONFIG_ENV_VAR_NAME).unwrap()) + Configuration::load_from_env_var(CONFIG_ENV_VAR_NAME).unwrap() } else { println!("Loading configuration from config file {CONFIG_PATH}"); - Arc::new(Configuration::load_from_file(CONFIG_PATH).unwrap()) - }; - - // Initialize statistics - let (stats_event_sender, stats_repository) = stats::setup(config.tracker_usage_statistics); - - // Initialize Torrust tracker - let tracker = match tracker::Tracker::new(config.clone(), stats_event_sender, stats_repository) { - Ok(tracker) => Arc::new(tracker), - Err(error) => { - panic!("{}", error) - } - }; + Configuration::load_from_file(CONFIG_PATH).unwrap() + } +} - bootstrap::logging::setup(&config); +#[must_use] +pub fn initialize_tracker(config: &Arc) -> Tracker { + tracker_factory(config.clone()) +} - (config, tracker) +pub fn initialize_logging(config: &Arc) { + 
bootstrap::logging::setup(config); } diff --git a/src/bootstrap/mod.rs b/src/bootstrap/mod.rs index b4ee0558e..e3b6467ee 100644 --- a/src/bootstrap/mod.rs +++ b/src/bootstrap/mod.rs @@ -1,4 +1,3 @@ pub mod app; pub mod jobs; pub mod logging; -pub mod stats; diff --git a/src/tracker/services/mod.rs b/src/tracker/services/mod.rs index 69b0320e8..8667f79a9 100644 --- a/src/tracker/services/mod.rs +++ b/src/tracker/services/mod.rs @@ -5,21 +5,18 @@ use std::sync::Arc; use torrust_tracker_configuration::Configuration; -use crate::tracker::statistics::Keeper; use crate::tracker::Tracker; /// # Panics /// /// Will panic if tracker cannot be instantiated. #[must_use] -pub fn tracker_factory(configuration: Arc) -> Tracker { - // todo: the tracker initialization is duplicated in many places. - - // Initialize stats tracker - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); +pub fn tracker_factory(config: Arc) -> Tracker { + // Initialize statistics + let (stats_event_sender, stats_repository) = statistics::setup::factory(config.tracker_usage_statistics); // Initialize Torrust tracker - match Tracker::new(configuration, Some(stats_event_sender), stats_repository) { + match Tracker::new(config, stats_event_sender, stats_repository) { Ok(tracker) => tracker, Err(error) => { panic!("{}", error) diff --git a/src/tracker/services/statistics.rs b/src/tracker/services/statistics/mod.rs similarity index 99% rename from src/tracker/services/statistics.rs rename to src/tracker/services/statistics/mod.rs index cf05fcf90..cae4d1d69 100644 --- a/src/tracker/services/statistics.rs +++ b/src/tracker/services/statistics/mod.rs @@ -1,3 +1,5 @@ +pub mod setup; + use std::sync::Arc; use crate::tracker::statistics::Metrics; diff --git a/src/bootstrap/stats.rs b/src/tracker/services/statistics/setup.rs similarity index 69% rename from src/bootstrap/stats.rs rename to src/tracker/services/statistics/setup.rs index 3b109f297..b7cb831cb 100644 --- a/src/bootstrap/stats.rs 
+++ b/src/tracker/services/statistics/setup.rs @@ -1,7 +1,7 @@ use crate::tracker::statistics; #[must_use] -pub fn setup(tracker_usage_statistics: bool) -> (Option>, statistics::Repo) { +pub fn factory(tracker_usage_statistics: bool) -> (Option>, statistics::Repo) { let mut stats_event_sender = None; let mut stats_tracker = statistics::Keeper::new(); @@ -15,13 +15,13 @@ pub fn setup(tracker_usage_statistics: bool) -> (Option) -> Arc { - bootstrap::app::initialize_static(); - - bootstrap::logging::setup(&configuration); - - Arc::new(tracker_factory(configuration)) +pub fn setup_with_configuration(configuration: &Arc) -> Arc { + bootstrap::app::initialize_with_configuration(configuration) } diff --git a/tests/servers/api/test_environment.rs b/tests/servers/api/test_environment.rs index be767f05e..dbb23dcfa 100644 --- a/tests/servers/api/test_environment.rs +++ b/tests/servers/api/test_environment.rs @@ -6,7 +6,7 @@ use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; use super::connection_info::ConnectionInfo; -use crate::common::app::setup_with_config; +use crate::common::app::setup_with_configuration; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment; @@ -39,7 +39,7 @@ impl TestEnvironment { pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { let cfg = Arc::new(cfg); - let tracker = setup_with_config(cfg.clone()); + let tracker = setup_with_configuration(&cfg); let api_server = api_server(cfg.http_api.clone()); diff --git a/tests/servers/http/test_environment.rs b/tests/servers/http/test_environment.rs index a1b3444dd..8d0aaba02 100644 --- a/tests/servers/http/test_environment.rs +++ b/tests/servers/http/test_environment.rs @@ -5,7 +5,7 @@ use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; -use crate::common::app::setup_with_config; +use 
crate::common::app::setup_with_configuration; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment>; @@ -39,7 +39,7 @@ impl TestEnvironment> { pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { let cfg = Arc::new(cfg); - let tracker = setup_with_config(cfg.clone()); + let tracker = setup_with_configuration(&cfg); let http_server = http_server(cfg.http_trackers[0].clone()); diff --git a/tests/servers/udp/test_environment.rs b/tests/servers/udp/test_environment.rs index fdbb9036d..15266d881 100644 --- a/tests/servers/udp/test_environment.rs +++ b/tests/servers/udp/test_environment.rs @@ -6,7 +6,7 @@ use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; use torrust_tracker::tracker::peer::Peer; use torrust_tracker::tracker::Tracker; -use crate::common::app::setup_with_config; +use crate::common::app::setup_with_configuration; #[allow(clippy::module_name_repetitions, dead_code)] pub type StoppedTestEnvironment = TestEnvironment; @@ -41,7 +41,7 @@ impl TestEnvironment { pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { let cfg = Arc::new(cfg); - let tracker = setup_with_config(cfg.clone()); + let tracker = setup_with_configuration(&cfg); let udp_server = udp_server(cfg.udp_trackers[0].clone()); From 721227ecf35f4b0ae68834b3a5749c8c13ed5195 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 17:52:09 +0000 Subject: [PATCH 430/435] feat: release 3.0.0-alpha.1 --- Cargo.lock | 10 +++++----- Cargo.toml | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 519ea50f0..3bc78bd67 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2861,7 +2861,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "2.3.0" +version = "3.0.0-alpha.1" dependencies = [ "aquatic_udp_protocol", "async-trait", @@ -2908,7 +2908,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "2.3.0" 
+version = "3.0.0-alpha.1" dependencies = [ "config", "log", @@ -2923,7 +2923,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "2.3.0" +version = "3.0.0-alpha.1" dependencies = [ "log", "thiserror", @@ -2931,7 +2931,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "2.3.0" +version = "3.0.0-alpha.1" dependencies = [ "derive_more", "serde", @@ -2939,7 +2939,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "2.3.0" +version = "3.0.0-alpha.1" dependencies = [ "lazy_static", "rand", diff --git a/Cargo.toml b/Cargo.toml index 6f213995f..36006d7a4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,7 +10,7 @@ version.workspace = true authors = ["Nautilus Cyberneering , Mick van Dijke "] edition = "2021" repository = "https://github.com/torrust/torrust-tracker" -version = "2.3.0" +version = "3.0.0-alpha.1" [dependencies] tokio = { version = "1.26", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } From 6e9ef79e801d9c9d6296ead2a2714de3a69fb3fa Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 19:17:25 +0000 Subject: [PATCH 431/435] feat: add description and licence to all workspace packages --- packages/configuration/Cargo.toml | 2 ++ packages/located-error/Cargo.toml | 2 ++ packages/primitives/Cargo.toml | 2 ++ packages/test-helpers/Cargo.toml | 2 ++ 4 files changed, 8 insertions(+) diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index a6f1740a0..dccec59ea 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -1,5 +1,7 @@ [package] name = "torrust-tracker-configuration" +description = "A library to provide configuration to the Torrust Tracker." 
+license = "AGPL-3.0" version.workspace = true authors.workspace = true edition.workspace = true diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index c4b2ef726..f67ef340f 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -1,5 +1,7 @@ [package] name = "torrust-tracker-located-error" +description = "A library to provide error decorator with the location and the source of the original error." +license = "AGPL-3.0" version.workspace = true authors.workspace = true edition.workspace = true diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index 9aec28384..bba45cf5d 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -1,5 +1,7 @@ [package] name = "torrust-tracker-primitives" +description = "A library with the primitive types shared by the Torrust tracker packages." +license = "AGPL-3.0" version.workspace = true authors.workspace = true edition.workspace = true diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 2f942bac7..a4c6528ab 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -1,5 +1,7 @@ [package] name = "torrust-tracker-test-helpers" +description = "A library providing helpers for testing the Torrust tracker." +license = "AGPL-3.0" version.workspace = true authors.workspace = true edition.workspace = true From 67e7d51e8d8125429f0ecf18b8d48f2cdb9c5220 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Mar 2023 19:17:52 +0000 Subject: [PATCH 432/435] ci: fix workflow to publish crates Now we have a workspace with multiple packages and we have to publish one at a time following the inverse dependency order. 
--- .github/workflows/publish_crate.yml | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/.github/workflows/publish_crate.yml b/.github/workflows/publish_crate.yml index c120a0fc5..d92b4e557 100644 --- a/.github/workflows/publish_crate.yml +++ b/.github/workflows/publish_crate.yml @@ -46,6 +46,22 @@ jobs: with: toolchain: stable - - run: cargo publish + - run: cargo publish -p torrust-tracker-located-error + env: + CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} + + - run: cargo publish -p torrust-tracker-primitives + env: + CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} + + - run: cargo publish -p torrust-tracker-configuration + env: + CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} + + - run: cargo publish -p torrust-tracker-test-helpers + env: + CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} + + - run: cargo publish -p torrust-tracker env: CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} From 354edec3cd17ce721fd5aa07a91abab86fbaf2e2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 19 Mar 2023 11:37:01 +0000 Subject: [PATCH 433/435] fix: all dependencies must have a version specified when publishing --- Cargo.toml | 8 ++++---- packages/configuration/Cargo.toml | 4 ++-- packages/located-error/Cargo.toml | 2 +- packages/test-helpers/Cargo.toml | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 36006d7a4..4b6bcb323 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,9 +42,9 @@ axum = "0.6.10" axum-server = { version = "0.4", features = ["tls-rustls"] } axum-client-ip = "0.4" bip_bencode = "0.4" -torrust-tracker-primitives = { path = "packages/primitives" } -torrust-tracker-configuration = { path = "packages/configuration" } -torrust-tracker-located-error = { path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.1", path = "packages/primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.1", path = 
"packages/configuration" } +torrust-tracker-located-error = { version = "3.0.0-alpha.1", path = "packages/located-error" } multimap = "0.8" hyper = "0.14" @@ -55,7 +55,7 @@ serde_urlencoded = "0.7" serde_repr = "0.1" serde_bytes = "0.11" local-ip-address = "0.5" -torrust-tracker-test-helpers = { path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-alpha.1", path = "packages/test-helpers" } [workspace] members = [ diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index dccec59ea..aade6272d 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -13,6 +13,6 @@ config = "0.13" toml = "0.5" log = { version = "0.4", features = ["release_max_level_info"] } thiserror = "1.0" -torrust-tracker-primitives = { path = "../primitives" } -torrust-tracker-located-error = { path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.1", path = "../primitives" } +torrust-tracker-located-error = { version = "3.0.0-alpha.1", path = "../located-error" } uuid = { version = "1", features = ["v4"] } diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index f67ef340f..7d66bba65 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -2,7 +2,7 @@ name = "torrust-tracker-located-error" description = "A library to provide error decorator with the location and the source of the original error." 
license = "AGPL-3.0" -version.workspace = true +version = "3.0.0-alpha.1" authors.workspace = true edition.workspace = true diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index a4c6528ab..4483f8f4d 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -10,5 +10,5 @@ edition.workspace = true tokio = { version = "1", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } lazy_static = "1.4" rand = "0.8.5" -torrust-tracker-configuration = { path = "../configuration"} -torrust-tracker-primitives = { path = "../primitives"} +torrust-tracker-configuration = { version = "3.0.0-alpha.1", path = "../configuration"} +torrust-tracker-primitives = { version = "3.0.0-alpha.1", path = "../primitives"} From fcfd9f470092b2e54c3beee0615475ab46e3f958 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 19 Mar 2023 11:46:13 +0000 Subject: [PATCH 434/435] refactor: single step to publish on crates.io --- .github/workflows/publish_crate.yml | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/.github/workflows/publish_crate.yml b/.github/workflows/publish_crate.yml index d92b4e557..4d5d0772e 100644 --- a/.github/workflows/publish_crate.yml +++ b/.github/workflows/publish_crate.yml @@ -46,22 +46,12 @@ jobs: with: toolchain: stable - - run: cargo publish -p torrust-tracker-located-error - env: - CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} - - - run: cargo publish -p torrust-tracker-primitives - env: - CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} - - - run: cargo publish -p torrust-tracker-configuration - env: - CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} - - - run: cargo publish -p torrust-tracker-test-helpers - env: - CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} - - - run: cargo publish -p torrust-tracker + - name: Publish workspace packages + run: | + cargo publish -p torrust-tracker-located-error + cargo publish -p torrust-tracker-primitives + 
cargo publish -p torrust-tracker-configuration + cargo publish -p torrust-tracker-test-helpers + cargo publish -p torrust-tracker env: CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} From 62cd78f12296ff227d7f42c18b099c6f0e8fb61e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sun, 19 Mar 2023 11:54:48 +0000 Subject: [PATCH 435/435] fix: package version inherited from workspace --- packages/located-error/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index 7d66bba65..f67ef340f 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -2,7 +2,7 @@ name = "torrust-tracker-located-error" description = "A library to provide error decorator with the location and the source of the original error." license = "AGPL-3.0" -version = "3.0.0-alpha.1" +version.workspace = true authors.workspace = true edition.workspace = true