diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 000000000..71480e92d --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,5 @@ +[alias] +cov = "llvm-cov" +cov-lcov = "llvm-cov --lcov --output-path=./.coverage/lcov.info" +cov-html = "llvm-cov --html" +time = "build --timings --all-targets" diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..3d8a25cce --- /dev/null +++ b/.dockerignore @@ -0,0 +1,16 @@ +.git +.git-blame-ignore +.github +.gitignore +.vscode +bin/ +config.toml +config.toml.local +cSpell.json +data.db +docker/ +NOTICE +README.md +rustfmt.toml +storage/ +target/ diff --git a/.env.local b/.env.local new file mode 100644 index 000000000..fefed56c4 --- /dev/null +++ b/.env.local @@ -0,0 +1 @@ +TORRUST_TRACKER_USER_UID=1000 \ No newline at end of file diff --git a/.git-blame-ignore b/.git-blame-ignore new file mode 100644 index 000000000..06c439a36 --- /dev/null +++ b/.git-blame-ignore @@ -0,0 +1,4 @@ +# https://git-scm.com/docs/git-blame#Documentation/git-blame.txt---ignore-revs-fileltfilegt + +# Format the world! 
+57bf2000e39dccfc2f8b6e41d6c6f3eac38a3886 diff --git a/.github/workflows/publish_crate.yml b/.github/workflows/publish_crate.yml new file mode 100644 index 000000000..4d5d0772e --- /dev/null +++ b/.github/workflows/publish_crate.yml @@ -0,0 +1,57 @@ +name: Publish crate + +on: + push: + tags: + - "v*" + +jobs: + check-secret: + runs-on: ubuntu-latest + environment: crates-io-torrust + outputs: + publish: ${{ steps.check.outputs.publish }} + steps: + - id: check + env: + CRATES_TOKEN: "${{ secrets.CRATES_TOKEN }}" + if: "${{ env.CRATES_TOKEN != '' }}" + run: echo "publish=true" >> $GITHUB_OUTPUT + + test: + needs: check-secret + if: needs.check-secret.outputs.publish == 'true' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + components: llvm-tools-preview + - uses: Swatinem/rust-cache@v2 + - name: Run Tests + run: cargo test + + publish: + needs: test + if: needs.check-secret.outputs.publish == 'true' + runs-on: ubuntu-latest + environment: crates-io-torrust + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Install stable toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + + - name: Publish workspace packages + run: | + cargo publish -p torrust-tracker-located-error + cargo publish -p torrust-tracker-primitives + cargo publish -p torrust-tracker-configuration + cargo publish -p torrust-tracker-test-helpers + cargo publish -p torrust-tracker + env: + CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_TOKEN }} diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml new file mode 100644 index 000000000..20152a727 --- /dev/null +++ b/.github/workflows/publish_docker_image.yml @@ -0,0 +1,86 @@ +name: Publish docker image + +on: + push: + branches: + - "main" + - "develop" + tags: + - "v*" + +env: + # Azure file share volume mount requires the Linux container run as root + # 
https://learn.microsoft.com/en-us/azure/container-instances/container-instances-volume-azure-files#limitations + # TORRUST_TRACKER_RUN_AS_USER: root + TORRUST_TRACKER_RUN_AS_USER: appuser + +jobs: + check-secret: + runs-on: ubuntu-latest + environment: dockerhub-torrust + outputs: + publish: ${{ steps.check.outputs.publish }} + steps: + - id: check + env: + DOCKER_HUB_USERNAME: "${{ secrets.DOCKER_HUB_USERNAME }}" + if: "${{ env.DOCKER_HUB_USERNAME != '' }}" + run: echo "publish=true" >> $GITHUB_OUTPUT + + test: + needs: check-secret + if: needs.check-secret.outputs.publish == 'true' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + components: llvm-tools-preview + - uses: Swatinem/rust-cache@v2 + - name: Run Tests + run: cargo test + + dockerhub: + needs: test + if: needs.check-secret.outputs.publish == 'true' + runs-on: ubuntu-latest + environment: dockerhub-torrust + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Docker meta + id: meta + uses: docker/metadata-action@v4 + with: + images: | + # For example: torrust/tracker + "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" + tags: | + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_HUB_USERNAME }} + password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Build and push + uses: docker/build-push-action@v3 + with: + context: . 
+ file: ./Dockerfile + build-args: | + RUN_AS_USER=${{ env.TORRUST_TRACKER_RUN_AS_USER }} + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index 7be5626e5..3b9a9a44a 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -4,19 +4,40 @@ name: CI on: [push, pull_request] jobs: + format: + runs-on: ubuntu-latest + env: + CARGO_TERM_COLOR: always + steps: + - uses: actions/checkout@v3 + - uses: dtolnay/rust-toolchain@stable + with: + toolchain: nightly + components: rustfmt, clippy + - uses: Swatinem/rust-cache@v2 + - name: Check Rust Formatting + run: cargo fmt --check + test: + needs: format runs-on: ubuntu-latest env: CARGO_TERM_COLOR: always steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 + - uses: actions/checkout@v3 + - uses: dtolnay/rust-toolchain@stable with: - profile: minimal toolchain: stable - - uses: Swatinem/rust-cache@v1 - - name: Run tests - run: cargo test + components: llvm-tools-preview + - uses: Swatinem/rust-cache@v2 + - name: Check Rust Code + run: cargo check --all-targets + - name: Clippy Rust Code + run: cargo clippy --all-targets -- -D clippy::pedantic + - uses: taiki-e/install-action@cargo-llvm-cov + - uses: taiki-e/install-action@nextest + - name: Run Tests + run: cargo llvm-cov nextest build: needs: test @@ -28,15 +49,14 @@ jobs: env: CARGO_TERM_COLOR: always steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 + - uses: actions/checkout@v3 + - uses: dtolnay/rust-toolchain@stable with: - profile: minimal toolchain: stable - - uses: Swatinem/rust-cache@v1 - - name: Build torrust tracker + - uses: Swatinem/rust-cache@v2 + - name: Build Torrust Tracker run: cargo build --release - - name: Upload build artifact + - name: 
Upload Build Artifact uses: actions/upload-artifact@v2 with: name: torrust-tracker @@ -46,7 +66,7 @@ jobs: needs: build runs-on: ubuntu-latest steps: - - name: Download build artifact + - name: Download Build Artifact uses: actions/download-artifact@v2 with: name: torrust-tracker diff --git a/.github/workflows/test_docker.yml b/.github/workflows/test_docker.yml new file mode 100644 index 000000000..0c3fc36d8 --- /dev/null +++ b/.github/workflows/test_docker.yml @@ -0,0 +1,26 @@ +name: Test docker build + +on: + push: + pull_request: + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Build docker image + uses: docker/build-push-action@v3 + with: + context: . + file: ./Dockerfile + push: false + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Build docker-compose images + run: docker compose build diff --git a/.gitignore b/.gitignore index 99a07430b..6b58dcb45 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,11 @@ -/target +.env **/*.rs.bk -/database.json.bz2 -/database.db +/.coverage/ /.idea/ +/.vscode/launch.json /config.toml /data.db +/database.db +/database.json.bz2 +/storage/ +/target diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 000000000..11d11a5c5 --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,6 @@ +{ + "recommendations": [ + "streetsidesoftware.code-spell-checker", + "rust-lang.rust-analyzer" + ] +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000..94f199bd6 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,8 @@ +{ + "[rust]": { + "editor.formatOnSave": true + }, + "rust-analyzer.checkOnSave.command": "clippy", + "rust-analyzer.checkOnSave.allTargets": true, + "rust-analyzer.checkOnSave.extraArgs": ["--","-W","clippy::pedantic"], +} \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 
4279ad5b9..3bc78bd67 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,824 +2,1922 @@ # It is not intended for manual editing. version = 3 +[[package]] +name = "addr2line" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "ahash" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" +dependencies = [ + "getrandom", + "once_cell", + "version_check", +] + +[[package]] +name = "ahash" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", +] + [[package]] name = "aho-corasick" -version = "0.7.18" +version = "0.7.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" dependencies = [ "memchr", ] +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + [[package]] name = "aquatic_udp_protocol" -version = "0.1.0" -source = "git+https://github.com/greatest-ape/aquatic#065e007ede84de20f20983b4b504471bbda2fdf2" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16149f27924d42b337a637cd90a8ee2a8973bbccf32aabebce2b3c66913f947f" dependencies = 
[ "byteorder", "either", ] [[package]] -name = "arrayvec" -version = "0.5.2" +name = "arc-swap" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" [[package]] -name = "autocfg" -version = "1.0.1" +name = "arrayvec" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] -name = "base64" -version = "0.13.0" +name = "async-trait" +version = "0.1.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] [[package]] -name = "binascii" -version = "0.1.4" +name = "autocfg" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] -name = "bitflags" -version = "1.2.1" +name = "axum" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +checksum = "8582122b8edba2af43eaf6b80dbfd33f421b5a0eb3a3113d21bc096ac5b44faf" +dependencies = [ + "async-trait", + "axum-core", + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower", + 
"tower-http", + "tower-layer", + "tower-service", +] [[package]] -name = "block-buffer" -version = "0.7.3" +name = "axum-client-ip" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +checksum = "0d719fabd6813392bbc10e1fe67f2977fad52791a836e51236f7e02f2482e017" dependencies = [ - "block-padding", - "byte-tools", - "byteorder", - "generic-array 0.12.3", + "axum", + "forwarded-header-value", + "serde", ] [[package]] -name = "block-buffer" -version = "0.9.0" +name = "axum-core" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +checksum = "b2f958c80c248b34b9a877a643811be8dbca03ca5ba827f2b63baf3a81e5fc4e" dependencies = [ - "generic-array 0.14.4", + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "mime", + "rustversion", + "tower-layer", + "tower-service", ] [[package]] -name = "block-padding" -version = "0.1.5" +name = "axum-server" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +checksum = "25e4a990e1593e286b1b96e6df76da9dbcb84945a810287ca8101f1a4f000f61" dependencies = [ - "byte-tools", + "arc-swap", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "pin-project-lite", + "rustls", + "rustls-pemfile", + "tokio", + "tokio-rustls", + "tower-service", ] [[package]] -name = "buf_redux" -version = "0.8.4" +name = "backtrace" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" +checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" dependencies = [ - "memchr", - "safemem", + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", ] 
[[package]] -name = "bumpalo" -version = "3.8.0" +name = "base64" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1e260c3a9040a7c19a12468758f4c16f31a81a1fe087482be9570ec864bb6c" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] -name = "byte-tools" -version = "0.3.1" +name = "base64" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" +checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" [[package]] -name = "byteorder" -version = "1.4.2" +name = "bigdecimal" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" +checksum = "6aaf33151a6429fe9211d1b276eafdf70cdff28b071e76c0b0e1503221ea3744" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] [[package]] -name = "bytes" -version = "1.0.1" +name = "binascii" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" +checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] -name = "cc" -version = "1.0.66" +name = "bindgen" +version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" +checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "lazy_static", + "lazycell", + "peeking_take_while", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", +] [[package]] -name = "cfg-if" -version = "0.1.10" +name = "bip_bencode" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +checksum = "6048cc5d9680544a5098a290d2845df7dae292c97687b9896b70365bad0ea416" +dependencies = [ + "error-chain", +] [[package]] -name = "cfg-if" -version = "1.0.0" +name = "bitflags" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] -name = "chrono" -version = "0.4.19" +name = "bitvec" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ - "libc", - "num-integer", - "num-traits 0.2.14", - "time", - "winapi", + "funty", + "radium", + "tap", + "wyz", ] [[package]] -name = "config" -version = "0.11.0" +name = "block-buffer" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1b9d958c2b1368a663f05538fc1b5975adce1e19f435acceae987aceeeb369" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "lazy_static", - "nom", - "rust-ini", - "serde 1.0.120", - "serde-hjson", - "serde_json", - "toml", - "yaml-rust", + "generic-array", ] [[package]] -name = "convert_case" -version = "0.4.0" +name = "borsh" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" +checksum = "40f9ca3698b2e4cb7c15571db0abc5551dca417a21ae8140460b50309bb2cc62" +dependencies = [ + "borsh-derive", + "hashbrown 0.13.2", +] [[package]] -name = "cpufeatures" -version = "0.2.1" +name = "borsh-derive" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" +checksum = "598b3eacc6db9c3ee57b22707ad8f6a8d2f6d442bfe24ffeb8cbb70ca59e6a35" dependencies = [ - "libc", + "borsh-derive-internal", + "borsh-schema-derive-internal", + "proc-macro-crate", + "proc-macro2", + "syn", ] [[package]] -name = "derive_more" -version = "0.99.17" +name = "borsh-derive-internal" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +checksum = "186b734fa1c9f6743e90c95d7233c9faab6360d1a96d4ffa19d9cfd1e9350f8a" dependencies = [ - "convert_case", "proc-macro2", "quote", - "rustc_version", "syn", ] [[package]] -name = "digest" -version = "0.8.1" +name = "borsh-schema-derive-internal" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +checksum = "99b7ff1008316626f485991b960ade129253d4034014616b94f309a15366cc49" dependencies = [ - "generic-array 0.12.3", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "digest" -version = "0.9.0" +name = "bufstream" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" + +[[package]] +name = "bumpalo" +version = "3.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" + +[[package]] +name = "bytecheck" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +checksum = "13fe11640a23eb24562225322cd3e452b93a3d4091d62fab69c70542fcd17d1f" dependencies = [ - "generic-array 0.14.4", + "bytecheck_derive", + "ptr_meta", + "simdutf8", ] [[package]] -name = "either" -version = "1.6.1" +name = "bytecheck_derive" +version 
= "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = "e31225543cb46f81a7e224762764f4a6a0f097b1db0b175f69e8065efaa42de5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] [[package]] -name = "fake-simd" -version = "0.1.2" +name = "byteorder" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] -name = "fallible-iterator" -version = "0.2.0" +name = "bytes" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" [[package]] -name = "fallible-streaming-iterator" -version = "0.1.9" +name = "cc" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" [[package]] -name = "fern" +name = "cexpr" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9a4820f0ccc8a7afd67c39a0f1a0f4b07ca1725164271a64939d7aeb9af065" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ - "log", + "nom", ] [[package]] -name = "fnv" -version = "1.0.7" +name = "cfg-if" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] -name = "form_urlencoded" -version = "1.0.0" +name = "chrono" +version = "0.4.23" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ece68d15c92e84fa4f19d3780f1294e5ca82a78a6d515f1efaabcc144688be00" +checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" dependencies = [ - "matches", - "percent-encoding", + "iana-time-zone", + "js-sys", + "num-integer", + "num-traits", + "serde", + "time 0.1.45", + "wasm-bindgen", + "winapi", ] [[package]] -name = "futures" -version = "0.3.21" +name = "clang-sys" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" +checksum = "77ed9a53e5d4d9c573ae844bfac6872b159cb1d1585a83b29e7a64b7eef7332a" dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", + "glob", + "libc", + "libloading", ] [[package]] -name = "futures-channel" -version = "0.3.21" +name = "cmake" +version = "0.1.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "db34956e100b30725f2eb215f90d4871051239535632f84fea3bc92722c66b7c" dependencies = [ - "futures-core", - "futures-sink", + "cc", ] [[package]] -name = "futures-core" -version = "0.3.21" +name = "codespan-reporting" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" +dependencies = [ + "termcolor", + "unicode-width", +] [[package]] -name = "futures-executor" -version = "0.3.21" +name = "config" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +checksum = "d379af7f68bfc21714c6c7dea883544201741d2ce8274bb12fa54f89507f52a7" dependencies = [ - 
"futures-core", - "futures-task", - "futures-util", + "async-trait", + "json5", + "lazy_static", + "nom", + "pathdiff", + "ron", + "rust-ini", + "serde", + "serde_json", + "toml 0.5.11", + "yaml-rust", ] [[package]] -name = "futures-io" -version = "0.3.21" +name = "convert_case" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" [[package]] -name = "futures-macro" -version = "0.3.21" +name = "core-foundation" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" dependencies = [ - "proc-macro2", - "quote", - "syn", + "core-foundation-sys", + "libc", ] [[package]] -name = "futures-sink" -version = "0.3.21" +name = "core-foundation-sys" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] -name = "futures-task" -version = "0.3.21" +name = "cpufeatures" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +dependencies = [ + "libc", +] [[package]] -name = "futures-util" -version = "0.3.21" +name = "crc32fast" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ - "futures-channel", - "futures-core", - 
"futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", + "cfg-if", +] + +[[package]] +name = "crossbeam" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" +dependencies = [ + "cfg-if", + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +dependencies = [ + "cfg-if", + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" +dependencies = [ + "autocfg", + "cfg-if", + "crossbeam-utils", + "memoffset", + "scopeguard", +] + +[[package]] +name = "crossbeam-queue" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "cxx" +version = "1.0.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" +dependencies = [ + "cc", + "cxxbridge-flags", + "cxxbridge-macro", + "link-cplusplus", +] + +[[package]] +name = "cxx-build" +version = "1.0.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" +dependencies = [ + "cc", + "codespan-reporting", + "once_cell", + "proc-macro2", + "quote", + "scratch", + "syn", +] + +[[package]] +name = "cxxbridge-flags" +version = "1.0.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" + +[[package]] +name = "cxxbridge-macro" +version = "1.0.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "darling" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" +dependencies = [ + "darling_core", + "quote", + 
"syn", +] + +[[package]] +name = "derive_more" +version = "0.99.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +dependencies = [ + "convert_case", + "proc-macro2", + "quote", + "rustc_version", + "syn", +] + +[[package]] +name = "derive_utils" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7590f99468735a318c254ca9158d0c065aa9b5312896b5a043b5e39bc96f5fa2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "difflib" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" + +[[package]] +name = "digest" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +dependencies = [ + "block-buffer", + "crypto-common", +] + +[[package]] +name = "dlv-list" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" + +[[package]] +name = "downcast" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" + +[[package]] +name = "either" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" + +[[package]] +name = "encoding_rs" +version = "0.8.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "errno" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +dependencies = [ + "errno-dragonfly", + "libc", + "winapi", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "error-chain" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff511d5dc435d703f4971bc399647c9bc38e20cb41452e3b9feb4765419ed3f3" +dependencies = [ + "backtrace", +] + +[[package]] +name = "fallible-iterator" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" + +[[package]] +name = "fallible-streaming-iterator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" + +[[package]] +name = "fastrand" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + +[[package]] +name = "fern" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bdd7b0849075e79ee9a1836df22c717d1eba30451796fdc631b04565dd11e2a" +dependencies = [ + "log", +] + +[[package]] +name = "flate2" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" +dependencies = [ + "crc32fast", + "libz-sys", + "miniz_oxide", +] + +[[package]] +name = "float-cmp" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" +dependencies = [ + "num-traits", +] + 
+[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "form_urlencoded" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "forwarded-header-value" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9" +dependencies = [ + "nonempty", + "thiserror", +] + +[[package]] +name = "fragile" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" + +[[package]] +name = "frunk" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a89c703bf50009f383a0873845357cc400a95fc535f836feddfe015d7df6e1e0" +dependencies = [ + "frunk_core", + "frunk_derives", + "frunk_proc_macros", +] + +[[package]] +name = "frunk_core" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a446d01a558301dca28ef43222864a9fa2bd9a2e71370f769d5d5d5ec9f3537" + +[[package]] +name = "frunk_derives" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b83164912bb4c97cfe0772913c7af7387ee2e00cb6d4636fb65a35b3d0c8f173" +dependencies = [ + "frunk_proc_macro_helpers", + "quote", + "syn", +] + +[[package]] +name = "frunk_proc_macro_helpers" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "015425591bbeb0f5b8a75593340f1789af428e9f887a4f1e36c0c471f067ef50" +dependencies = [ + "frunk_core", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frunk_proc_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea01524f285deab48affffb342b97f186e657b119c3f1821ac531780e0fbfae0" +dependencies = [ + "frunk_core", + "frunk_proc_macros_impl", + "proc-macro-hack", +] + +[[package]] +name = "frunk_proc_macros_impl" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a802d974cc18ee7fe1a7868fc9ce31086294fd96ba62f8da64ecb44e92a2653" +dependencies = [ + "frunk_core", + "frunk_proc_macro_helpers", + "proc-macro-hack", + "quote", + "syn", +] + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "futures" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" + +[[package]] +name = "futures-executor" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531" + +[[package]] +name = "futures-macro" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" + +[[package]] +name = "futures-task" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" + +[[package]] +name = "futures-util" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", "slab", ] [[package]] -name = "generic-array" -version = "0.12.3" +name = "generic-array" +version = "0.14.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", +] + +[[package]] +name = "gimli" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" + +[[package]] +name = "glob" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" + +[[package]] +name = "h2" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be7b54589b581f624f566bf5d8eb2bab1db736c51528720b6bd36b96b55924d" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash 0.7.6", +] + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash 0.8.3", +] + +[[package]] +name = "hashlink" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa" dependencies = [ - "typenum", + "hashbrown 0.12.3", ] [[package]] -name = "generic-array" -version = "0.14.4" +name = "hermit-abi" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" dependencies = [ - "typenum", - "version_check", + "libc", ] [[package]] -name = "getrandom" -version = "0.1.16" +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "http" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", + "bytes", + "fnv", + "itoa", ] [[package]] -name = "getrandom" -version = "0.2.3" +name = "http-body" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "bytes", + "http", + "pin-project-lite", ] [[package]] -name = "h2" -version = "0.3.4" +name = "http-range-header" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" + +[[package]] +name = "httparse" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" + +[[package]] +name = "httpdate" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" + +[[package]] +name = "hyper" +version = "0.14.24" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f3675cfef6a30c8031cf9e6493ebdc3bb3272a3fea3923c4210d1830e6a472" +checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c" dependencies = [ "bytes", - "fnv", + "futures-channel", "futures-core", - "futures-sink", "futures-util", + "h2", "http", - "indexmap", - "slab", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2", "tokio", - "tokio-util", + "tower-service", "tracing", + "want", ] [[package]] -name = "hashbrown" -version = "0.9.1" +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper", + "native-tls", + "tokio", + "tokio-native-tls", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" +checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "winapi", +] [[package]] -name = "headers" -version = "0.3.3" +name = "iana-time-zone-haiku" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62689dc57c7456e69712607ffcbd0aa1dfcccf9af73727e9b25bc1825375cac3" +checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" dependencies = [ - "base64", - "bitflags", - "bytes", - "headers-core", - "http", - "mime", - "sha-1 0.8.2", - "time", + "cxx", + "cxx-build", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "0.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "indexmap" +version = "1.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", + "serde", +] + +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "io-enum" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4b0d47a958cb166282b4dc4840a35783e861c2b39080af846e6481ebe145eee" +dependencies = [ + "derive_utils", + "quote", + "syn", +] + +[[package]] +name = "io-lifetimes" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3" +dependencies = [ + "libc", + "windows-sys 0.45.0", +] + +[[package]] +name = "ipnet" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" + +[[package]] +name = "js-sys" +version = "0.3.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "json5" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1" +dependencies = [ + "pest", + "pest_derive", + "serde", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "lexical" +version = "6.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7aefb36fd43fef7003334742cbf77b243fcd36418a1d1bdd480d613a67968f6" +dependencies = [ + "lexical-core", +] + +[[package]] +name = "lexical-core" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2cde5de06e8d4c2faabc400238f9ae1c74d5412d03a7bd067645ccbc47070e46" +dependencies = [ + "lexical-parse-float", + "lexical-parse-integer", + "lexical-util", + "lexical-write-float", + "lexical-write-integer", +] + +[[package]] +name = "lexical-parse-float" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683b3a5ebd0130b8fb52ba0bdc718cc56815b6a097e28ae5a6997d0ad17dc05f" +dependencies = [ + "lexical-parse-integer", + "lexical-util", + "static_assertions", +] + +[[package]] +name = "lexical-parse-integer" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d0994485ed0c312f6d965766754ea177d07f9c00c9b82a5ee62ed5b47945ee9" +dependencies = [ + "lexical-util", + "static_assertions", +] + +[[package]] +name = "lexical-util" +version = "0.8.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5255b9ff16ff898710eb9eb63cb39248ea8a5bb036bea8085b1a767ff6c4e3fc" +dependencies = [ + "static_assertions", +] + +[[package]] +name = "lexical-write-float" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accabaa1c4581f05a3923d1b4cfd124c329352288b7b9da09e766b0668116862" +dependencies = [ + "lexical-util", + "lexical-write-integer", + "static_assertions", +] + +[[package]] +name = "lexical-write-integer" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1b6f3d1f4422866b68192d62f77bc5c700bee84f3069f2469d7bc8c77852446" +dependencies = [ + "lexical-util", + "static_assertions", +] + +[[package]] +name = "libc" +version = "0.2.140" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" + +[[package]] +name = "libloading" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +dependencies = [ + "cfg-if", + "winapi", +] + +[[package]] +name = "libsqlite3-sys" +version = "0.25.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "libz-sys" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "link-cplusplus" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" +dependencies = [ + "cc", +] + +[[package]] +name = "linked-hash-map" +version = "0.5.6" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" + +[[package]] +name = "linux-raw-sys" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" + +[[package]] +name = "local-ip-address" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faa9d02443a1741e9f51dafdfcbffb3863b2a89c457d762b40337d6c5153ef81" +dependencies = [ + "libc", + "neli", + "thiserror", + "windows-sys 0.42.0", +] + +[[package]] +name = "lock_api" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +dependencies = [ + "autocfg", + "scopeguard", ] [[package]] -name = "headers-core" -version = "0.2.0" +name = "log" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ - "http", + "cfg-if", ] [[package]] -name = "hermit-abi" -version = "0.1.18" +name = "lru" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" +checksum = "b6e8aaa3f231bb4bd57b84b2d5dc3ae7f350265df8aa96492e0bc394a1571909" dependencies = [ - "libc", + "hashbrown 0.12.3", ] [[package]] -name = "hex" -version = "0.4.3" +name = "matchit" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" [[package]] -name = "http" -version = "0.2.3" +name = "memchr" +version = "2.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" -dependencies = [ - "bytes", - "fnv", - "itoa", -] +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] -name = "http-body" -version = "0.4.0" +name = "memoffset" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" dependencies = [ - "bytes", - "http", + "autocfg", ] [[package]] -name = "httparse" -version = "1.5.1" +name = "mime" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" +checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" [[package]] -name = "httpdate" -version = "0.3.2" +name = "minimal-lexical" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] -name = "hyper" -version = "0.14.2" +name = "miniz_oxide" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "httparse", - "httpdate", - "itoa", - "pin-project", - "socket2", - "tokio", - "tower-service", - "tracing", - "want", + "adler", ] [[package]] -name = "idna" -version = "0.2.3" +name = "mio" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", + "libc", + "log", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.45.0", ] [[package]] -name = "indexmap" -version = "1.6.1" +name = "mockall" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" +checksum = "50e4a1c770583dac7ab5e2f6c139153b783a53a1bbee9729613f193e59828326" dependencies = [ - "autocfg", - "hashbrown", + "cfg-if", + "downcast", + "fragile", + "lazy_static", + "mockall_derive", + "predicates", + "predicates-tree", ] [[package]] -name = "input_buffer" -version = "0.4.0" +name = "mockall_derive" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f97967975f448f1a7ddb12b0bc41069d09ed6a1c161a92687e057325db35d413" +checksum = "832663583d5fa284ca8810bf7015e46c9fff9622d3cf34bd1eea5003fec06dd0" dependencies = [ - "bytes", + "cfg-if", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "instant" -version = "0.1.10" +name = "multimap" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d" +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" dependencies = [ - "cfg-if 1.0.0", + "serde", ] [[package]] -name = "itoa" -version = "0.4.7" +name = "mysql" +version = "23.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +checksum = "05f11339ca5c251941805d51362a07823605a80586ced92914ab7de84fba813f" +dependencies = [ + "bufstream", + "bytes", + "crossbeam", + "flate2", + "io-enum", + "libc", + "lru", + "mysql_common", + "named_pipe", + "native-tls", + 
"once_cell", + "pem", + "percent-encoding", + "serde", + "serde_json", + "socket2", + "twox-hash", + "url", +] [[package]] -name = "js-sys" -version = "0.3.55" +name = "mysql_common" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cc9ffccd38c451a86bf13657df244e9c3f37493cce8e5e21e940963777acc84" +checksum = "9006c95034ccf7b903d955f210469119f6c3477fc9c9e7a7845ce38a3e665c2a" dependencies = [ - "wasm-bindgen", + "base64 0.13.1", + "bigdecimal", + "bindgen", + "bitflags", + "bitvec", + "byteorder", + "bytes", + "cc", + "cmake", + "crc32fast", + "flate2", + "frunk", + "lazy_static", + "lexical", + "num-bigint", + "num-traits", + "rand", + "regex", + "rust_decimal", + "saturating", + "serde", + "serde_json", + "sha1", + "sha2", + "smallvec", + "subprocess", + "thiserror", + "time 0.3.20", + "uuid", ] [[package]] -name = "lazy_static" -version = "1.4.0" +name = "named_pipe" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "ad9c443cce91fc3e12f017290db75dde490d685cdaaf508d7159d7cf41f0eb2b" +dependencies = [ + "winapi", +] [[package]] -name = "lexical-core" -version = "0.7.6" +name = "native-tls" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6607c62aa161d23d17a9072cc5da0be67cdfc89d3afb1e8d9c842bebc2525ffe" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" dependencies = [ - "arrayvec", - "bitflags", - "cfg-if 1.0.0", - "ryu", - "static_assertions", + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", ] [[package]] -name = "libc" -version = "0.2.101" +name = "neli" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3cb00336871be5ed2c8ed44b60ae9959dc5b9f08539422ed43f09e34ecaeba21" +checksum = "9053554eb5dcb7e10d9cdab1206965bde870eed5d0d341532ca035e3ba221508" +dependencies = [ + "byteorder", + "libc", +] [[package]] -name = "libsqlite3-sys" -version = "0.18.0" +name = "nom" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e704a02bcaecd4a08b93a23f6be59d0bd79cd161e0963e9499165a0a35df7bd" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ - "pkg-config", - "vcpkg", + "memchr", + "minimal-lexical", ] [[package]] -name = "linked-hash-map" -version = "0.5.4" +name = "nonempty" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" [[package]] -name = "lock_api" -version = "0.4.4" +name = "normalize-line-endings" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" + +[[package]] +name = "num-bigint" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" dependencies = [ - "scopeguard", + "autocfg", + "num-integer", + "num-traits", ] [[package]] -name = "log" -version = "0.4.13" +name = "num-integer" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ - "cfg-if 0.1.10", + "autocfg", + "num-traits", ] [[package]] -name = "lru-cache" -version = "0.1.2" +name = "num-traits" +version = "0.2.15" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ - "linked-hash-map", + "autocfg", ] [[package]] -name = "matches" -version = "0.1.8" +name = "num_cpus" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" +checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" +dependencies = [ + "hermit-abi", + "libc", +] [[package]] -name = "memchr" -version = "2.4.1" +name = "object" +version = "0.30.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439" +dependencies = [ + "memchr", +] [[package]] -name = "mime" -version = "0.3.16" +name = "once_cell" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] -name = "mime_guess" -version = "2.0.3" +name = "openssl" +version = "0.10.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" +checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" dependencies = [ - "mime", - "unicase", + "bitflags", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", ] [[package]] -name = "mio" -version = "0.7.7" +name = "openssl-macros" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e50ae3f04d169fcc9bde0b547d1c205219b7157e07ded9c5aff03e0637cb3ed7" +checksum = 
"b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" dependencies = [ - "libc", - "log", - "miow", - "ntapi", - "winapi", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "miow" -version = "0.3.6" +name = "openssl-probe" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" -dependencies = [ - "socket2", - "winapi", -] +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] -name = "multipart" -version = "0.17.1" +name = "openssl-src" +version = "111.25.1+1.1.1t" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050aeedc89243f5347c3e237e3e13dc76fbe4ae3742a57b94dc14f69acf76d4" +checksum = "1ef9a9cc6ea7d9d5e7c4a913dc4b48d0e359eddf01af1dfec96ba7064b4aba10" dependencies = [ - "buf_redux", - "httparse", - "log", - "mime", - "mime_guess", - "quick-error", - "rand 0.7.3", - "safemem", - "tempfile", - "twoway", + "cc", ] [[package]] -name = "nom" -version = "5.1.2" +name = "openssl-sys" +version = "0.9.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" +checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" dependencies = [ - "lexical-core", - "memchr", - "version_check", + "autocfg", + "cc", + "libc", + "openssl-src", + "pkg-config", + "vcpkg", ] [[package]] -name = "ntapi" -version = "0.3.6" +name = "ordered-multimap" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +checksum = "ccd746e37177e1711c20dd619a1620f34f5c8b569c53590a72dedd5344d8924a" dependencies = [ - "winapi", + "dlv-list", + "hashbrown 0.12.3", ] [[package]] -name = "num-integer" -version = "0.1.44" +name = "parking_lot" +version = "0.12.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ - "autocfg", - "num-traits 0.2.14", + "lock_api", + "parking_lot_core", ] [[package]] -name = "num-traits" -version = "0.1.43" +name = "parking_lot_core" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" +checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ - "num-traits 0.2.14", + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-sys 0.45.0", ] [[package]] -name = "num-traits" -version = "0.2.14" +name = "pathdiff" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" -dependencies = [ - "autocfg", -] +checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" [[package]] -name = "num_cpus" -version = "1.13.0" +name = "peeking_take_while" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" -dependencies = [ - "hermit-abi", - "libc", -] +checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] -name = "once_cell" -version = "1.5.2" +name = "pem" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" +checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" +dependencies = [ + "base64 0.13.1", +] [[package]] -name = "opaque-debug" -version = "0.2.3" +name = "percent-encoding" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" +checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] -name = "opaque-debug" -version = "0.3.0" +name = "pest" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "8cbd939b234e95d72bc393d51788aec68aeeb5d51e748ca08ff3aad58cb722f7" +dependencies = [ + "thiserror", + "ucd-trie", +] [[package]] -name = "parking_lot" -version = "0.11.1" +name = "pest_derive" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" +checksum = "a81186863f3d0a27340815be8f2078dd8050b14cd71913db9fbda795e5f707d7" dependencies = [ - "instant", - "lock_api", - "parking_lot_core", + "pest", + "pest_generator", ] [[package]] -name = "parking_lot_core" -version = "0.8.3" +name = "pest_generator" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" +checksum = "75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b" dependencies = [ - "cfg-if 1.0.0", - "instant", - "libc", - "redox_syscall", - "smallvec", - "winapi", + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "percent-encoding" -version = "2.1.0" +name = "pest_meta" +version = "2.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "5e3b284b1f13a20dc5ebc90aff59a51b8d7137c221131b52a7260c08cbc1cc80" +dependencies = [ + "once_cell", + "pest", + "sha2", +] [[package]] name = "pin-project" -version = "1.0.4" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"95b70b68509f17aa2857863b6fa00bf21fc93674c7a8893de2f469f6aa7ca2f2" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.4" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caa25a6393f22ce819b0f50e0be89287292fda8d425be38ee0ca14c4931d9e71" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", @@ -827,58 +1925,117 @@ dependencies = [ ] [[package]] -name = "pin-project-lite" -version = "0.2.4" +name = "pin-project-lite" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkg-config" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "predicates" +version = "2.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" +dependencies = [ + "difflib", + "float-cmp", + "itertools", + "normalize-line-endings", + "predicates-core", + "regex", +] + +[[package]] +name = "predicates-core" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" +checksum = 
"72f883590242d3c6fc5bf50299011695fa6590c2c70eac95ee1bdb9a733ad1a2" [[package]] -name = "pin-utils" -version = "0.1.0" +name = "predicates-tree" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +checksum = "54ff541861505aabf6ea722d2131ee980b8276e10a1297b94e896dd8b621850d" +dependencies = [ + "predicates-core", + "termtree", +] [[package]] -name = "pkg-config" -version = "0.3.19" +name = "proc-macro-crate" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +dependencies = [ + "toml 0.5.11", +] [[package]] -name = "ppv-lite86" -version = "0.2.10" +name = "proc-macro-hack" +version = "0.5.20+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.24" +version = "1.0.51" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "ptr_meta" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" +checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1" dependencies = [ - "unicode-xid", + "ptr_meta_derive", ] [[package]] -name = "quick-error" -version = "1.2.3" +name = "ptr_meta_derive" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +checksum = 
"16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] [[package]] name = "quote" -version = "1.0.8" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" +checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" dependencies = [ "proc-macro2", ] [[package]] name = "r2d2" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "545c5bc2b880973c9c10e4067418407a0ccaa3091781d1671d46eb35107cb26f" +checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" dependencies = [ "log", "parking_lot", @@ -886,48 +2043,40 @@ dependencies = [ ] [[package]] -name = "r2d2_sqlite" -version = "0.16.0" +name = "r2d2_mysql" +version = "23.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed60ebe88b27ac28c0563bc0fbeaecd302ff53e3a01e5ddc2ec9f4e6c707d929" +checksum = "9733d738ce65959a744f387bae69aa690a867e18d48e5486b171c47bc7b0c575" dependencies = [ + "mysql", "r2d2", - "rusqlite", ] [[package]] -name = "rand" -version = "0.7.3" +name = "r2d2_sqlite" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +checksum = "b4f5d0337e99cd5cacd91ffc326c6cc9d8078def459df560c4f9bf9ba4a51034" dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc 0.2.0", + "r2d2", + "rusqlite", ] [[package]] -name = "rand" -version = "0.8.4" +name = "radium" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" -dependencies = [ - "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.3", - "rand_hc 0.3.1", -] +checksum = 
"dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" [[package]] -name = "rand_chacha" -version = "0.2.2" +name = "rand" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", + "libc", + "rand_chacha", + "rand_core", ] [[package]] @@ -937,59 +2086,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.3", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", + "rand_core", ] [[package]] name = "rand_core" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" -dependencies = [ - "getrandom 0.2.3", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rand_hc" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" -dependencies = [ - "rand_core 0.6.3", + "getrandom", ] [[package]] name = "redox_syscall" -version = "0.2.10" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] [[package]] name = "regex" -version = "1.5.4" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" +checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" dependencies = [ "aho-corasick", "memchr", @@ -998,17 +2120,54 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.25" +version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" +checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" [[package]] -name = "remove_dir_all" -version = "0.5.3" +name = "rend" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +checksum = "581008d2099240d37fb08d77ad713bcaec2c4d89d50b5b21a8bb1996bbab68ab" dependencies = [ - "winapi", + "bytecheck", +] + +[[package]] +name = "reqwest" +version = "0.11.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9" +dependencies = [ + "base64 0.21.0", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-tls", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "serde", + "serde_json", + "serde_urlencoded", + "tokio", + "tokio-native-tls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg", ] [[package]] @@ -1026,27 +2185,95 @@ dependencies = [ "winapi", ] +[[package]] +name = "rkyv" +version = "0.7.40" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c30f1d45d9aa61cbc8cd1eb87705470892289bb2d01943e7803b873a57404dc3" +dependencies = [ + "bytecheck", + "hashbrown 0.12.3", + "ptr_meta", + "rend", + "rkyv_derive", + "seahash", +] + +[[package]] +name = "rkyv_derive" +version = "0.7.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff26ed6c7c4dfc2aa9480b86a60e3c7233543a270a680e10758a507c5a4ce476" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "ron" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a" +dependencies = [ + "base64 0.13.1", + "bitflags", + "serde", +] + [[package]] name = "rusqlite" -version = "0.23.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45d0fd62e1df63d254714e6cb40d0a0e82e7a1623e7a27f679d851af092ae58b" +checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" dependencies = [ "bitflags", "fallible-iterator", "fallible-streaming-iterator", + "hashlink", "libsqlite3-sys", - "lru-cache", - "memchr", "smallvec", - "time", ] [[package]] name = "rust-ini" -version = "0.13.0" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6d5f2436026b4f6e79dc829837d467cc7e9a55ee40e750d716713540715a2df" +dependencies = [ + "cfg-if", + "ordered-multimap", +] + +[[package]] +name = "rust_decimal" +version = "1.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13cf35f7140155d02ba4ec3294373d513a3c7baa8364c162b030e33c61520a8" +dependencies = [ + "arrayvec", + "borsh", + "bytecheck", + "byteorder", + "bytes", + "num-traits", + "rand", + "rkyv", + "serde", + "serde_json", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" + +[[package]] +name = "rustc-hash" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e52c148ef37f8c375d49d5a73aa70713125b7f19095948a923f80afdeb22ec2" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc_version" @@ -1057,45 +2284,76 @@ dependencies = [ "semver", ] +[[package]] +name = "rustix" +version = "0.36.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys", + "windows-sys 0.45.0", +] + [[package]] name = "rustls" -version = "0.19.1" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" +checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ - "base64", "log", "ring", "sct", "webpki", ] +[[package]] +name = "rustls-pemfile" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +dependencies = [ + "base64 0.21.0", +] + +[[package]] +name = "rustversion" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" + [[package]] name = "ryu" -version = "1.0.5" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] -name = "safemem" -version = "0.3.3" +name = "saturating" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" +checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" [[package]] -name = "scheduled-thread-pool" -version = "0.2.5" +name = "schannel" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f74fd1204073fa02d5d5d68bec8021be4c38690b61264b2fdb48083d0e7d7" +checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" dependencies = [ - "parking_lot", + "windows-sys 0.42.0", ] [[package]] -name = "scoped-tls" -version = "1.0.0" +name = "scheduled-thread-pool" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" +checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" +dependencies = [ + "parking_lot", +] [[package]] name = "scopeguard" @@ -1103,47 +2361,64 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "scratch" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" + [[package]] name = "sct" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ "ring", "untrusted", ] [[package]] -name = "semver" -version = "1.0.4" +name = "seahash" +version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012" +checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] -name = "serde" -version = "0.8.23" +name = 
"security-framework" +version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dad3f759919b92c3068c696c15c3d17238234498bbdcc80f2c469606f948ac8" +checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] [[package]] -name = "serde" -version = "1.0.120" +name = "security-framework-sys" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "166b2349061381baf54a58e4b13c89369feb0ef2eaa57198899e2312aac30aab" +checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" dependencies = [ - "serde_derive", + "core-foundation-sys", + "libc", ] [[package]] -name = "serde-hjson" -version = "0.9.1" +name = "semver" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" + +[[package]] +name = "serde" +version = "1.0.154" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a3a4e0ea8a88553209f6cc6cfe8724ecad22e1acf372793c27d995290fe74f8" +checksum = "8cdd151213925e7f1ab45a9bbfb129316bd00799784b174b7cc7bcd16961c49e" dependencies = [ - "lazy_static", - "num-traits 0.1.43", - "regex", - "serde 0.8.23", + "serde_derive", ] [[package]] @@ -1152,24 +2427,24 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "934d8bdbaa0126dafaea9a8833424a211d9661897717846c6bb782349ca1c30d" dependencies = [ - "serde 1.0.120", + "serde", "serde_bytes", ] [[package]] name = "serde_bytes" -version = "0.11.5" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16ae07dd2f88a366f15bd0632ba725227018c69a1c8550a927324f8eb8368bb9" +checksum = "416bda436f9aab92e02c8e10d49a15ddd339cea90b6e340fe51ed97abb548294" dependencies = [ - "serde 1.0.120", + "serde", ] 
[[package]] name = "serde_derive" -version = "1.0.120" +version = "1.0.154" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca2a8cb5805ce9e3b95435e3765b7b553cecc762d938d409434338386cb5775" +checksum = "4fc80d722935453bcafdc2c9a73cd6fac4dc1938f0346035d84bf99fa9e33217" dependencies = [ "proc-macro2", "quote", @@ -1178,80 +2453,148 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.72" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0ffa0837f2dfa6fb90868c2b5468cad482e175f7dad97e7421951e663f2b527" +checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" dependencies = [ "itoa", "ryu", - "serde 1.0.120", + "serde", +] + +[[package]] +name = "serde_path_to_error" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db0969fff533976baadd92e08b1d102c5a3d8a8049eadfd69d4d1e3c5b2ed189" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_repr" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "395627de918015623b32e7669714206363a7fc00382bf477e72c1f7533e8eafc" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_spanned" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0efd8caf556a6cebd3b285caf480045fcc1ac04f6bd786b09a6f11af30c4fcf4" +dependencies = [ + "serde", ] [[package]] name = "serde_urlencoded" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", "itoa", "ryu", - "serde 1.0.120", + "serde", ] [[package]] -name = "sha-1" -version = "0.8.2" +name = "serde_with" +version = "2.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ea48c9627169d206b35905699f513f513c303ab9d964a59b44fdcf66c1d1ab7" +dependencies = [ + "base64 0.13.1", + "chrono", + "hex", + "indexmap", + "serde", + "serde_json", + "serde_with_macros", + "time 0.3.20", +] + +[[package]] +name = "serde_with_macros" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e6b7e52858f9f06c25e1c566bbb4ab428200cb3b30053ea09dc50837de7538b" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sha1" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", + "cfg-if", + "cpufeatures", + "digest", ] [[package]] -name = "sha-1" -version = "0.9.8" +name = "sha2" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", - "digest 0.9.0", - "opaque-debug 0.3.0", + "digest", ] +[[package]] +name = "shlex" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" + [[package]] name = "signal-hook-registry" -version = "1.3.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] +[[package]] +name = "simdutf8" +version = "0.1.4" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" + [[package]] name = "slab" -version = "0.4.2" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +dependencies = [ + "autocfg", +] [[package]] name = "smallvec" -version = "1.6.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.3.19" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ - "cfg-if 1.0.0", "libc", "winapi", ] @@ -1268,45 +2611,87 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "subprocess" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c2e86926081dda636c546d8c5e641661049d7562a68f5488be4a1f7f66f6086" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "syn" -version = "1.0.67" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6498a9efc342871f91cc2d0d694c674368b4ceb40f62b65a7a08c3792935e702" +checksum = 
"72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", - "unicode-xid", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "tempfile" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95" +dependencies = [ + "cfg-if", + "fastrand", + "redox_syscall", + "rustix", + "windows-sys 0.42.0", ] [[package]] -name = "tempfile" -version = "3.2.0" +name = "termcolor" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" dependencies = [ - "cfg-if 1.0.0", - "libc", - "rand 0.8.4", - "redox_syscall", - "remove_dir_all", - "winapi", + "winapi-util", ] +[[package]] +name = "termtree" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95059e91184749cb66be6dc994f67f182b6d897cb3df74a5bf66b5e709295fd8" + [[package]] name = "thiserror" -version = "1.0.26" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93119e4feac1cbe6c798c34d3a53ea0026b0b1de6a120deef895137c0529bfe2" +checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.26" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"060d69a0afe7796bf42e9e2ff91f5ee691fb15c53d38b4b62a9a53eb23164745" +checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" dependencies = [ "proc-macro2", "quote", @@ -1315,35 +2700,62 @@ dependencies = [ [[package]] name = "time" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" dependencies = [ "libc", "wasi 0.10.0+wasi-snapshot-preview1", "winapi", ] +[[package]] +name = "time" +version = "0.3.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" +dependencies = [ + "itoa", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" + +[[package]] +name = "time-macros" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" +dependencies = [ + "time-core", +] + [[package]] name = "tinyvec" -version = "1.3.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "848a1e1181b9f6753b5e96a092749e29b11d19ede67dfbbd6c7dc7e0f49b5338" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] [[package]] name = "tinyvec_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.7.0" +version = "1.26.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c79ba603c337335df6ba6dd6afc38c38a7d5e1b0c871678439ea973cd62a118e" +checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" dependencies = [ "autocfg", "bytes", @@ -1351,29 +2763,39 @@ dependencies = [ "memchr", "mio", "num_cpus", - "once_cell", "pin-project-lite", "signal-hook-registry", + "socket2", "tokio-macros", - "winapi", + "windows-sys 0.45.0", ] [[package]] name = "tokio-macros" -version = "1.1.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caf7b11a536f46a809a8a9f0bb4237020f70ecbf115b842360afb127ea2fda57" +checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ "proc-macro2", "quote", "syn", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" -version = "0.22.0" +version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ "rustls", "tokio", @@ -1381,176 +2803,274 @@ dependencies = [ ] [[package]] -name = "tokio-stream" -version = "0.1.2" +name = "tokio-util" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76066865172052eb8796c686f0b441a93df8b08d40a950b062ffb9a426f00edd" +checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" dependencies = [ + "bytes", "futures-core", + "futures-sink", "pin-project-lite", "tokio", + "tracing", ] [[package]] -name = "tokio-tungstenite" -version = "0.13.0" +name = "toml" +version = "0.5.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1a5f475f1b9d077ea1017ecbc60890fda8e54942d680ca0b1d2b47cfa2d861b" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ - "futures-util", - "log", - "pin-project", - "tokio", - "tungstenite", + "serde", ] [[package]] -name = "tokio-util" -version = "0.6.7" +name = "toml" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" +checksum = "f7afcae9e3f0fe2c370fd4657108972cbb2fa9db1b9f84849cefd80741b01cb6" dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "log", - "pin-project-lite", - "tokio", + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", ] [[package]] -name = "toml" -version = "0.5.8" +name = "toml_datetime" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.19.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +checksum = "9a1eb0622d28f4b9c90adc4ea4b2b46b47663fde9ac5fafcb14a1369d5508825" dependencies = [ - "serde 1.0.120", + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", ] [[package]] name = "torrust-tracker" -version = "2.2.0" +version = "3.0.0-alpha.1" dependencies = [ "aquatic_udp_protocol", + "async-trait", + "axum", + "axum-client-ip", + "axum-server", "binascii", - "byteorder", + "bip_bencode", "chrono", "config", "derive_more", "fern", "futures", "hex", + "hyper", + "lazy_static", + "local-ip-address", "log", + "mockall", + "multimap", + "openssl", "percent-encoding", "r2d2", + "r2d2_mysql", "r2d2_sqlite", - "rand 0.8.4", - "serde 1.0.120", + "rand", + "reqwest", + "serde", "serde_bencode", 
"serde_bytes", "serde_json", + "serde_repr", + "serde_urlencoded", + "serde_with", "thiserror", "tokio", - "toml", - "warp", + "toml 0.7.2", + "torrust-tracker-configuration", + "torrust-tracker-located-error", + "torrust-tracker-primitives", + "torrust-tracker-test-helpers", + "uuid", ] [[package]] -name = "tower-service" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" +name = "torrust-tracker-configuration" +version = "3.0.0-alpha.1" +dependencies = [ + "config", + "log", + "serde", + "serde_with", + "thiserror", + "toml 0.5.11", + "torrust-tracker-located-error", + "torrust-tracker-primitives", + "uuid", +] [[package]] -name = "tracing" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" +name = "torrust-tracker-located-error" +version = "3.0.0-alpha.1" dependencies = [ - "cfg-if 1.0.0", "log", - "pin-project-lite", - "tracing-core", + "thiserror", ] [[package]] -name = "tracing-core" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" +name = "torrust-tracker-primitives" +version = "3.0.0-alpha.1" +dependencies = [ + "derive_more", + "serde", +] + +[[package]] +name = "torrust-tracker-test-helpers" +version = "3.0.0-alpha.1" dependencies = [ "lazy_static", + "rand", + "tokio", + "torrust-tracker-configuration", + "torrust-tracker-primitives", ] [[package]] -name = "try-lock" -version = "0.2.3" +name = "tower" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + 
"pin-project-lite", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] [[package]] -name = "tungstenite" -version = "0.12.0" +name = "tower-http" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ada8297e8d70872fa9a551d93250a9f407beb9f37ef86494eb20012a2ff7c24" +checksum = "5d1d42a9b3f3ec46ba828e8d376aec14592ea199f70a06a548587ecd1c4ab658" dependencies = [ - "base64", - "byteorder", + "bitflags", "bytes", + "futures-core", + "futures-util", "http", - "httparse", - "input_buffer", + "http-body", + "http-range-header", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + +[[package]] +name = "tracing" +version = "0.1.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +dependencies = [ + "cfg-if", "log", - "rand 0.8.4", - "sha-1 0.9.8", - "url", - "utf-8", + "pin-project-lite", + "tracing-core", ] [[package]] -name = "twoway" -version = "0.1.8" +name = "tracing-core" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" +checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" dependencies = [ - "memchr", + "once_cell", ] [[package]] -name = "typenum" -version = "1.12.0" +name = "try-lock" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" +checksum = 
"3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] -name = "unicase" -version = "2.6.0" +name = "twox-hash" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "version_check", + "cfg-if", + "rand", + "static_assertions", ] +[[package]] +name = "typenum" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" + +[[package]] +name = "ucd-trie" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" + [[package]] name = "unicode-bidi" -version = "0.3.6" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524b68aca1d05e03fdf03fcdce2c6c94b6daf6d16861ddaa7e4f2b6638a9052c" + +[[package]] +name = "unicode-ident" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "246f4c42e67e7a4e3c6106ff716a5d067d4132a642840b242e357e468a2a0085" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] -name = "unicode-xid" -version = "0.2.1" +name = "unicode-width" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" +checksum = 
"c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "untrusted" @@ -1560,21 +3080,23 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.2.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" dependencies = [ "form_urlencoded", "idna", - "matches", "percent-encoding", ] [[package]] -name = "utf-8" -version = "0.7.6" +name = "uuid" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79" +dependencies = [ + "getrandom", +] [[package]] name = "vcpkg" @@ -1584,9 +3106,9 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" -version = "0.9.2" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "want" @@ -1598,78 +3120,60 @@ dependencies = [ "try-lock", ] -[[package]] -name = "warp" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "332d47745e9a0c38636dbd454729b147d16bd1ed08ae67b3ab281c4506771054" -dependencies = [ - "bytes", - "futures", - "headers", - "http", - "hyper", - "log", - "mime", - "mime_guess", - "multipart", - "percent-encoding", - "pin-project", - "scoped-tls", - "serde 1.0.120", - "serde_json", - "serde_urlencoded", - "tokio", - "tokio-rustls", - "tokio-stream", - "tokio-tungstenite", - "tokio-util", - "tower-service", - "tracing", -] - [[package]] name = "wasi" -version = 
"0.9.0+wasi-snapshot-preview1" +version = "0.10.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" +version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.78" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" +checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.78" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a317bf8f9fba2476b4b2c85ef4c4af8ff39c3c7f0cdfeed4f82c34a880aa837b" +checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", "syn", "wasm-bindgen-shared", ] +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "wasm-bindgen-macro" -version = "0.2.78" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56146e7c495528bf6587663bea13a8eb588d39b36b679d83972e1a2dbbdacf9" +checksum = 
"4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -1677,9 +3181,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.78" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7803e0eea25835f8abdc585cd3021b3deb11543c6fe226dcd30b228857c5c5ab" +checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", @@ -1690,15 +3194,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.78" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0237232789cf037d5480773fe568aac745bfe2afbc11a863e97901780a6b47cc" +checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" [[package]] name = "web-sys" -version = "0.3.55" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38eb105f1c59d9eaa6b5cdc92b859d85b926e82cb2e0945cd0c9259faa6fe9fb" +checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" dependencies = [ "js-sys", "wasm-bindgen", @@ -1706,9 +3210,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.21.4" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" dependencies = [ "ring", "untrusted", @@ -1730,12 +3234,129 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + [[package]] name 
= "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" + +[[package]] +name = "winnow" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee7b2c67f962bf5042bfd8b6a916178df33a26eec343ae064cb8e069f638fa6f" +dependencies = [ + "memchr", +] + +[[package]] +name = "winreg" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" +dependencies = [ + "winapi", +] + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + [[package]] name = "yaml-rust" version = "0.4.5" diff --git a/Cargo.toml b/Cargo.toml index 084a7cfb5..4b6bcb323 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,34 +1,76 @@ [package] name = "torrust-tracker" -version = "2.2.0" -license = "AGPL-3.0" -authors = ["Mick van Dijke "] description = "A feature rich BitTorrent tracker." 
-edition = "2018" +license = "AGPL-3.0" +authors.workspace = true +edition.workspace = true +version.workspace = true -[profile.release] -lto = "fat" +[workspace.package] +authors = ["Nautilus Cyberneering , Mick van Dijke "] +edition = "2021" +repository = "https://github.com/torrust/torrust-tracker" +version = "3.0.0-alpha.1" [dependencies] -serde = {version = "1.0", features = ["derive"]} -serde_bencode = "^0.2.3" -serde_bytes = "0.11" -serde_json = "1.0.72" -hex = "0.4.3" -percent-encoding = "2.1.0" -warp = {version = "0.3", features = ["tls"]} -tokio = {version = "1.7", features = ["macros", "io-util", "net", "time", "rt-multi-thread", "fs", "sync", "signal"]} +tokio = { version = "1.26", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } +serde = { version = "1.0", features = ["derive"] } +serde_bencode = "^0.2" +serde_json = "1.0" +serde_with = "2.0" +hex = "0.4" +percent-encoding = "2.2" binascii = "0.1" -toml = "0.5" -log = {version = "0.4", features = ["release_max_level_info"]} +lazy_static = "1.4" +openssl = { version = "0.10", features = ["vendored"] } +config = "0.13" +toml = "0.7" +log = { version = "0.4", features = ["release_max_level_info"] } fern = "0.6" chrono = "0.4" -byteorder = "1" -r2d2_sqlite = "0.16.0" -r2d2 = "0.8.8" -rand = "0.8.4" -config = "0.11" +r2d2 = "0.8" +r2d2_mysql = "23.0" +r2d2_sqlite = { version = "0.21", features = ["bundled"] } +rand = "0.8" derive_more = "0.99" thiserror = "1.0" -aquatic_udp_protocol = { git = "https://github.com/greatest-ape/aquatic" } -futures = "0.3.21" +futures = "0.3" +async-trait = "0.1" +aquatic_udp_protocol = "0.2" +uuid = { version = "1", features = ["v4"] } +axum = "0.6.10" +axum-server = { version = "0.4", features = ["tls-rustls"] } +axum-client-ip = "0.4" +bip_bencode = "0.4" +torrust-tracker-primitives = { version = "3.0.0-alpha.1", path = "packages/primitives" } +torrust-tracker-configuration = { version = "3.0.0-alpha.1", path = "packages/configuration" } 
+torrust-tracker-located-error = { version = "3.0.0-alpha.1", path = "packages/located-error" } +multimap = "0.8" +hyper = "0.14" + +[dev-dependencies] +mockall = "0.11" +reqwest = { version = "0.11", features = ["json"] } +serde_urlencoded = "0.7" +serde_repr = "0.1" +serde_bytes = "0.11" +local-ip-address = "0.5" +torrust-tracker-test-helpers = { version = "3.0.0-alpha.1", path = "packages/test-helpers" } + +[workspace] +members = [ + "packages/configuration", + "packages/primitives", + "packages/test-helpers", + "packages/located-error", +] + +[profile.dev] +debug = 1 +opt-level = 1 +lto = "thin" + +[profile.release] +debug = 1 +opt-level = 3 +lto = "fat" diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..96d21fa84 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,80 @@ +FROM clux/muslrust:stable AS chef +WORKDIR /app +RUN cargo install cargo-chef + + +FROM chef AS planner +WORKDIR /app +COPY . . +RUN cargo chef prepare --recipe-path recipe.json + + +FROM chef as development +WORKDIR /app +ARG UID=1000 +ARG RUN_AS_USER=appuser +ARG TRACKER_UDP_PORT=6969 +ARG TRACKER_HTTP_PORT=7070 +ARG TRACKER_API_PORT=1212 +# Add the app user for development +ENV USER=appuser +ENV UID=$UID +RUN adduser --uid "${UID}" "${USER}" +# Build dependencies +COPY --from=planner /app/recipe.json recipe.json +RUN cargo chef cook --recipe-path recipe.json +# Build the application +COPY . . 
+RUN cargo build --bin torrust-tracker +USER $RUN_AS_USER:$RUN_AS_USER +EXPOSE $TRACKER_UDP_PORT/udp +EXPOSE $TRACKER_HTTP_PORT/tcp +EXPOSE $TRACKER_API_PORT/tcp +CMD ["cargo", "run"] + + +FROM chef AS builder +WORKDIR /app +ARG UID=1000 +# Add the app user for production +ENV USER=appuser +ENV UID=$UID +RUN adduser \ + --disabled-password \ + --gecos "" \ + --home "/nonexistent" \ + --shell "/sbin/nologin" \ + --no-create-home \ + --uid "${UID}" \ + "${USER}" +# Build dependencies +COPY --from=planner /app/recipe.json recipe.json +RUN cargo chef cook --release --target x86_64-unknown-linux-musl --recipe-path recipe.json +# Build the application +COPY . . +RUN cargo build --release --target x86_64-unknown-linux-musl --bin torrust-tracker +# Strip the binary +# More info: https://github.com/LukeMathWalker/cargo-chef/issues/149 +RUN strip /app/target/x86_64-unknown-linux-musl/release/torrust-tracker + + +FROM alpine:latest +WORKDIR /app +ARG RUN_AS_USER=appuser +ARG TRACKER_UDP_PORT=6969 +ARG TRACKER_HTTP_PORT=7070 +ARG TRACKER_API_PORT=1212 +RUN apk --no-cache add ca-certificates +ENV TZ=Etc/UTC +ENV RUN_AS_USER=$RUN_AS_USER +COPY --from=builder /etc/passwd /etc/passwd +COPY --from=builder /etc/group /etc/group +COPY --from=builder --chown=$RUN_AS_USER \ + /app/target/x86_64-unknown-linux-musl/release/torrust-tracker \ + /app/torrust-tracker +RUN chown -R $RUN_AS_USER:$RUN_AS_USER /app +USER $RUN_AS_USER:$RUN_AS_USER +EXPOSE $TRACKER_UDP_PORT/udp +EXPOSE $TRACKER_HTTP_PORT/tcp +EXPOSE $TRACKER_API_PORT/tcp +ENTRYPOINT ["/app/torrust-tracker"] \ No newline at end of file diff --git a/README.md b/README.md index 929585c11..4e464dd68 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,8 @@ Torrust Tracker is a lightweight but incredibly powerful and feature-rich BitTor * [X] Peer authentication using time-bound keys * [X] newTrackon check supported for both HTTP, UDP, where IPv4 and IPv6 is properly handled * [X] SQLite3 Persistent loading and saving of the torrent 
hashes and completed count +* [X] MySQL support added as engine option +* [X] Periodically saving added, interval can be configured ### Implemented BEPs * [BEP 3](https://www.bittorrent.org/beps/bep_0003.html): The BitTorrent Protocol @@ -50,29 +52,26 @@ cargo build --release ```toml log_level = "info" mode = "public" +db_driver = "Sqlite3" db_path = "data.db" -persistence = false -cleanup_interval = 600 -cleanup_peerless = true -external_ip = "0.0.0.0" announce_interval = 120 -announce_interval_min = 900 -peer_timeout = 900 +min_announce_interval = 120 +max_peer_timeout = 900 on_reverse_proxy = false +external_ip = "0.0.0.0" +tracker_usage_statistics = true +persistent_torrent_completed_stat = false +inactive_peer_cleanup_interval = 600 +remove_peerless_torrents = true [[udp_trackers]] enabled = false bind_address = "0.0.0.0:6969" -[[udp_trackers]] -enabled = true -bind_address = "[::]:6969" - [[http_trackers]] enabled = true -bind_address = "0.0.0.0:6969" +bind_address = "0.0.0.0:7070" ssl_enabled = false -ssl_bind_address = "0.0.0.0:6868" ssl_cert_path = "" ssl_key_path = "" diff --git a/bin/install.sh b/bin/install.sh new file mode 100755 index 000000000..d4314ce93 --- /dev/null +++ b/bin/install.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +# Generate the default settings file if it does not exist +if ! [ -f "./config.toml" ]; then + cp ./config.toml.local ./config.toml +fi + +# Generate the sqlite database if it does not exist +if ! 
[ -f "./storage/database/data.db" ]; then + # todo: it should get the path from config.toml and only do it when we use sqlite + touch ./storage/database/data.db + echo ";" | sqlite3 ./storage/database/data.db +fi diff --git a/cSpell.json b/cSpell.json new file mode 100644 index 000000000..4a9b11ce9 --- /dev/null +++ b/cSpell.json @@ -0,0 +1,84 @@ +{ + "words": [ + "appuser", + "AUTOINCREMENT", + "automock", + "Avicora", + "Azureus", + "bencode", + "bencoded", + "binascii", + "Bitflu", + "bools", + "bufs", + "Buildx", + "byteorder", + "canonicalize", + "canonicalized", + "chrono", + "clippy", + "completei", + "dockerhub", + "downloadedi", + "filesd", + "Freebox", + "hasher", + "hexlify", + "hlocalhost", + "Hydranode", + "incompletei", + "infohash", + "infohashes", + "infoschema", + "intervali", + "lcov", + "leecher", + "leechers", + "libtorrent", + "Lphant", + "middlewares", + "mockall", + "multimap", + "myacicontext", + "nanos", + "nextest", + "nocapture", + "numwant", + "oneshot", + "ostr", + "Pando", + "proot", + "Quickstart", + "Rasterbar", + "repr", + "reqwest", + "rngs", + "rusqlite", + "rustfmt", + "Rustls", + "Seedable", + "Shareaza", + "sharktorrent", + "socketaddr", + "sqllite", + "subsec", + "Swatinem", + "Swiftbit", + "thiserror", + "Torrentstorm", + "torrust", + "torrustracker", + "trackerid", + "typenum", + "Unamed", + "untuple", + "uroot", + "Vagaa", + "Vuze", + "whitespaces", + "Xtorrent", + "Xunlei", + "xxxxxxxxxxxxxxxxxxxxd", + "yyyyyyyyyyyyyyyyyyyyd" + ] +} diff --git a/compose.yaml b/compose.yaml new file mode 100644 index 000000000..d11f9c8ae --- /dev/null +++ b/compose.yaml @@ -0,0 +1,48 @@ +name: torrust +services: + + tracker: + build: + context: . 
+ target: development + user: ${TORRUST_TRACKER_USER_UID:-1000}:${TORRUST_TRACKER_USER_UID:-1000} + tty: true + networks: + - server_side + ports: + - 6969:6969/udp + - 7070:7070 + - 1212:1212 + volumes: + - ./:/app + - ~/.cargo:/home/appuser/.cargo + depends_on: + - mysql + + mysql: + image: mysql:8.0 + command: '--default-authentication-plugin=mysql_native_password' + restart: always + healthcheck: + test: ['CMD-SHELL', 'mysqladmin ping -h 127.0.0.1 --password="$$(cat /run/secrets/db-password)" --silent'] + interval: 3s + retries: 5 + start_period: 30s + environment: + - MYSQL_ROOT_HOST=% + - MYSQL_ROOT_PASSWORD=root_secret_password + - MYSQL_DATABASE=torrust_tracker + - MYSQL_USER=db_user + - MYSQL_PASSWORD=db_user_secret_password + networks: + - server_side + ports: + - 3306:3306 + volumes: + - mysql_data:/var/lib/mysql + +networks: + server_side: {} + +volumes: + mysql_data: {} \ No newline at end of file diff --git a/config.toml.local b/config.toml.local new file mode 100644 index 000000000..baf272d5a --- /dev/null +++ b/config.toml.local @@ -0,0 +1,34 @@ +log_level = "info" +mode = "public" +db_driver = "Sqlite3" +db_path = "./storage/database/data.db" +announce_interval = 120 +min_announce_interval = 120 +max_peer_timeout = 900 +on_reverse_proxy = false +external_ip = "0.0.0.0" +tracker_usage_statistics = true +persistent_torrent_completed_stat = false +inactive_peer_cleanup_interval = 600 +remove_peerless_torrents = true + +[[udp_trackers]] +enabled = false +bind_address = "0.0.0.0:6969" + +[[http_trackers]] +enabled = false +bind_address = "0.0.0.0:7070" +ssl_enabled = false +ssl_cert_path = "" +ssl_key_path = "" + +[http_api] +enabled = true +bind_address = "127.0.0.1:1212" +ssl_enabled = false +ssl_cert_path = "" +ssl_key_path = "" + +[http_api.access_tokens] +admin = "MyAccessToken" diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 000000000..e5b4dfe74 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,250 @@ +# Docker + +## 
Requirements + +- Docker version 20.10.21 +- You need to create the `storage` directory with this structure and files: + +```s +$ tree storage/ +storage/ +├── database +│   └── data.db +└── ssl_certificates + ├── localhost.crt + └── localhost.key +``` + +> NOTE: you only need the `ssl_certificates` directory and certificates in case you have enabled SSL for the one HTTP tracker or the API. + +## Dev environment + +### With docker + +Build and run locally: + +```s +docker context use default +export TORRUST_TRACKER_USER_UID=1000 +./docker/bin/build.sh $TORRUST_TRACKER_USER_UID +./bin/install.sh +./docker/bin/run.sh $TORRUST_TRACKER_USER_UID +``` + +Run using the pre-built public docker image: + +```s +export TORRUST_TRACKER_USER_UID=1000 +docker run -it \ + --user="$TORRUST_TRACKER_USER_UID" \ + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \ + --volume "$(pwd)/storage":"/app/storage" \ + torrust/tracker +``` + +> NOTES: +> +> - You have to create the SQLite DB (`data.db`) and configuration (`config.toml`) before running the tracker. See `bin/install.sh`. +> - You have to replace the user UID (`1000`) with yours. +> - Remember to switch to your default docker context `docker context use default`. + +### With docker-compose + +The docker-compose configuration includes the MySQL service configuration. If you want to use MySQL instead of SQLite you have to change your `config.toml` configuration: + +```toml +db_driver = "MySQL" +db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" +``` + +If you want to inject an environment variable into docker-compose you can use the file `.env`. There is a template `.env.local`. 
+ +Build and run it locally: + +```s +docker compose up --build +``` + +After running the "up" command you will have two running containers: + +```s +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +06feacb91a9e torrust-tracker "cargo run" 18 minutes ago Up 4 seconds 0.0.0.0:1212->1212/tcp, :::1212->1212/tcp, 0.0.0.0:7070->7070/tcp, :::7070->7070/tcp, 0.0.0.0:6969->6969/udp, :::6969->6969/udp torrust-tracker-1 +34d29e792ee2 mysql:8.0 "docker-entrypoint.s…" 18 minutes ago Up 5 seconds (healthy) 0.0.0.0:3306->3306/tcp, :::3306->3306/tcp, 33060/tcp torrust-mysql-1 +``` + +And you should be able to use the application, for example making a request to the API: + + + +You can stop the containers with: + +```s +docker compose down +``` + +Additionally, you can delete all resources (containers, volumes, networks) with: + +```s +docker compose down -v +``` + +### Access Mysql with docker + +These are some useful commands for MySQL. + +Open a shell in the MySQL container using docker or docker-compose. + +```s +docker exec -it torrust-mysql-1 /bin/bash +docker compose exec mysql /bin/bash +``` + +Connect to MySQL from inside the MySQL container or from the host: + +```s +mysql -h127.0.0.1 -uroot -proot_secret_password +``` + +The when MySQL container is started the first time, it creates the database, user, and permissions needed. +If you see the error "Host is not allowed to connect to this MySQL server" you can check that users have the right permissions in the database. Make sure the user `root` and `db_user` can connect from any host (`%`). 
+ +```s +mysql> SELECT host, user FROM mysql.user; ++-----------+------------------+ +| host | user | ++-----------+------------------+ +| % | db_user | +| % | root | +| localhost | mysql.infoschema | +| localhost | mysql.session | +| localhost | mysql.sys | +| localhost | root | ++-----------+------------------+ +6 rows in set (0.00 sec) +``` + +If the database, user or permissions are not created the reason could be the MySQL container volume can be corrupted. Delete it and start again the containers. + +### SSL Certificates + +You can use a certificate for localhost. You can create your [localhost certificate](https://letsencrypt.org/docs/certificates-for-localhost/#making-and-trusting-your-own-certificates) and use it in the `storage` folder and the configuration file (`config.toml`). For example: + +The storage folder must contain your certificates: + +```s +$ tree storage/ +storage/ +├── database +│   └── data.db +└── ssl_certificates + ├── localhost.crt + └── localhost.key +``` + +You have not enabled it in your `config.toml` file: + +```toml +... +[[http_trackers]] +enabled = true +bind_address = "0.0.0.0:7070" +ssl_enabled = true +ssl_cert_path = "./storage/ssl_certificates/localhost.crt" +ssl_key_path = "./storage/ssl_certificates/localhost.key" + +[http_api] +enabled = true +bind_address = "0.0.0.0:1212" +ssl_enabled = true +ssl_cert_path = "./storage/ssl_certificates/localhost.crt" +ssl_key_path = "./storage/ssl_certificates/localhost.key" +... +``` + +> NOTE: you can enable it independently for each HTTP tracker or the API. + +If you enable the SSL certificate for the API, for example, you can load the API with this URL: + + + +## Prod environment + +In this section, you will learn how to deploy the tracker to a single docker container in Azure Container Instances. + +> NOTE: Azure Container Instances is a solution when you want to run an isolated container. 
If you need full container orchestration, including service discovery across multiple containers, automatic scaling, and coordinated application upgrades, we recommend [Kubernetes](https://kubernetes.io/). + +Deploy to Azure Container Instance following [docker documentation](https://docs.docker.com/cloud/aci-integration/). + +You have to create the ACI context and the storage: + +```s +docker context create aci myacicontext +docker context use myacicontext +docker volume create test-volume --storage-account torrustracker +``` + +You need to create all the files needed by the application in the storage dir `storage/database`. + +And finally, you can run the container: + +```s +docker run \ + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \ + --volume torrustracker/test-volume:/app/storage \ + registry.hub.docker.com/torrust/tracker:latest +``` + +Detach from container logs when the container starts. By default, the command line stays attached and follows container logs. + +```s +docker run \ + --detach \ + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \ + --volume torrustracker/test-volume:/app/storage \ + registry.hub.docker.com/torrust/tracker:latest +``` + +You should see something like this: + +```s +[+] Running 2/2 + ⠿ Group intelligent-hawking Created 5.0s + ⠿ intelligent-hawking Created 41.7s +2022-12-08T18:39:19.697869300+00:00 [torrust_tracker::logging][INFO] logging initialized.
+2022-12-08T18:39:19.712651100+00:00 [torrust_tracker::jobs::udp_tracker][INFO] Starting UDP server on: 0.0.0.0:6969 +2022-12-08T18:39:19.712792700+00:00 [torrust_tracker::jobs::tracker_api][INFO] Starting Torrust API server on: 0.0.0.0:1212 +2022-12-08T18:39:19.725124+00:00 [torrust_tracker::jobs::tracker_api][INFO] Torrust API server started +``` + +You can see the container with: + +```s +$ docker ps +CONTAINER ID IMAGE COMMAND STATUS PORTS +intelligent-hawking registry.hub.docker.com/torrust/tracker:latest Running 4.236.213.57:6969->6969/udp, 4.236.213.57:1212->1212/tcp +``` + +After a while, you can use the tracker API `http://4.236.213.57:1212/api/stats?token=MyAccessToken` and the UDP tracker with your BitTorrent client using this tracker announce URL `udp://4.236.213.57:6969`. + +> NOTES: +> +> - [There is no support for mounting a single file](https://docs.docker.com/cloud/aci-container-features/#persistent-volumes), or mounting a subfolder from an `Azure File Share`. +> - [ACI does not allow port mapping](https://docs.docker.com/cloud/aci-integration/#exposing-ports). +> - [Azure file share volume mount requires the Linux container run as root](https://learn.microsoft.com/en-us/azure/container-instances/container-instances-volume-azure-files#limitations). +> - It can take some minutes until the public IP for the ACI container is available. +> - You can use the Azure web UI to download files from the storage. For example, the SQLite database. +> - [It seems you can only expose web interfaces on port 80 on Azure Container Instances](https://stackoverflow.com/a/56768087/3012842). Not official documentation! + +## Links + +- [Deploying Docker containers on Azure](https://docs.docker.com/cloud/aci-integration/). +- [Docker run options for ACI containers](https://docs.docker.com/cloud/aci-container-features/). 
+- [Quickstart: Deploy a container instance in Azure using the Docker CLI](https://learn.microsoft.com/en-us/azure/container-instances/quickstart-docker-cli). diff --git a/docker/bin/build.sh b/docker/bin/build.sh new file mode 100755 index 000000000..d77d1ad34 --- /dev/null +++ b/docker/bin/build.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +TORRUST_TRACKER_USER_UID=${TORRUST_TRACKER_USER_UID:-1000} +TORRUST_TRACKER_RUN_AS_USER=${TORRUST_TRACKER_RUN_AS_USER:-appuser} + +echo "Building docker image ..." +echo "TORRUST_TRACKER_USER_UID: $TORRUST_TRACKER_USER_UID" +echo "TORRUST_TRACKER_RUN_AS_USER: $TORRUST_TRACKER_RUN_AS_USER" + +docker build \ + --build-arg UID="$TORRUST_TRACKER_USER_UID" \ + --build-arg RUN_AS_USER="$TORRUST_TRACKER_RUN_AS_USER" \ + -t torrust-tracker . diff --git a/docker/bin/install.sh b/docker/bin/install.sh new file mode 100755 index 000000000..a58969378 --- /dev/null +++ b/docker/bin/install.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +./docker/bin/build.sh +./bin/install.sh diff --git a/docker/bin/run.sh b/docker/bin/run.sh new file mode 100755 index 000000000..86465baeb --- /dev/null +++ b/docker/bin/run.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +TORRUST_TRACKER_USER_UID=${TORRUST_TRACKER_USER_UID:-1000} +TORRUST_TRACKER_CONFIG=$(cat config.toml) + +docker run -it \ + --user="$TORRUST_TRACKER_USER_UID" \ + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \ + --env TORRUST_TRACKER_CONFIG="$TORRUST_TRACKER_CONFIG" \ + --volume "$(pwd)/storage":"/app/storage" \ + torrust-tracker diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml new file mode 100644 index 000000000..aade6272d --- /dev/null +++ b/packages/configuration/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "torrust-tracker-configuration" +description = "A library to provide configuration to the Torrust Tracker." 
+license = "AGPL-3.0" +version.workspace = true +authors.workspace = true +edition.workspace = true + +[dependencies] +serde = { version = "1.0", features = ["derive"] } +serde_with = "2.0" +config = "0.13" +toml = "0.5" +log = { version = "0.4", features = ["release_max_level_info"] } +thiserror = "1.0" +torrust-tracker-primitives = { version = "3.0.0-alpha.1", path = "../primitives" } +torrust-tracker-located-error = { version = "3.0.0-alpha.1", path = "../located-error" } +uuid = { version = "1", features = ["v4"] } diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs new file mode 100644 index 000000000..d42c82df9 --- /dev/null +++ b/packages/configuration/src/lib.rs @@ -0,0 +1,345 @@ +use std::collections::{HashMap, HashSet}; +use std::net::IpAddr; +use std::panic::Location; +use std::path::Path; +use std::str::FromStr; +use std::sync::Arc; +use std::{env, fs}; + +use config::{Config, ConfigError, File, FileFormat}; +use log::warn; +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, NoneAsEmptyString}; +use thiserror::Error; +use torrust_tracker_located_error::{Located, LocatedError}; +use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; + +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct UdpTracker { + pub enabled: bool, + pub bind_address: String, +} + +#[serde_as] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct HttpTracker { + pub enabled: bool, + pub bind_address: String, + pub ssl_enabled: bool, + #[serde_as(as = "NoneAsEmptyString")] + pub ssl_cert_path: Option, + #[serde_as(as = "NoneAsEmptyString")] + pub ssl_key_path: Option, +} + +#[serde_as] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct HttpApi { + pub enabled: bool, + pub bind_address: String, + pub ssl_enabled: bool, + #[serde_as(as = "NoneAsEmptyString")] + pub ssl_cert_path: Option, + #[serde_as(as = "NoneAsEmptyString")] + pub ssl_key_path: Option, + pub 
access_tokens: HashMap, +} + +impl HttpApi { + #[must_use] + pub fn contains_token(&self, token: &str) -> bool { + let tokens: HashMap = self.access_tokens.clone(); + let tokens: HashSet = tokens.into_values().collect(); + tokens.contains(token) + } +} + +#[allow(clippy::struct_excessive_bools)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] +pub struct Configuration { + pub log_level: Option, + pub mode: TrackerMode, + pub db_driver: DatabaseDriver, + pub db_path: String, + pub announce_interval: u32, + pub min_announce_interval: u32, + pub max_peer_timeout: u32, + pub on_reverse_proxy: bool, + pub external_ip: Option, + pub tracker_usage_statistics: bool, + pub persistent_torrent_completed_stat: bool, + pub inactive_peer_cleanup_interval: u64, + pub remove_peerless_torrents: bool, + pub udp_trackers: Vec, + pub http_trackers: Vec, + pub http_api: HttpApi, +} + +#[derive(Error, Debug)] +pub enum Error { + #[error("Unable to load from Environmental Variable: {source}")] + UnableToLoadFromEnvironmentVariable { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + + #[error("Default configuration created at: `{path}`, please review and reload tracker, {location}")] + CreatedNewConfigHalt { + location: &'static Location<'static>, + path: String, + }, + + #[error("Failed processing the configuration: {source}")] + ConfigError { source: LocatedError<'static, ConfigError> }, +} + +impl From for Error { + #[track_caller] + fn from(err: ConfigError) -> Self { + Self::ConfigError { + source: Located(err).into(), + } + } +} + +impl Default for Configuration { + fn default() -> Self { + let mut configuration = Configuration { + log_level: Option::from(String::from("info")), + mode: TrackerMode::Public, + db_driver: DatabaseDriver::Sqlite3, + db_path: String::from("./storage/database/data.db"), + announce_interval: 120, + min_announce_interval: 120, + max_peer_timeout: 900, + on_reverse_proxy: false, + external_ip: 
Some(String::from("0.0.0.0")), + tracker_usage_statistics: true, + persistent_torrent_completed_stat: false, + inactive_peer_cleanup_interval: 600, + remove_peerless_torrents: true, + udp_trackers: Vec::new(), + http_trackers: Vec::new(), + http_api: HttpApi { + enabled: true, + bind_address: String::from("127.0.0.1:1212"), + ssl_enabled: false, + ssl_cert_path: None, + ssl_key_path: None, + access_tokens: [(String::from("admin"), String::from("MyAccessToken"))] + .iter() + .cloned() + .collect(), + }, + }; + configuration.udp_trackers.push(UdpTracker { + enabled: false, + bind_address: String::from("0.0.0.0:6969"), + }); + configuration.http_trackers.push(HttpTracker { + enabled: false, + bind_address: String::from("0.0.0.0:7070"), + ssl_enabled: false, + ssl_cert_path: None, + ssl_key_path: None, + }); + configuration + } +} + +impl Configuration { + #[must_use] + pub fn get_ext_ip(&self) -> Option { + match &self.external_ip { + None => None, + Some(external_ip) => match IpAddr::from_str(external_ip) { + Ok(external_ip) => Some(external_ip), + Err(_) => None, + }, + } + } + + /// # Errors + /// + /// Will return `Err` if `path` does not exist or has a bad configuration. + pub fn load_from_file(path: &str) -> Result { + let config_builder = Config::builder(); + + #[allow(unused_assignments)] + let mut config = Config::default(); + + if Path::new(path).exists() { + config = config_builder.add_source(File::with_name(path)).build()?; + } else { + warn!("No config file found."); + warn!("Creating config file.."); + let config = Configuration::default(); + config.save_to_file(path)?; + return Err(Error::CreatedNewConfigHalt { + location: Location::caller(), + path: path.to_string(), + }); + } + + let torrust_config: Configuration = config.try_deserialize()?; + + Ok(torrust_config) + } + + /// # Errors + /// + /// Will return `Err` if the environment variable does not exist or has a bad configuration. 
+ pub fn load_from_env_var(config_env_var_name: &str) -> Result { + match env::var(config_env_var_name) { + Ok(config_toml) => { + let config_builder = Config::builder() + .add_source(File::from_str(&config_toml, FileFormat::Toml)) + .build()?; + let config = config_builder.try_deserialize()?; + Ok(config) + } + Err(e) => Err(Error::UnableToLoadFromEnvironmentVariable { + source: (Arc::new(e) as Arc).into(), + }), + } + } + + /// # Errors + /// + /// Will return `Err` if `filename` does not exist or the user does not have + /// permission to read it. + pub fn save_to_file(&self, path: &str) -> Result<(), Error> { + let toml_string = toml::to_string(self).expect("Could not encode TOML value"); + fs::write(path, toml_string).expect("Could not write to file!"); + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use crate::Configuration; + + #[cfg(test)] + fn default_config_toml() -> String { + let config = r#"log_level = "info" + mode = "public" + db_driver = "Sqlite3" + db_path = "./storage/database/data.db" + announce_interval = 120 + min_announce_interval = 120 + max_peer_timeout = 900 + on_reverse_proxy = false + external_ip = "0.0.0.0" + tracker_usage_statistics = true + persistent_torrent_completed_stat = false + inactive_peer_cleanup_interval = 600 + remove_peerless_torrents = true + + [[udp_trackers]] + enabled = false + bind_address = "0.0.0.0:6969" + + [[http_trackers]] + enabled = false + bind_address = "0.0.0.0:7070" + ssl_enabled = false + ssl_cert_path = "" + ssl_key_path = "" + + [http_api] + enabled = true + bind_address = "127.0.0.1:1212" + ssl_enabled = false + ssl_cert_path = "" + ssl_key_path = "" + + [http_api.access_tokens] + admin = "MyAccessToken" + "# + .lines() + .map(str::trim_start) + .collect::>() + .join("\n"); + config + } + + #[test] + fn configuration_should_have_default_values() { + let configuration = Configuration::default(); + + let toml = toml::to_string(&configuration).expect("Could not encode TOML value"); + + assert_eq!(toml, 
default_config_toml()); + } + + #[test] + fn configuration_should_contain_the_external_ip() { + let configuration = Configuration::default(); + + assert_eq!(configuration.external_ip, Some(String::from("0.0.0.0"))); + } + + #[test] + fn configuration_should_be_saved_in_a_toml_config_file() { + use std::{env, fs}; + + use uuid::Uuid; + + // Build temp config file path + let temp_directory = env::temp_dir(); + let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); + + // Convert to argument type for Configuration::save_to_file + let config_file_path = temp_file; + let path = config_file_path.to_string_lossy().to_string(); + + let default_configuration = Configuration::default(); + + default_configuration + .save_to_file(&path) + .expect("Could not save configuration to file"); + + let contents = fs::read_to_string(&path).expect("Something went wrong reading the file"); + + assert_eq!(contents, default_config_toml()); + } + + #[cfg(test)] + fn create_temp_config_file_with_default_config() -> String { + use std::env; + use std::fs::File; + use std::io::Write; + + use uuid::Uuid; + + // Build temp config file path + let temp_directory = env::temp_dir(); + let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); + + // Convert to argument type for Configuration::load_from_file + let config_file_path = temp_file.clone(); + let path = config_file_path.to_string_lossy().to_string(); + + // Write file contents + let mut file = File::create(temp_file).unwrap(); + writeln!(&mut file, "{}", default_config_toml()).unwrap(); + + path + } + + #[test] + fn configuration_should_be_loaded_from_a_toml_config_file() { + let config_file_path = create_temp_config_file_with_default_config(); + + let configuration = Configuration::load_from_file(&config_file_path).expect("Could not load configuration from file"); + + assert_eq!(configuration, Configuration::default()); + } + + #[test] + fn 
http_api_configuration_should_check_if_it_contains_a_token() { + let configuration = Configuration::default(); + + assert!(configuration.http_api.contains_token("MyAccessToken")); + assert!(!configuration.http_api.contains_token("NonExistingToken")); + } +} diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml new file mode 100644 index 000000000..f67ef340f --- /dev/null +++ b/packages/located-error/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "torrust-tracker-located-error" +description = "A library to provide error decorator with the location and the source of the original error." +license = "AGPL-3.0" +version.workspace = true +authors.workspace = true +edition.workspace = true + +[dependencies] +log = { version = "0.4", features = ["release_max_level_info"] } +thiserror = "1.0" diff --git a/packages/located-error/src/lib.rs b/packages/located-error/src/lib.rs new file mode 100644 index 000000000..d45517e5a --- /dev/null +++ b/packages/located-error/src/lib.rs @@ -0,0 +1,103 @@ +// https://stackoverflow.com/questions/74336993/getting-line-numbers-with-when-using-boxdyn-stderrorerror + +use std::error::Error; +use std::panic::Location; +use std::sync::Arc; + +pub struct Located(pub E); + +#[derive(Debug)] +pub struct LocatedError<'a, E> +where + E: Error + ?Sized + Send + Sync, +{ + source: Arc, + location: Box>, +} + +impl<'a, E> std::fmt::Display for LocatedError<'a, E> +where + E: Error + ?Sized + Send + Sync, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}, {}", self.source, self.location) + } +} + +impl<'a, E> Error for LocatedError<'a, E> +where + E: Error + ?Sized + Send + Sync + 'static, +{ + fn source(&self) -> Option<&(dyn Error + 'static)> { + Some(&self.source) + } +} + +impl<'a, E> Clone for LocatedError<'a, E> +where + E: Error + ?Sized + Send + Sync, +{ + fn clone(&self) -> Self { + LocatedError { + source: self.source.clone(), + location: self.location.clone(), + } + } +} + 
+#[allow(clippy::from_over_into)] +impl<'a, E> Into> for Located +where + E: Error + Send + Sync, + Arc: Clone, +{ + #[track_caller] + fn into(self) -> LocatedError<'a, E> { + let e = LocatedError { + source: Arc::new(self.0), + location: Box::new(*std::panic::Location::caller()), + }; + log::debug!("{e}"); + e + } +} + +#[allow(clippy::from_over_into)] +impl<'a> Into> for Arc { + #[track_caller] + fn into(self) -> LocatedError<'a, dyn std::error::Error + Send + Sync> { + LocatedError { + source: self, + location: Box::new(*std::panic::Location::caller()), + } + } +} + +#[cfg(test)] +mod tests { + use std::panic::Location; + + use super::LocatedError; + use crate::located_error::Located; + + #[derive(thiserror::Error, Debug)] + enum TestError { + #[error("Test")] + Test, + } + + #[track_caller] + fn get_caller_location() -> Location<'static> { + *Location::caller() + } + + #[test] + fn error_should_include_location() { + let e = TestError::Test; + + let b: LocatedError = Located(e).into(); + let l = get_caller_location(); + + assert_eq!(b.location.file(), l.file()); + } +} diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml new file mode 100644 index 000000000..bba45cf5d --- /dev/null +++ b/packages/primitives/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "torrust-tracker-primitives" +description = "A library with the primitive types shared by the Torrust tracker packages." +license = "AGPL-3.0" +version.workspace = true +authors.workspace = true +edition.workspace = true + +[dependencies] +serde = { version = "1.0", features = ["derive"] } +derive_more = "0.99" diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs new file mode 100644 index 000000000..bcd48145f --- /dev/null +++ b/packages/primitives/src/lib.rs @@ -0,0 +1,27 @@ +use serde::{Deserialize, Serialize}; + +// TODO: Move to the database crate once that gets its own crate. 
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, derive_more::Display, Clone)] +pub enum DatabaseDriver { + Sqlite3, + MySQL, +} + +#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] +pub enum TrackerMode { + // Will track every new info hash and serve every peer. + #[serde(rename = "public")] + Public, + + // Will only track whitelisted info hashes. + #[serde(rename = "listed")] + Listed, + + // Will only serve authenticated peers + #[serde(rename = "private")] + Private, + + // Will only track whitelisted info hashes and serve authenticated peers + #[serde(rename = "private_listed")] + PrivateListed, +} diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml new file mode 100644 index 000000000..4483f8f4d --- /dev/null +++ b/packages/test-helpers/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "torrust-tracker-test-helpers" +description = "A library providing helpers for testing the Torrust tracker." +license = "AGPL-3.0" +version.workspace = true +authors.workspace = true +edition.workspace = true + +[dependencies] +tokio = { version = "1", features = ["rt-multi-thread", "net", "sync", "macros", "signal"] } +lazy_static = "1.4" +rand = "0.8.5" +torrust-tracker-configuration = { version = "3.0.0-alpha.1", path = "../configuration"} +torrust-tracker-primitives = { version = "3.0.0-alpha.1", path = "../primitives"} diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs new file mode 100644 index 000000000..0b7a269ff --- /dev/null +++ b/packages/test-helpers/src/configuration.rs @@ -0,0 +1,123 @@ +use std::env; +use std::net::IpAddr; + +use torrust_tracker_configuration::Configuration; +use torrust_tracker_primitives::TrackerMode; + +use crate::random; + +/// This configuration is used for testing. It generates random config values so they do not collide +/// if you run more than one tracker at the same time. 
+/// +/// # Panics +/// +/// Will panic if it can't convert the temp file path to string +#[must_use] +pub fn ephemeral() -> Configuration { + // todo: disable services that are not needed. + // For example: a test for the UDP tracker should disable the API and HTTP tracker. + + let mut config = Configuration { + log_level: Some("off".to_owned()), // Change to `debug` for tests debugging + ..Default::default() + }; + + // Ephemeral socket address for API + let api_port = 0u16; + config.http_api.enabled = true; + config.http_api.bind_address = format!("127.0.0.1:{}", &api_port); + + // Ephemeral socket address for UDP tracker + let udp_port = 0u16; + config.udp_trackers[0].enabled = true; + config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", &udp_port); + + // Ephemeral socket address for HTTP tracker + let http_port = 0u16; + config.http_trackers[0].enabled = true; + config.http_trackers[0].bind_address = format!("127.0.0.1:{}", &http_port); + + // Ephemeral sqlite database + let temp_directory = env::temp_dir(); + let random_db_id = random::string(16); + let temp_file = temp_directory.join(format!("data_{random_db_id}.db")); + config.db_path = temp_file.to_str().unwrap().to_owned(); + + config +} + +#[must_use] +pub fn ephemeral_with_reverse_proxy() -> Configuration { + let mut cfg = ephemeral(); + + cfg.on_reverse_proxy = true; + + cfg +} + +#[must_use] +pub fn ephemeral_without_reverse_proxy() -> Configuration { + let mut cfg = ephemeral(); + + cfg.on_reverse_proxy = false; + + cfg +} + +#[must_use] +pub fn ephemeral_mode_public() -> Configuration { + let mut cfg = ephemeral(); + + cfg.mode = TrackerMode::Public; + + cfg +} + +#[must_use] +pub fn ephemeral_mode_private() -> Configuration { + let mut cfg = ephemeral(); + + cfg.mode = TrackerMode::Private; + + cfg +} + +#[must_use] +pub fn ephemeral_mode_whitelisted() -> Configuration { + let mut cfg = ephemeral(); + + cfg.mode = TrackerMode::Listed; + + cfg +} + +#[must_use] +pub fn 
ephemeral_mode_private_whitelisted() -> Configuration { + let mut cfg = ephemeral(); + + cfg.mode = TrackerMode::PrivateListed; + + cfg +} + +#[must_use] +pub fn ephemeral_with_external_ip(ip: IpAddr) -> Configuration { + let mut cfg = ephemeral(); + + cfg.external_ip = Some(ip.to_string()); + + cfg +} + +#[must_use] +pub fn ephemeral_ipv6() -> Configuration { + let mut cfg = ephemeral(); + + let ipv6 = format!("[::]:{}", 0); + + cfg.http_api.bind_address = ipv6.clone(); + cfg.http_trackers[0].bind_address = ipv6.clone(); + cfg.udp_trackers[0].bind_address = ipv6; + + cfg +} diff --git a/packages/test-helpers/src/lib.rs b/packages/test-helpers/src/lib.rs new file mode 100644 index 000000000..e0f350131 --- /dev/null +++ b/packages/test-helpers/src/lib.rs @@ -0,0 +1,2 @@ +pub mod configuration; +pub mod random; diff --git a/packages/test-helpers/src/random.rs b/packages/test-helpers/src/random.rs new file mode 100644 index 000000000..ffb2ccd6f --- /dev/null +++ b/packages/test-helpers/src/random.rs @@ -0,0 +1,7 @@ +use rand::distributions::Alphanumeric; +use rand::{thread_rng, Rng}; + +/// Returns a random alphanumeric string of a certain size. +pub fn string(size: usize) -> String { + thread_rng().sample_iter(&Alphanumeric).take(size).map(char::from).collect() +} diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 000000000..3e878b271 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,4 @@ +max_width = 130 +imports_granularity = "Module" +group_imports = "StdExternalCrate" + diff --git a/src/app.rs b/src/app.rs new file mode 100644 index 000000000..5f75449ca --- /dev/null +++ b/src/app.rs @@ -0,0 +1,68 @@ +use std::sync::Arc; + +use log::warn; +use tokio::task::JoinHandle; +use torrust_tracker_configuration::Configuration; + +use crate::bootstrap::jobs::{http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; +use crate::servers::http::Version; +use crate::tracker; + +/// # Panics +/// +/// Will panic if the socket address for API can't be parsed. 
+pub async fn start(config: Arc, tracker: Arc) -> Vec> { + let mut jobs: Vec> = Vec::new(); + + // Load peer keys + if tracker.is_private() { + tracker + .load_keys_from_database() + .await + .expect("Could not retrieve keys from database."); + } + + // Load whitelisted torrents + if tracker.is_whitelisted() { + tracker + .load_whitelist_from_database() + .await + .expect("Could not load whitelist from database."); + } + + // Start the UDP blocks + for udp_tracker_config in &config.udp_trackers { + if !udp_tracker_config.enabled { + continue; + } + + if tracker.is_private() { + warn!( + "Could not start UDP tracker on: {} while in {:?}. UDP is not safe for private trackers!", + udp_tracker_config.bind_address, config.mode + ); + } else { + jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone())); + } + } + + // Start the HTTP blocks + for http_tracker_config in &config.http_trackers { + if !http_tracker_config.enabled { + continue; + } + jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone(), Version::V1).await); + } + + // Start HTTP API + if config.http_api.enabled { + jobs.push(tracker_apis::start_job(&config.http_api, tracker.clone()).await); + } + + // Remove torrents without peers, every interval + if config.inactive_peer_cleanup_interval > 0 { + jobs.push(torrent_cleanup::start_job(&config, &tracker)); + } + + jobs +} diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs new file mode 100644 index 000000000..e845feac0 --- /dev/null +++ b/src/bootstrap/app.rs @@ -0,0 +1,60 @@ +use std::env; +use std::sync::Arc; + +use torrust_tracker_configuration::Configuration; + +use crate::bootstrap; +use crate::shared::clock::static_time; +use crate::shared::crypto::ephemeral_instance_keys; +use crate::tracker::services::tracker_factory; +use crate::tracker::Tracker; + +#[must_use] +pub fn setup() -> (Arc, Arc) { + let configuration = Arc::new(initialize_configuration()); + let tracker = initialize_with_configuration(&configuration); + + 
(configuration, tracker) +} + +#[must_use] +pub fn initialize_with_configuration(configuration: &Arc) -> Arc { + initialize_static(); + initialize_logging(configuration); + Arc::new(initialize_tracker(configuration)) +} + +pub fn initialize_static() { + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); + + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); +} + +/// # Panics +/// +/// Will panic if it can't load the configuration from either +/// `./config.toml` file or the env var `TORRUST_TRACKER_CONFIG`. +#[must_use] +fn initialize_configuration() -> Configuration { + const CONFIG_PATH: &str = "./config.toml"; + const CONFIG_ENV_VAR_NAME: &str = "TORRUST_TRACKER_CONFIG"; + + if env::var(CONFIG_ENV_VAR_NAME).is_ok() { + println!("Loading configuration from env var {CONFIG_ENV_VAR_NAME}"); + Configuration::load_from_env_var(CONFIG_ENV_VAR_NAME).unwrap() + } else { + println!("Loading configuration from config file {CONFIG_PATH}"); + Configuration::load_from_file(CONFIG_PATH).unwrap() + } +} + +#[must_use] +pub fn initialize_tracker(config: &Arc) -> Tracker { + tracker_factory(config.clone()) +} + +pub fn initialize_logging(config: &Arc) { + bootstrap::logging::setup(config); +} diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs new file mode 100644 index 000000000..43bd0076f --- /dev/null +++ b/src/bootstrap/jobs/http_tracker.rs @@ -0,0 +1,74 @@ +use std::sync::Arc; + +use axum_server::tls_rustls::RustlsConfig; +use log::info; +use tokio::sync::oneshot; +use tokio::task::JoinHandle; +use torrust_tracker_configuration::HttpTracker; + +use crate::servers::http::v1::launcher; +use crate::servers::http::Version; +use crate::tracker; + +#[derive(Debug)] +pub struct ServerJobStarted(); + +pub async fn start_job(config: &HttpTracker, tracker: Arc, version: Version) -> JoinHandle<()> { + match version { + Version::V1 => 
start_v1(config, tracker.clone()).await, + } +} + +/// # Panics +/// +/// It would panic if the `config::HttpTracker` struct would contain inappropriate values. +async fn start_v1(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { + let bind_addr = config + .bind_address + .parse::() + .expect("Tracker API bind_address invalid."); + let ssl_enabled = config.ssl_enabled; + let ssl_cert_path = config.ssl_cert_path.clone(); + let ssl_key_path = config.ssl_key_path.clone(); + + let (tx, rx) = oneshot::channel::(); + + // Run the API server + let join_handle = tokio::spawn(async move { + if !ssl_enabled { + info!("Starting Torrust HTTP tracker server on: http://{}", bind_addr); + + let handle = launcher::start(bind_addr, tracker); + + tx.send(ServerJobStarted()) + .expect("the HTTP tracker server should not be dropped"); + + if let Ok(()) = handle.await { + info!("Torrust HTTP tracker server on http://{} stopped", bind_addr); + } + } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { + info!("Starting Torrust HTTP tracker server on: https://{}", bind_addr); + + let ssl_config = RustlsConfig::from_pem_file(ssl_cert_path.unwrap(), ssl_key_path.unwrap()) + .await + .unwrap(); + + let handle = launcher::start_tls(bind_addr, ssl_config, tracker); + + tx.send(ServerJobStarted()) + .expect("the HTTP tracker server should not be dropped"); + + if let Ok(()) = handle.await { + info!("Torrust HTTP tracker server on https://{} stopped", bind_addr); + } + } + }); + + // Wait until the HTTP tracker server job is running + match rx.await { + Ok(_msg) => info!("Torrust HTTP tracker server started"), + Err(e) => panic!("the HTTP tracker server was dropped: {e}"), + } + + join_handle +} diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs new file mode 100644 index 000000000..ba44a56ad --- /dev/null +++ b/src/bootstrap/jobs/mod.rs @@ -0,0 +1,4 @@ +pub mod http_tracker; +pub mod torrent_cleanup; +pub mod tracker_apis; +pub mod udp_tracker; diff 
--git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs new file mode 100644 index 000000000..64240bffe --- /dev/null +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -0,0 +1,39 @@ +use std::sync::Arc; + +use chrono::Utc; +use log::info; +use tokio::task::JoinHandle; +use torrust_tracker_configuration::Configuration; + +use crate::tracker; + +#[must_use] +pub fn start_job(config: &Arc, tracker: &Arc) -> JoinHandle<()> { + let weak_tracker = std::sync::Arc::downgrade(tracker); + let interval = config.inactive_peer_cleanup_interval; + + tokio::spawn(async move { + let interval = std::time::Duration::from_secs(interval); + let mut interval = tokio::time::interval(interval); + interval.tick().await; + + loop { + tokio::select! { + _ = tokio::signal::ctrl_c() => { + info!("Stopping torrent cleanup job.."); + break; + } + _ = interval.tick() => { + if let Some(tracker) = weak_tracker.upgrade() { + let start_time = Utc::now().time(); + info!("Cleaning up torrents.."); + tracker.cleanup_torrents().await; + info!("Cleaned up torrents in: {}ms", (Utc::now().time() - start_time).num_milliseconds()); + } else { + break; + } + } + } + } + }) +} diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs new file mode 100644 index 000000000..cdebc21a8 --- /dev/null +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -0,0 +1,65 @@ +use std::sync::Arc; + +use axum_server::tls_rustls::RustlsConfig; +use log::info; +use tokio::sync::oneshot; +use tokio::task::JoinHandle; +use torrust_tracker_configuration::HttpApi; + +use crate::servers::apis::server; +use crate::tracker; + +#[derive(Debug)] +pub struct ApiServerJobStarted(); + +/// # Panics +/// +/// It would panic if unable to send the `ApiServerJobStarted` notice. 
+pub async fn start_job(config: &HttpApi, tracker: Arc) -> JoinHandle<()> { + let bind_addr = config + .bind_address + .parse::() + .expect("Tracker API bind_address invalid."); + let ssl_enabled = config.ssl_enabled; + let ssl_cert_path = config.ssl_cert_path.clone(); + let ssl_key_path = config.ssl_key_path.clone(); + + let (tx, rx) = oneshot::channel::(); + + // Run the API server + let join_handle = tokio::spawn(async move { + if !ssl_enabled { + info!("Starting Torrust APIs server on: http://{}", bind_addr); + + let handle = server::start(bind_addr, tracker); + + tx.send(ApiServerJobStarted()).expect("the API server should not be dropped"); + + if let Ok(()) = handle.await { + info!("Torrust APIs server on http://{} stopped", bind_addr); + } + } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { + info!("Starting Torrust APIs server on: https://{}", bind_addr); + + let ssl_config = RustlsConfig::from_pem_file(ssl_cert_path.unwrap(), ssl_key_path.unwrap()) + .await + .unwrap(); + + let handle = server::start_tls(bind_addr, ssl_config, tracker); + + tx.send(ApiServerJobStarted()).expect("the API server should not be dropped"); + + if let Ok(()) = handle.await { + info!("Torrust APIs server on https://{} stopped", bind_addr); + } + } + }); + + // Wait until the APIs server job is running + match rx.await { + Ok(_msg) => info!("Torrust APIs server started"), + Err(e) => panic!("the API server was dropped: {e}"), + } + + join_handle +} diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs new file mode 100644 index 000000000..138222daf --- /dev/null +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -0,0 +1,26 @@ +use std::sync::Arc; + +use log::{error, info, warn}; +use tokio::task::JoinHandle; +use torrust_tracker_configuration::UdpTracker; + +use crate::servers::udp::server::Udp; +use crate::tracker; + +#[must_use] +pub fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHandle<()> { + let bind_addr = 
config.bind_address.clone(); + + tokio::spawn(async move { + match Udp::new(&bind_addr).await { + Ok(udp_server) => { + info!("Starting UDP server on: udp://{}", bind_addr); + udp_server.start(tracker).await; + } + Err(e) => { + warn!("Could not start UDP tracker on: udp://{}", bind_addr); + error!("{}", e); + } + } + }) +} diff --git a/src/bootstrap/logging.rs b/src/bootstrap/logging.rs new file mode 100644 index 000000000..83e2c9360 --- /dev/null +++ b/src/bootstrap/logging.rs @@ -0,0 +1,47 @@ +use std::str::FromStr; +use std::sync::Once; + +use log::{info, LevelFilter}; +use torrust_tracker_configuration::Configuration; + +static INIT: Once = Once::new(); + +pub fn setup(cfg: &Configuration) { + let level = config_level_or_default(&cfg.log_level); + + if level == log::LevelFilter::Off { + return; + } + + INIT.call_once(|| { + stdout_config(level); + }); +} + +fn config_level_or_default(log_level: &Option) -> LevelFilter { + match log_level { + None => log::LevelFilter::Info, + Some(level) => LevelFilter::from_str(level).unwrap(), + } +} + +fn stdout_config(level: LevelFilter) { + if let Err(_err) = fern::Dispatch::new() + .format(|out, message, record| { + out.finish(format_args!( + "{} [{}][{}] {}", + chrono::Local::now().format("%+"), + record.target(), + record.level(), + message + )); + }) + .level(level) + .chain(std::io::stdout()) + .apply() + { + panic!("Failed to initialize logging.") + } + + info!("logging initialized."); +} diff --git a/src/bootstrap/mod.rs b/src/bootstrap/mod.rs new file mode 100644 index 000000000..e3b6467ee --- /dev/null +++ b/src/bootstrap/mod.rs @@ -0,0 +1,3 @@ +pub mod app; +pub mod jobs; +pub mod logging; diff --git a/src/common.rs b/src/common.rs deleted file mode 100644 index 4d2f5ec71..000000000 --- a/src/common.rs +++ /dev/null @@ -1,242 +0,0 @@ -use serde::{Deserialize, Serialize}; -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - -pub const MAX_SCRAPE_TORRENTS: u8 = 74; -pub const AUTH_KEY_LENGTH: usize = 32; - 
-#[repr(u32)] -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] -pub enum Actions { - Connect = 0, - Announce = 1, - Scrape = 2, - Error = 3, -} - -#[derive(Serialize, Deserialize)] -#[serde(remote = "AnnounceEvent")] -pub enum AnnounceEventDef { - Started, - Stopped, - Completed, - None -} - -#[derive(Serialize, Deserialize)] -#[serde(remote = "NumberOfBytes")] -pub struct NumberOfBytesDef(pub i64); - -#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, Ord)] -pub struct InfoHash(pub [u8; 20]); - -impl InfoHash { - pub fn to_string(&self) -> String { - let mut buffer = [0u8; 40]; - let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); - String::from(std::str::from_utf8(bytes_out).unwrap()) - } -} - -impl std::fmt::Display for InfoHash { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let mut chars = [0u8; 40]; - binascii::bin2hex(&self.0, &mut chars).expect("failed to hexlify"); - write!(f, "{}", std::str::from_utf8(&chars).unwrap()) - } -} - -impl std::str::FromStr for InfoHash { - type Err = binascii::ConvertError; - - fn from_str(s: &str) -> Result { - let mut i = Self { 0: [0u8; 20] }; - if s.len() != 40 { - return Err(binascii::ConvertError::InvalidInputLength); - } - binascii::hex2bin(s.as_bytes(), &mut i.0)?; - Ok(i) - } -} - -impl std::cmp::PartialOrd for InfoHash { - fn partial_cmp(&self, other: &InfoHash) -> Option { - self.0.partial_cmp(&other.0) - } -} - -impl std::convert::From<&[u8]> for InfoHash { - fn from(data: &[u8]) -> InfoHash { - assert_eq!(data.len(), 20); - let mut ret = InfoHash { 0: [0u8; 20] }; - ret.0.clone_from_slice(data); - return ret; - } -} - -impl std::convert::Into for [u8; 20] { - fn into(self) -> InfoHash { - InfoHash { 0: self } - } -} - -impl serde::ser::Serialize for InfoHash { - fn serialize(&self, serializer: S) -> Result { - let mut buffer = [0u8; 40]; - let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); - let str_out = 
std::str::from_utf8(bytes_out).unwrap(); - serializer.serialize_str(str_out) - } -} - -impl<'de> serde::de::Deserialize<'de> for InfoHash { - fn deserialize>(des: D) -> Result { - des.deserialize_str(InfoHashVisitor) - } -} - -struct InfoHashVisitor; - -impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { - type Value = InfoHash; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a 40 character long hash") - } - - fn visit_str(self, v: &str) -> Result { - if v.len() != 40 { - return Err(serde::de::Error::invalid_value( - serde::de::Unexpected::Str(v), - &"expected a 40 character long string", - )); - } - - let mut res = InfoHash { 0: [0u8; 20] }; - - if let Err(_) = binascii::hex2bin(v.as_bytes(), &mut res.0) { - return Err(serde::de::Error::invalid_value( - serde::de::Unexpected::Str(v), - &"expected a hexadecimal string", - )); - } else { - return Ok(res); - } - } -} - -#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord)] -pub struct PeerId(pub [u8; 20]); - -impl PeerId { - pub fn to_string(&self) -> String { - let mut buffer = [0u8; 20]; - let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok(); - return if let Some(bytes_out) = bytes_out { - String::from(std::str::from_utf8(bytes_out).unwrap()) - } else { - "".to_string() - } - } -} - -impl PeerId { - pub fn get_client_name(&self) -> Option<&'static str> { - if self.0[0] == b'M' { - return Some("BitTorrent"); - } - if self.0[0] == b'-' { - let name = match &self.0[1..3] { - b"AG" => "Ares", - b"A~" => "Ares", - b"AR" => "Arctic", - b"AV" => "Avicora", - b"AX" => "BitPump", - b"AZ" => "Azureus", - b"BB" => "BitBuddy", - b"BC" => "BitComet", - b"BF" => "Bitflu", - b"BG" => "BTG (uses Rasterbar libtorrent)", - b"BR" => "BitRocket", - b"BS" => "BTSlave", - b"BX" => "~Bittorrent X", - b"CD" => "Enhanced CTorrent", - b"CT" => "CTorrent", - b"DE" => "DelugeTorrent", - b"DP" => "Propagate Data Client", - b"EB" => "EBit", - b"ES" => "electric sheep", - 
b"FT" => "FoxTorrent", - b"FW" => "FrostWire", - b"FX" => "Freebox BitTorrent", - b"GS" => "GSTorrent", - b"HL" => "Halite", - b"HN" => "Hydranode", - b"KG" => "KGet", - b"KT" => "KTorrent", - b"LH" => "LH-ABC", - b"LP" => "Lphant", - b"LT" => "libtorrent", - b"lt" => "libTorrent", - b"LW" => "LimeWire", - b"MO" => "MonoTorrent", - b"MP" => "MooPolice", - b"MR" => "Miro", - b"MT" => "MoonlightTorrent", - b"NX" => "Net Transport", - b"PD" => "Pando", - b"qB" => "qBittorrent", - b"QD" => "QQDownload", - b"QT" => "Qt 4 Torrent example", - b"RT" => "Retriever", - b"S~" => "Shareaza alpha/beta", - b"SB" => "~Swiftbit", - b"SS" => "SwarmScope", - b"ST" => "SymTorrent", - b"st" => "sharktorrent", - b"SZ" => "Shareaza", - b"TN" => "TorrentDotNET", - b"TR" => "Transmission", - b"TS" => "Torrentstorm", - b"TT" => "TuoTu", - b"UL" => "uLeecher!", - b"UT" => "µTorrent", - b"UW" => "µTorrent Web", - b"VG" => "Vagaa", - b"WD" => "WebTorrent Desktop", - b"WT" => "BitLet", - b"WW" => "WebTorrent", - b"WY" => "FireTorrent", - b"XL" => "Xunlei", - b"XT" => "XanTorrent", - b"XX" => "Xtorrent", - b"ZT" => "ZipTorrent", - _ => return None, - }; - Some(name) - } else { - None - } - } -} -impl Serialize for PeerId { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, { - let buff_size = self.0.len() * 2; - let mut tmp: Vec = vec![0; buff_size]; - binascii::bin2hex(&self.0, &mut tmp).unwrap(); - let id = std::str::from_utf8(&tmp).ok(); - - #[derive(Serialize)] - struct PeerIdInfo<'a> { - id: Option<&'a str>, - client: Option<&'a str>, - } - - let obj = PeerIdInfo { - id, - client: self.get_client_name(), - }; - obj.serialize(serializer) - } -} diff --git a/src/config.rs b/src/config.rs deleted file mode 100644 index 94b37464d..000000000 --- a/src/config.rs +++ /dev/null @@ -1,208 +0,0 @@ -pub use crate::tracker::TrackerMode; -use serde::{Serialize, Deserialize, Serializer}; -use std; -use std::collections::HashMap; -use std::fs; -use toml; -use 
std::net::{IpAddr}; -use std::path::Path; -use std::str::FromStr; -use config::{ConfigError, Config, File}; - -#[derive(Serialize, Deserialize, PartialEq)] -pub enum TrackerServer { - UDP, - HTTP -} - -#[derive(Serialize, Deserialize)] -pub struct UdpTrackerConfig { - pub enabled: bool, - pub bind_address: String, -} - -#[derive(Serialize, Deserialize)] -pub struct HttpTrackerConfig { - pub enabled: bool, - pub bind_address: String, - pub ssl_enabled: bool, - pub ssl_bind_address: String, - #[serde(serialize_with = "none_as_empty_string")] - pub ssl_cert_path: Option, - #[serde(serialize_with = "none_as_empty_string")] - pub ssl_key_path: Option -} - -impl HttpTrackerConfig { - pub fn is_ssl_enabled(&self) -> bool { - self.ssl_enabled && self.ssl_cert_path.is_some() && self.ssl_key_path.is_some() - } -} - -#[derive(Serialize, Deserialize)] -pub struct HttpApiConfig { - pub enabled: bool, - pub bind_address: String, - pub access_tokens: HashMap, -} - -#[derive(Serialize, Deserialize)] -pub struct Configuration { - pub log_level: Option, - pub mode: TrackerMode, - pub db_path: String, - pub persistence: bool, - pub cleanup_interval: Option, - pub cleanup_peerless: bool, - pub external_ip: Option, - pub announce_interval: u32, - pub announce_interval_min: u32, - pub peer_timeout: u32, - pub on_reverse_proxy: bool, - pub udp_trackers: Vec, - pub http_trackers: Vec, - pub http_api: HttpApiConfig, -} - -#[derive(Debug)] -pub enum ConfigurationError { - IOError(std::io::Error), - ParseError(toml::de::Error), - TrackerModeIncompatible, -} - -impl std::fmt::Display for ConfigurationError { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - ConfigurationError::IOError(e) => e.fmt(f), - ConfigurationError::ParseError(e) => e.fmt(f), - _ => write!(f, "{:?}", self) - } - } -} - -impl std::error::Error for ConfigurationError {} - -pub fn none_as_empty_string(option: &Option, serializer: S) -> Result - where - T: Serialize, - S: Serializer, -{ - 
if let Some(value) = option { - value.serialize(serializer) - } else { - "".serialize(serializer) - } -} - -impl Configuration { - pub fn load(data: &[u8]) -> Result { - toml::from_slice(data) - } - - pub fn load_file(path: &str) -> Result { - match std::fs::read(path) { - Err(e) => Err(ConfigurationError::IOError(e)), - Ok(data) => { - match Self::load(data.as_slice()) { - Ok(cfg) => { - Ok(cfg) - }, - Err(e) => Err(ConfigurationError::ParseError(e)), - } - } - } - } - - pub fn get_ext_ip(&self) -> Option { - match &self.external_ip { - None => None, - Some(external_ip) => { - match IpAddr::from_str(external_ip) { - Ok(external_ip) => Some(external_ip), - Err(_) => None - } - } - } - } -} - -impl Configuration { - pub fn default() -> Configuration { - let mut configuration = Configuration { - log_level: Option::from(String::from("info")), - mode: TrackerMode::PublicMode, - db_path: String::from("data.db"), - persistence: false, - cleanup_interval: Some(600), - cleanup_peerless: true, - external_ip: Some(String::from("0.0.0.0")), - announce_interval: 120, - announce_interval_min: 120, - peer_timeout: 900, - on_reverse_proxy: false, - udp_trackers: Vec::new(), - http_trackers: Vec::new(), - http_api: HttpApiConfig { - enabled: true, - bind_address: String::from("127.0.0.1:1212"), - access_tokens: [(String::from("admin"), String::from("MyAccessToken"))].iter().cloned().collect(), - } - }; - configuration.udp_trackers.push( - UdpTrackerConfig{ - enabled: false, - bind_address: String::from("0.0.0.0:6969") - } - ); - configuration.http_trackers.push( - HttpTrackerConfig{ - enabled: false, - bind_address: String::from("0.0.0.0:6969"), - ssl_enabled: false, - ssl_bind_address: String::from("0.0.0.0:6868"), - ssl_cert_path: None, - ssl_key_path: None - } - ); - configuration - } - - pub fn verify(&self) -> Result<(), ConfigurationError> { - // UDP is not secure for sending private keys - if self.mode == TrackerMode::PrivateMode || self.mode == 
TrackerMode::PrivateListedMode { - return Err(ConfigurationError::TrackerModeIncompatible) - } - - Ok(()) - } - - pub fn load_from_file() -> Result { - let mut config = Config::new(); - - const CONFIG_PATH: &str = "config.toml"; - - if Path::new(CONFIG_PATH).exists() { - config.merge(File::with_name(CONFIG_PATH))?; - } else { - eprintln!("No config file found."); - eprintln!("Creating config file.."); - let config = Configuration::default(); - let _ = config.save_to_file(); - return Err(ConfigError::Message(format!("Please edit the config.TOML in the root folder and restart the tracker."))) - } - - let torrust_config: Configuration = config.try_into().map_err(|e| ConfigError::Message(format!("Errors while processing config: {}.", e)))?; - - match torrust_config.verify() { - Ok(_) => Ok(torrust_config), - Err(e) => Err(ConfigError::Message(format!("Errors while processing config: {}.", e))) - } - } - - pub fn save_to_file(&self) -> Result<(), ()>{ - let toml_string = toml::to_string(self).expect("Could not encode TOML value"); - fs::write("config.toml", toml_string).expect("Could not write to file!"); - Ok(()) - } -} diff --git a/src/database.rs b/src/database.rs deleted file mode 100644 index 82d26d6d8..000000000 --- a/src/database.rs +++ /dev/null @@ -1,196 +0,0 @@ -use std::collections::BTreeMap; -use crate::{InfoHash, AUTH_KEY_LENGTH, TorrentEntry}; -use log::debug; -use r2d2_sqlite::{SqliteConnectionManager, rusqlite}; -use r2d2::{Pool}; -use r2d2_sqlite::rusqlite::NO_PARAMS; -use crate::key_manager::AuthKey; -use std::str::FromStr; - -pub struct SqliteDatabase { - pool: Pool -} - -impl SqliteDatabase { - pub fn new(db_path: &str) -> Result { - let sqlite_connection_manager = SqliteConnectionManager::file(db_path); - let sqlite_pool = r2d2::Pool::new(sqlite_connection_manager).expect("Failed to create r2d2 SQLite connection pool."); - let sqlite_database = SqliteDatabase { - pool: sqlite_pool - }; - - if let Err(error) = 
SqliteDatabase::create_database_tables(&sqlite_database.pool) { - return Err(error) - }; - - Ok(sqlite_database) - } - - pub fn create_database_tables(pool: &Pool) -> Result { - let create_whitelist_table = " - CREATE TABLE IF NOT EXISTS whitelist ( - id integer PRIMARY KEY AUTOINCREMENT, - info_hash VARCHAR(20) NOT NULL UNIQUE - );".to_string(); - - let create_torrents_table = " - CREATE TABLE IF NOT EXISTS torrents ( - id integer PRIMARY KEY AUTOINCREMENT, - info_hash VARCHAR(20) NOT NULL UNIQUE, - completed INTEGER DEFAULT 0 NOT NULL - );".to_string(); - - let create_keys_table = format!(" - CREATE TABLE IF NOT EXISTS keys ( - id integer PRIMARY KEY AUTOINCREMENT, - key VARCHAR({}) NOT NULL UNIQUE, - valid_until INT(10) NOT NULL - );", AUTH_KEY_LENGTH as i8); - - let conn = pool.get().unwrap(); - match conn.execute(&create_whitelist_table, NO_PARAMS) { - Ok(updated) => { - match conn.execute(&create_keys_table, NO_PARAMS) { - Ok(updated2) => { - match conn.execute(&create_torrents_table, NO_PARAMS) { - Ok(updated3) => Ok(updated + updated2 + updated3), - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } - - pub async fn load_persistent_torrent_data(&self) -> Result, rusqlite::Error> { - let conn = self.pool.get().unwrap(); - let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; - - let torrent_iter = stmt.query_map(NO_PARAMS, |row| { - let info_hash_string: String = row.get(0)?; - let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); - let completed: u32 = row.get(1)?; - Ok((info_hash, completed)) - })?; - - let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(|x| x.ok() ).collect(); - - Ok(torrents) - } - - pub async fn save_persistent_torrent_data(&self, torrents: &BTreeMap) -> Result<(), rusqlite::Error> { - let mut conn = self.pool.get().unwrap(); - let db_transaction = conn.transaction()?; - - for 
(info_hash, torrent_entry) in torrents { - let (_seeders, completed, _leechers) = torrent_entry.get_stats(); - let _ = db_transaction.execute("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]); - } - - let _ = db_transaction.commit(); - - Ok(()) - } - - pub async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { - let conn = self.pool.get().unwrap(); - let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; - let mut rows = stmt.query(&[info_hash])?; - - if let Some(row) = rows.next()? { - let info_hash: String = row.get(0).unwrap(); - - // should never be able to fail - Ok(InfoHash::from_str(&info_hash).unwrap()) - } else { - Err(rusqlite::Error::QueryReturnedNoRows) - } - } - - pub async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().unwrap(); - match conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", &[info_hash.to_string()]) { - Ok(updated) => { - if updated > 0 { return Ok(updated) } - Err(rusqlite::Error::ExecuteReturnedResults) - }, - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } - - pub async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().unwrap(); - match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", &[info_hash.to_string()]) { - Ok(updated) => { - if updated > 0 { return Ok(updated) } - Err(rusqlite::Error::ExecuteReturnedResults) - }, - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } - - pub async fn get_key_from_keys(&self, key: &str) -> Result { - let conn = self.pool.get().unwrap(); - let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; - let mut rows = stmt.query(&[key.to_string()])?; - - if let Some(row) = rows.next()? 
{ - let key: String = row.get(0).unwrap(); - let valid_until_i64: i64 = row.get(1).unwrap(); - - Ok(AuthKey { - key, - valid_until: Some(valid_until_i64 as u64) - }) - } else { - Err(rusqlite::Error::QueryReturnedNoRows) - } - } - - pub async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result { - let conn = self.pool.get().unwrap(); - match conn.execute("INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", - &[auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()] - ) { - Ok(updated) => { - if updated > 0 { return Ok(updated) } - Err(rusqlite::Error::ExecuteReturnedResults) - }, - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } - - pub async fn remove_key_from_keys(&self, key: String) -> Result { - let conn = self.pool.get().unwrap(); - match conn.execute("DELETE FROM keys WHERE key = ?", &[key]) { - Ok(updated) => { - if updated > 0 { return Ok(updated) } - Err(rusqlite::Error::ExecuteReturnedResults) - }, - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } -} diff --git a/src/http_api_server.rs b/src/http_api_server.rs deleted file mode 100644 index a6bee4a14..000000000 --- a/src/http_api_server.rs +++ /dev/null @@ -1,310 +0,0 @@ -use crate::tracker::{TorrentTracker}; -use serde::{Deserialize, Serialize}; -use std::cmp::min; -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; -use warp::{filters, reply, reply::Reply, serve, Filter, Server}; -use crate::TorrentPeer; -use super::common::*; - -#[derive(Deserialize, Debug)] -struct TorrentInfoQuery { - offset: Option, - limit: Option, -} - -#[derive(Serialize)] -struct Torrent<'a> { - info_hash: &'a InfoHash, - seeders: u32, - completed: u32, - leechers: u32, - #[serde(skip_serializing_if = "Option::is_none")] - peers: Option>, -} - -#[derive(Serialize)] -struct Stats { - torrents: u32, - seeders: u32, - completed: u32, - leechers: u32, - tcp4_connections_handled: u32, - tcp4_announces_handled: u32, - tcp4_scrapes_handled: u32, - tcp6_connections_handled: u32, - 
tcp6_announces_handled: u32, - tcp6_scrapes_handled: u32, - udp4_connections_handled: u32, - udp4_announces_handled: u32, - udp4_scrapes_handled: u32, - udp6_connections_handled: u32, - udp6_announces_handled: u32, - udp6_scrapes_handled: u32, -} - -#[derive(Serialize, Debug)] -#[serde(tag = "status", rename_all = "snake_case")] -enum ActionStatus<'a> { - Ok, - Err { reason: std::borrow::Cow<'a, str> }, -} - -impl warp::reject::Reject for ActionStatus<'static> {} - -fn authenticate(tokens: HashMap) -> impl Filter + Clone { - #[derive(Deserialize)] - struct AuthToken { - token: Option, - } - - let tokens: HashSet = tokens.into_iter().map(|(_, v)| v).collect(); - - let tokens = Arc::new(tokens); - warp::filters::any::any() - .map(move || tokens.clone()) - .and(filters::query::query::()) - .and_then(|tokens: Arc>, token: AuthToken| { - async move { - match token.token { - Some(token) => { - if !tokens.contains(&token) { - return Err(warp::reject::custom(ActionStatus::Err { reason: "token not valid".into() })) - } - - Ok(()) - } - None => Err(warp::reject::custom(ActionStatus::Err { reason: "unauthorized".into() })) - } - } - }) - .untuple_one() -} - -pub fn build_server(tracker: Arc) -> Server + Clone + Send + Sync + 'static> { - // GET /api/torrents?offset=:u32&limit=:u32 - // View torrent list - let api_torrents = tracker.clone(); - let view_torrent_list = filters::method::get() - .and(filters::path::path("torrents")) - .and(filters::path::end()) - .and(filters::query::query()) - .map(move |limits| { - let tracker = api_torrents.clone(); - (limits, tracker) - }) - .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| { - async move { - let offset = limits.offset.unwrap_or(0); - let limit = min(limits.limit.unwrap_or(1000), 4000); - - let db = tracker.get_torrents().await; - let results: Vec<_> = db - .iter() - .map(|(info_hash, torrent_entry)| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - Torrent { - info_hash, - seeders, - completed, - 
leechers, - peers: None, - } - }) - .skip(offset as usize) - .take(limit as usize) - .collect(); - - Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) - } - }); - - // GET /api/stats - // View tracker status - let api_stats = tracker.clone(); - let view_stats_list = filters::method::get() - .and(filters::path::path("stats")) - .and(filters::path::end()) - .map(move || { - let tracker = api_stats.clone(); - tracker - }) - .and_then(|tracker: Arc| { - async move { - let mut results = Stats{ - torrents: 0, - seeders: 0, - completed: 0, - leechers: 0, - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 0, - udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0 - }; - let db = tracker.get_torrents().await; - let _: Vec<_> = db - .iter() - .map(|(_info_hash, torrent_entry)| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - results.seeders += seeders; - results.completed += completed; - results.leechers += leechers; - results.torrents += 1; - }) - .collect(); - let stats = tracker.get_stats().await; - results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; - results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; - results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; - results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; - results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; - results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; - results.udp4_connections_handled = stats.udp4_connections_handled as u32; - results.udp4_announces_handled = stats.udp4_announces_handled as u32; - results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; - results.udp6_connections_handled = stats.udp6_connections_handled as 
u32; - results.udp6_announces_handled = stats.udp6_announces_handled as u32; - results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; - - Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) - } - }); - - // GET /api/torrent/:info_hash - // View torrent info - let t2 = tracker.clone(); - let view_torrent_info = filters::method::get() - .and(filters::path::path("torrent")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { - let tracker = t2.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| { - async move { - let db = tracker.get_torrents().await; - let torrent_entry_option = db.get(&info_hash); - - if torrent_entry_option.is_none() { - return Err(warp::reject::custom(ActionStatus::Err { reason: "torrent does not exist".into() })) - } - - let torrent_entry = torrent_entry_option.unwrap(); - let (seeders, completed, leechers) = torrent_entry.get_stats(); - - let peers = torrent_entry.get_peers(None); - - Ok(reply::json(&Torrent { - info_hash: &info_hash, - seeders, - completed, - leechers, - peers: Some(peers), - })) - } - }); - - // DELETE /api/whitelist/:info_hash - // Delete info hash from whitelist - let t3 = tracker.clone(); - let delete_torrent = filters::method::delete() - .and(filters::path::path("whitelist")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { - let tracker = t3.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| { - async move { - match tracker.remove_torrent_from_whitelist(&info_hash).await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to remove torrent from whitelist".into() })) - } - } - }); - - // POST /api/whitelist/:info_hash - // Add info hash to whitelist - let t4 = tracker.clone(); - let add_torrent = filters::method::post() - .and(filters::path::path("whitelist")) - 
.and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { - let tracker = t4.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| { - async move { - match tracker.add_torrent_to_whitelist(&info_hash).await { - Ok(..) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(..) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to whitelist torrent".into() })) - } - } - }); - - // POST /api/key/:seconds_valid - // Generate new key - let t5 = tracker.clone(); - let create_key = filters::method::post() - .and(filters::path::path("key")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |seconds_valid: u64| { - let tracker = t5.clone(); - (seconds_valid, tracker) - }) - .and_then(|(seconds_valid, tracker): (u64, Arc)| { - async move { - match tracker.generate_auth_key(seconds_valid).await { - Ok(auth_key) => Ok(warp::reply::json(&auth_key)), - Err(..) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to generate key".into() })) - } - } - }); - - // DELETE /api/key/:key - // Delete key - let t6 = tracker.clone(); - let delete_key = filters::method::delete() - .and(filters::path::path("key")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |key: String| { - let tracker = t6.clone(); - (key, tracker) - }) - .and_then(|(key, tracker): (String, Arc)| { - async move { - match tracker.remove_auth_key(key).await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to delete key".into() })) - } - } - }); - - let api_routes = - filters::path::path("api") - .and(view_torrent_list - .or(delete_torrent) - .or(view_torrent_info) - .or(view_stats_list) - .or(add_torrent) - .or(create_key) - .or(delete_key) - ); - - let server = api_routes.and(authenticate(tracker.config.http_api.access_tokens.clone())); - - serve(server) -} diff --git a/src/key_manager.rs 
b/src/key_manager.rs deleted file mode 100644 index b1f16f1dc..000000000 --- a/src/key_manager.rs +++ /dev/null @@ -1,125 +0,0 @@ -use super::common::AUTH_KEY_LENGTH; -use crate::utils::current_time; -use rand::{thread_rng, Rng}; -use rand::distributions::Alphanumeric; -use serde::Serialize; -use log::debug; -use derive_more::{Display, Error}; - -pub fn generate_auth_key(seconds_valid: u64) -> AuthKey { - let key: String = thread_rng() - .sample_iter(&Alphanumeric) - .take(AUTH_KEY_LENGTH) - .map(char::from) - .collect(); - - debug!("Generated key: {}, valid for: {} seconds", key, seconds_valid); - - AuthKey { - key, - valid_until: Some(current_time() + seconds_valid), - } -} - -pub fn verify_auth_key(auth_key: &AuthKey) -> Result<(), Error> { - let current_time = current_time(); - if auth_key.valid_until.is_none() { return Err(Error::KeyInvalid) } - if auth_key.valid_until.unwrap() < current_time { return Err(Error::KeyExpired) } - - Ok(()) -} - -#[derive(Serialize, Debug, Eq, PartialEq, Clone)] -pub struct AuthKey { - pub key: String, - pub valid_until: Option, -} - -impl AuthKey { - pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { - if let Ok(key) = String::from_utf8(Vec::from(key_buffer)) { - Some(AuthKey { - key, - valid_until: None, - }) - } else { - None - } - } - - pub fn from_string(key: &str) -> Option { - if key.len() != AUTH_KEY_LENGTH { - None - } else { - Some(AuthKey { - key: key.to_string(), - valid_until: None, - }) - } - } -} - -#[derive(Debug, Display, PartialEq, Error)] -#[allow(dead_code)] -pub enum Error { - #[display(fmt = "Key could not be verified.")] - KeyVerificationError, - #[display(fmt = "Key is invalid.")] - KeyInvalid, - #[display(fmt = "Key has expired.")] - KeyExpired -} - -impl From for Error { - fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { - eprintln!("{}", e); - Error::KeyVerificationError - } -} - -#[cfg(test)] -mod tests { - use crate::key_manager; - - #[test] - fn auth_key_from_buffer() { - let auth_key 
= key_manager::AuthKey::from_buffer( - [ - 89, 90, 83, 108, - 52, 108, 77, 90, - 117, 112, 82, 117, - 79, 112, 83, 82, - 67, 51, 107, 114, - 73, 75, 82, 53, - 66, 80, 66, 49, - 52, 110, 114, 74] - ); - - assert!(auth_key.is_some()); - assert_eq!(auth_key.unwrap().key, "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"); - } - - #[test] - fn auth_key_from_string() { - let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key = key_manager::AuthKey::from_string(key_string); - - assert!(auth_key.is_some()); - assert_eq!(auth_key.unwrap().key, key_string); - } - - #[test] - fn generate_valid_auth_key() { - let auth_key = key_manager::generate_auth_key(9999); - - assert!(key_manager::verify_auth_key(&auth_key).is_ok()); - } - - #[test] - fn generate_expired_auth_key() { - let mut auth_key = key_manager::generate_auth_key(0); - auth_key.valid_until = Some(0); - - assert!(key_manager::verify_auth_key(&auth_key).is_err()); - } -} diff --git a/src/lib.rs b/src/lib.rs index c055cfae4..bd775f8cf 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,17 +1,8 @@ -pub mod config; +pub mod app; +pub mod bootstrap; +pub mod servers; +pub mod shared; pub mod tracker; -pub mod http_api_server; -pub mod common; -pub mod utils; -pub mod database; -pub mod key_manager; -pub mod logging; -pub mod torrust_udp_tracker; -pub mod torrust_http_tracker; -pub use self::config::*; -pub use torrust_udp_tracker::server::*; -pub use torrust_http_tracker::server::*; -pub use self::tracker::*; -pub use self::http_api_server::*; -pub use self::common::*; +#[macro_use] +extern crate lazy_static; diff --git a/src/logging.rs b/src/logging.rs deleted file mode 100644 index 580e35094..000000000 --- a/src/logging.rs +++ /dev/null @@ -1,39 +0,0 @@ -use log::info; -use crate::Configuration; - -pub fn setup_logging(cfg: &Configuration) { - let log_level = match &cfg.log_level { - None => log::LevelFilter::Info, - Some(level) => { - match level.as_str() { - "off" => log::LevelFilter::Off, - "trace" => 
log::LevelFilter::Trace, - "debug" => log::LevelFilter::Debug, - "info" => log::LevelFilter::Info, - "warn" => log::LevelFilter::Warn, - "error" => log::LevelFilter::Error, - _ => { - panic!("Unknown log level encountered: '{}'", level.as_str()); - } - } - } - }; - - if let Err(_err) = fern::Dispatch::new() - .format(|out, message, record| { - out.finish(format_args!( - "{} [{}][{}] {}", - chrono::Local::now().format("%+"), - record.target(), - record.level(), - message - )) - }) - .level(log_level) - .chain(std::io::stdout()) - .apply() - { - panic!("Failed to initialize logging.") - } - info!("logging initialized."); -} diff --git a/src/main.rs b/src/main.rs index 08610d24a..87c0fc367 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,144 +1,20 @@ -use std::net::SocketAddr; -use std::sync::Arc; use log::info; -use tokio::task::JoinHandle; -use torrust_tracker::{Configuration, http_api_server, HttpApiConfig, HttpTrackerConfig, logging, TorrentTracker, UdpServer, UdpTrackerConfig}; -use torrust_tracker::torrust_http_tracker::server::HttpServer; +use torrust_tracker::{app, bootstrap}; #[tokio::main] async fn main() { - // torrust config - let config = match Configuration::load_from_file() { - Ok(config) => Arc::new(config), - Err(error) => { - panic!("{}", error) - } - }; - - // the singleton torrent tracker that gets passed to the HTTP and UDP server - let tracker = Arc::new(TorrentTracker::new(config.clone()).unwrap_or_else(|e| { - panic!("{}", e) - })); - - logging::setup_logging(&config); - - // load persistent torrents if enabled - if config.persistence { - info!("Loading persistent torrents into memory..."); - if tracker.load_torrents().await.is_err() { - panic!("Could not load persistent torrents.") - }; - info!("Persistent torrents loaded."); - } - - // start torrent cleanup job (periodically removes old peers) - let _torrent_cleanup_job = start_torrent_cleanup_job(config.clone(), tracker.clone()).unwrap(); - - // start HTTP API server - if 
config.http_api.enabled { - let _api_server = start_api_server(&config.http_api, tracker.clone()); - } + let (config, tracker) = bootstrap::app::setup(); - let (tx, rx) = tokio::sync::watch::channel(false); - let mut udp_server_handles = Vec::new(); + let jobs = app::start(config.clone(), tracker.clone()).await; - // start the udp blocks - for udp_tracker in &config.udp_trackers { - // used to send kill signal to thread - - if udp_tracker.enabled { - udp_server_handles.push( - start_udp_tracker_server(&udp_tracker, tracker.clone(), rx.clone()).await - ) - } - } - - // start the http blocks - for http_tracker in &config.http_trackers { - let _ = start_http_tracker_server(&http_tracker, tracker.clone(), true); - let _ = start_http_tracker_server(&http_tracker, tracker.clone(), false); - } - - // handle the signals here + // handle the signals tokio::select! { _ = tokio::signal::ctrl_c() => { info!("Torrust shutting down.."); - // send kill signal - let _ = tx.send(true); - - // await for all udp servers to shutdown - futures::future::join_all(udp_server_handles).await; - - // Save torrents if enabled - if config.persistence { - info!("Saving torrents into SQL from memory..."); - let _ = tracker.save_torrents().await; - info!("Torrents saved"); - } + // Await for all jobs to shutdown + futures::future::join_all(jobs).await; + info!("Torrust successfully shutdown."); } } } - -fn start_torrent_cleanup_job(config: Arc, tracker: Arc) -> Option> { - let weak_tracker = std::sync::Arc::downgrade(&tracker); - let interval = config.cleanup_interval.unwrap_or(600); - - return Some(tokio::spawn(async move { - let interval = std::time::Duration::from_secs(interval); - let mut interval = tokio::time::interval(interval); - interval.tick().await; // first tick is immediate... 
- // periodically call tracker.cleanup_torrents() - loop { - interval.tick().await; - if let Some(tracker) = weak_tracker.upgrade() { - tracker.cleanup_torrents().await; - } else { - break; - } - } - })); -} - -fn start_api_server(config: &HttpApiConfig, tracker: Arc) -> JoinHandle<()> { - info!("Starting HTTP API server on: {}", config.bind_address); - let bind_addr = config.bind_address.parse::().unwrap(); - - tokio::spawn(async move { - let server = http_api_server::build_server(tracker); - let _ = server.bind(bind_addr).await; - }) -} - -fn start_http_tracker_server(config: &HttpTrackerConfig, tracker: Arc, ssl: bool) -> JoinHandle<()> { - let http_tracker = HttpServer::new(tracker); - let enabled = config.enabled; - let bind_addr = config.bind_address.parse::().unwrap(); - let ssl_enabled = config.ssl_enabled; - let ssl_bind_addr = config.ssl_bind_address.parse::().unwrap(); - let ssl_cert_path = config.ssl_cert_path.clone(); - let ssl_key_path = config.ssl_key_path.clone(); - - tokio::spawn(async move { - // run with tls if ssl_enabled and cert and key path are set - if ssl && ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { - info!("Starting HTTPS server on: {} (TLS)", ssl_bind_addr); - http_tracker.start_tls(ssl_bind_addr, ssl_cert_path.as_ref().unwrap(), ssl_key_path.as_ref().unwrap()).await; - } - if !ssl && enabled { - info!("Starting HTTP server on: {}", bind_addr); - http_tracker.start(bind_addr).await; - } - }) -} - -async fn start_udp_tracker_server(config: &UdpTrackerConfig, tracker: Arc, rx: tokio::sync::watch::Receiver) -> JoinHandle<()> { - let udp_server = UdpServer::new(tracker, &config.bind_address).await.unwrap_or_else(|e| { - panic!("Could not start UDP server: {}", e); - }); - - info!("Starting UDP server on: {}", config.bind_address); - tokio::spawn(async move { - udp_server.start(rx).await; - }) -} diff --git a/src/servers/apis/mod.rs b/src/servers/apis/mod.rs new file mode 100644 index 000000000..1bc257916 --- /dev/null 
+++ b/src/servers/apis/mod.rs @@ -0,0 +1,8 @@ +pub mod routes; +pub mod server; +pub mod v1; + +use serde::Deserialize; + +#[derive(Deserialize)] +pub struct InfoHashParam(pub String); diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs new file mode 100644 index 000000000..2545d6b88 --- /dev/null +++ b/src/servers/apis/routes.rs @@ -0,0 +1,18 @@ +use std::sync::Arc; + +use axum::{middleware, Router}; + +use super::v1; +use super::v1::middlewares::auth::auth; +use crate::tracker::Tracker; + +#[allow(clippy::needless_pass_by_value)] +pub fn router(tracker: Arc) -> Router { + let router = Router::new(); + + let prefix = "/api"; + + let router = v1::routes::add(prefix, router, tracker.clone()); + + router.layer(middleware::from_fn_with_state(tracker.config.clone(), auth)) +} diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs new file mode 100644 index 000000000..e4714cd9a --- /dev/null +++ b/src/servers/apis/server.rs @@ -0,0 +1,246 @@ +use std::net::SocketAddr; +use std::str::FromStr; +use std::sync::Arc; + +use axum_server::tls_rustls::RustlsConfig; +use axum_server::Handle; +use futures::future::BoxFuture; +use futures::Future; +use log::info; + +use super::routes::router; +use crate::servers::signals::shutdown_signal; +use crate::tracker::Tracker; + +#[derive(Debug)] +pub enum Error { + Error(String), +} + +#[allow(clippy::module_name_repetitions)] +pub type StoppedApiServer = ApiServer; +#[allow(clippy::module_name_repetitions)] +pub type RunningApiServer = ApiServer; + +#[allow(clippy::module_name_repetitions)] +pub struct ApiServer { + pub cfg: torrust_tracker_configuration::HttpApi, + pub state: S, +} + +pub struct Stopped; + +pub struct Running { + pub bind_addr: SocketAddr, + task_killer: tokio::sync::oneshot::Sender, + task: tokio::task::JoinHandle<()>, +} + +impl ApiServer { + #[must_use] + pub fn new(cfg: torrust_tracker_configuration::HttpApi) -> Self { + Self { cfg, state: Stopped {} } + } + + /// # Errors + /// + /// 
It would return an error if no `SocketAddr` is returned after launching the server. + pub async fn start(self, tracker: Arc) -> Result, Error> { + let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::(); + let (addr_sender, addr_receiver) = tokio::sync::oneshot::channel::(); + + let configuration = self.cfg.clone(); + + let task = tokio::spawn(async move { + let (bind_addr, server) = Launcher::start(&configuration, tracker, shutdown_signal(shutdown_receiver)); + + addr_sender.send(bind_addr).expect("Could not return SocketAddr."); + + server.await; + }); + + let bind_address = addr_receiver + .await + .map_err(|_| Error::Error("Could not receive bind_address.".to_string()))?; + + Ok(ApiServer { + cfg: self.cfg, + state: Running { + bind_addr: bind_address, + task_killer: shutdown_sender, + task, + }, + }) + } +} + +impl ApiServer { + /// # Errors + /// + /// It would return an error if the channel for the task killer signal was closed. + pub async fn stop(self) -> Result, Error> { + self.state + .task_killer + .send(0) + .map_err(|_| Error::Error("Task killer channel was closed.".to_string()))?; + + let _ = self.state.task.await; + + Ok(ApiServer { + cfg: self.cfg, + state: Stopped {}, + }) + } +} + +struct Launcher; + +impl Launcher { + pub fn start( + cfg: &torrust_tracker_configuration::HttpApi, + tracker: Arc, + shutdown_signal: F, + ) -> (SocketAddr, BoxFuture<'static, ()>) + where + F: Future + Send + 'static, + { + let addr = SocketAddr::from_str(&cfg.bind_address).expect("bind_address is not a valid SocketAddr."); + let tcp_listener = std::net::TcpListener::bind(addr).expect("Could not bind tcp_listener to address."); + let bind_addr = tcp_listener + .local_addr() + .expect("Could not get local_addr from tcp_listener."); + + if let (true, Some(ssl_cert_path), Some(ssl_key_path)) = (&cfg.ssl_enabled, &cfg.ssl_cert_path, &cfg.ssl_key_path) { + let server = Self::start_tls_with_graceful_shutdown( + tcp_listener, + 
(ssl_cert_path.to_string(), ssl_key_path.to_string()), + tracker, + shutdown_signal, + ); + + (bind_addr, server) + } else { + let server = Self::start_with_graceful_shutdown(tcp_listener, tracker, shutdown_signal); + + (bind_addr, server) + } + } + + pub fn start_with_graceful_shutdown( + tcp_listener: std::net::TcpListener, + tracker: Arc, + shutdown_signal: F, + ) -> BoxFuture<'static, ()> + where + F: Future + Send + 'static, + { + let app = router(tracker); + + Box::pin(async { + axum::Server::from_tcp(tcp_listener) + .expect("Could not bind to tcp listener.") + .serve(app.into_make_service_with_connect_info::()) + .with_graceful_shutdown(shutdown_signal) + .await + .expect("Axum server crashed."); + }) + } + + pub fn start_tls_with_graceful_shutdown( + tcp_listener: std::net::TcpListener, + (ssl_cert_path, ssl_key_path): (String, String), + tracker: Arc, + shutdown_signal: F, + ) -> BoxFuture<'static, ()> + where + F: Future + Send + 'static, + { + let app = router(tracker); + + let handle = Handle::new(); + + let cloned_handle = handle.clone(); + + tokio::task::spawn_local(async move { + shutdown_signal.await; + cloned_handle.shutdown(); + }); + + Box::pin(async { + let tls_config = RustlsConfig::from_pem_file(ssl_cert_path, ssl_key_path) + .await + .expect("Could not read tls cert."); + + axum_server::from_tcp_rustls(tcp_listener, tls_config) + .handle(handle) + .serve(app.into_make_service_with_connect_info::()) + .await + .expect("Axum server crashed."); + }) + } +} + +pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl Future> { + let app = router(tracker); + + let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); + + server.with_graceful_shutdown(async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + info!("Stopping Torrust APIs server on http://{} ...", socket_addr); + }) +} + +pub fn start_tls( + socket_addr: SocketAddr, + ssl_config: RustlsConfig, + tracker: Arc, +) -> impl 
Future> { + let app = router(tracker); + + let handle = Handle::new(); + let shutdown_handle = handle.clone(); + + tokio::spawn(async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + info!("Stopping Torrust APIs server on https://{} ...", socket_addr); + shutdown_handle.shutdown(); + }); + + axum_server::bind_rustls(socket_addr, ssl_config) + .handle(handle) + .serve(app.into_make_service()) +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use torrust_tracker_configuration::Configuration; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::apis::server::ApiServer; + use crate::tracker; + use crate::tracker::statistics; + + fn tracker_configuration() -> Arc { + Arc::new(configuration::ephemeral()) + } + + #[tokio::test] + async fn it_should_be_able_to_start_from_stopped_state_and_then_stop_again() { + let cfg = tracker_configuration(); + + let tracker = Arc::new(tracker::Tracker::new(cfg.clone(), None, statistics::Repo::new()).unwrap()); + + let stopped_api_server = ApiServer::new(cfg.http_api.clone()); + + let running_api_server_result = stopped_api_server.start(tracker).await; + + assert!(running_api_server_result.is_ok()); + + let running_api_server = running_api_server_result.unwrap(); + + assert!(running_api_server.stop().await.is_ok()); + } +} diff --git a/src/servers/apis/v1/context/auth_key/handlers.rs b/src/servers/apis/v1/context/auth_key/handlers.rs new file mode 100644 index 000000000..d2e633206 --- /dev/null +++ b/src/servers/apis/v1/context/auth_key/handlers.rs @@ -0,0 +1,46 @@ +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; + +use axum::extract::{Path, State}; +use axum::response::Response; +use serde::Deserialize; + +use super::responses::{ + auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, failed_to_reload_keys_response, +}; +use crate::servers::apis::v1::context::auth_key::resources::AuthKey; +use 
crate::servers::apis::v1::responses::{invalid_auth_key_param_response, ok_response}; +use crate::tracker::auth::Key; +use crate::tracker::Tracker; + +pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { + let seconds_valid = seconds_valid_or_key; + match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { + Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), + Err(e) => failed_to_generate_key_response(e), + } +} + +#[derive(Deserialize)] +pub struct KeyParam(String); + +pub async fn delete_auth_key_handler( + State(tracker): State>, + Path(seconds_valid_or_key): Path, +) -> Response { + match Key::from_str(&seconds_valid_or_key.0) { + Err(_) => invalid_auth_key_param_response(&seconds_valid_or_key.0), + Ok(key) => match tracker.remove_auth_key(&key).await { + Ok(_) => ok_response(), + Err(e) => failed_to_delete_key_response(e), + }, + } +} + +pub async fn reload_keys_handler(State(tracker): State>) -> Response { + match tracker.load_keys_from_database().await { + Ok(_) => ok_response(), + Err(e) => failed_to_reload_keys_response(e), + } +} diff --git a/src/servers/apis/v1/context/auth_key/mod.rs b/src/servers/apis/v1/context/auth_key/mod.rs new file mode 100644 index 000000000..746a2f064 --- /dev/null +++ b/src/servers/apis/v1/context/auth_key/mod.rs @@ -0,0 +1,4 @@ +pub mod handlers; +pub mod resources; +pub mod responses; +pub mod routes; diff --git a/src/servers/apis/v1/context/auth_key/resources.rs b/src/servers/apis/v1/context/auth_key/resources.rs new file mode 100644 index 000000000..400b34eb7 --- /dev/null +++ b/src/servers/apis/v1/context/auth_key/resources.rs @@ -0,0 +1,105 @@ +use std::convert::From; + +use serde::{Deserialize, Serialize}; + +use crate::shared::clock::convert_from_iso_8601_to_timestamp; +use crate::tracker::auth::{self, Key}; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct AuthKey { + pub key: String, + pub valid_until: u64, // 
todo: remove when the torrust-index-backend starts using the `expiry_time` attribute. + pub expiry_time: String, +} + +impl From for auth::ExpiringKey { + fn from(auth_key_resource: AuthKey) -> Self { + auth::ExpiringKey { + key: auth_key_resource.key.parse::().unwrap(), + valid_until: convert_from_iso_8601_to_timestamp(&auth_key_resource.expiry_time), + } + } +} + +impl From for AuthKey { + fn from(auth_key: auth::ExpiringKey) -> Self { + AuthKey { + key: auth_key.key.to_string(), + valid_until: auth_key.valid_until.as_secs(), + expiry_time: auth_key.expiry_time().to_string(), + } + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use super::AuthKey; + use crate::shared::clock::{Current, TimeNow}; + use crate::tracker::auth::{self, Key}; + + struct TestTime { + pub timestamp: u64, + pub iso_8601_v1: String, + pub iso_8601_v2: String, + } + + fn one_hour_after_unix_epoch() -> TestTime { + let timestamp = 60_u64; + let iso_8601_v1 = "1970-01-01T00:01:00.000Z".to_string(); + let iso_8601_v2 = "1970-01-01 00:01:00 UTC".to_string(); + TestTime { + timestamp, + iso_8601_v1, + iso_8601_v2, + } + } + + #[test] + fn it_should_be_convertible_into_an_auth_key() { + let auth_key_resource = AuthKey { + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line + valid_until: one_hour_after_unix_epoch().timestamp, + expiry_time: one_hour_after_unix_epoch().iso_8601_v1, + }; + + assert_eq!( + auth::ExpiringKey::from(auth_key_resource), + auth::ExpiringKey { + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line + valid_until: Current::add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap() + } + ); + } + + #[test] + fn it_should_be_convertible_from_an_auth_key() { + let auth_key = auth::ExpiringKey { + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line + valid_until: Current::add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap(), + }; + + assert_eq!( + 
AuthKey::from(auth_key), + AuthKey { + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line + valid_until: one_hour_after_unix_epoch().timestamp, + expiry_time: one_hour_after_unix_epoch().iso_8601_v2, + } + ); + } + + #[test] + fn it_should_be_convertible_into_json() { + assert_eq!( + serde_json::to_string(&AuthKey { + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line + valid_until: one_hour_after_unix_epoch().timestamp, + expiry_time: one_hour_after_unix_epoch().iso_8601_v1, + }) + .unwrap(), + "{\"key\":\"IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM\",\"valid_until\":60,\"expiry_time\":\"1970-01-01T00:01:00.000Z\"}" // cspell:disable-line + ); + } +} diff --git a/src/servers/apis/v1/context/auth_key/responses.rs b/src/servers/apis/v1/context/auth_key/responses.rs new file mode 100644 index 000000000..4e3b0c711 --- /dev/null +++ b/src/servers/apis/v1/context/auth_key/responses.rs @@ -0,0 +1,35 @@ +use std::error::Error; + +use axum::http::{header, StatusCode}; +use axum::response::{IntoResponse, Response}; + +use crate::servers::apis::v1::context::auth_key::resources::AuthKey; +use crate::servers::apis::v1::responses::unhandled_rejection_response; + +/// # Panics +/// +/// Will panic if it can't convert the `AuthKey` resource to json +#[must_use] +pub fn auth_key_response(auth_key: &AuthKey) -> Response { + ( + StatusCode::OK, + [(header::CONTENT_TYPE, "application/json; charset=utf-8")], + serde_json::to_string(auth_key).unwrap(), + ) + .into_response() +} + +#[must_use] +pub fn failed_to_generate_key_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to generate key: {e}")) +} + +#[must_use] +pub fn failed_to_delete_key_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to delete key: {e}")) +} + +#[must_use] +pub fn failed_to_reload_keys_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to reload keys: {e}")) +} diff --git 
a/src/servers/apis/v1/context/auth_key/routes.rs b/src/servers/apis/v1/context/auth_key/routes.rs new file mode 100644 index 000000000..9b155c2a5 --- /dev/null +++ b/src/servers/apis/v1/context/auth_key/routes.rs @@ -0,0 +1,25 @@ +use std::sync::Arc; + +use axum::routing::{get, post}; +use axum::Router; + +use super::handlers::{delete_auth_key_handler, generate_auth_key_handler, reload_keys_handler}; +use crate::tracker::Tracker; + +pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { + // Keys + router + .route( + // code-review: Axum does not allow two routes with the same path but different path variable name. + // In the new major API version, `seconds_valid` should be a POST form field so that we will have two paths: + // POST /key + // DELETE /key/:key + &format!("{prefix}/key/:seconds_valid_or_key"), + post(generate_auth_key_handler) + .with_state(tracker.clone()) + .delete(delete_auth_key_handler) + .with_state(tracker.clone()), + ) + // Keys command + .route(&format!("{prefix}/keys/reload"), get(reload_keys_handler).with_state(tracker)) +} diff --git a/src/servers/apis/v1/context/mod.rs b/src/servers/apis/v1/context/mod.rs new file mode 100644 index 000000000..6d3fb7566 --- /dev/null +++ b/src/servers/apis/v1/context/mod.rs @@ -0,0 +1,4 @@ +pub mod auth_key; +pub mod stats; +pub mod torrent; +pub mod whitelist; diff --git a/src/servers/apis/v1/context/stats/handlers.rs b/src/servers/apis/v1/context/stats/handlers.rs new file mode 100644 index 000000000..e93e65996 --- /dev/null +++ b/src/servers/apis/v1/context/stats/handlers.rs @@ -0,0 +1,13 @@ +use std::sync::Arc; + +use axum::extract::State; +use axum::response::Json; + +use super::resources::Stats; +use super::responses::stats_response; +use crate::tracker::services::statistics::get_metrics; +use crate::tracker::Tracker; + +pub async fn get_stats_handler(State(tracker): State>) -> Json { + stats_response(get_metrics(tracker.clone()).await) +} diff --git 
a/src/servers/apis/v1/context/stats/mod.rs b/src/servers/apis/v1/context/stats/mod.rs new file mode 100644 index 000000000..746a2f064 --- /dev/null +++ b/src/servers/apis/v1/context/stats/mod.rs @@ -0,0 +1,4 @@ +pub mod handlers; +pub mod resources; +pub mod responses; +pub mod routes; diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs new file mode 100644 index 000000000..44ac814dc --- /dev/null +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -0,0 +1,100 @@ +use serde::{Deserialize, Serialize}; + +use crate::tracker::services::statistics::TrackerMetrics; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Stats { + pub torrents: u64, + pub seeders: u64, + pub completed: u64, + pub leechers: u64, + pub tcp4_connections_handled: u64, + pub tcp4_announces_handled: u64, + pub tcp4_scrapes_handled: u64, + pub tcp6_connections_handled: u64, + pub tcp6_announces_handled: u64, + pub tcp6_scrapes_handled: u64, + pub udp4_connections_handled: u64, + pub udp4_announces_handled: u64, + pub udp4_scrapes_handled: u64, + pub udp6_connections_handled: u64, + pub udp6_announces_handled: u64, + pub udp6_scrapes_handled: u64, +} + +impl From for Stats { + fn from(metrics: TrackerMetrics) -> Self { + Self { + torrents: metrics.torrents_metrics.torrents, + seeders: metrics.torrents_metrics.seeders, + completed: metrics.torrents_metrics.completed, + leechers: metrics.torrents_metrics.leechers, + tcp4_connections_handled: metrics.protocol_metrics.tcp4_connections_handled, + tcp4_announces_handled: metrics.protocol_metrics.tcp4_announces_handled, + tcp4_scrapes_handled: metrics.protocol_metrics.tcp4_scrapes_handled, + tcp6_connections_handled: metrics.protocol_metrics.tcp6_connections_handled, + tcp6_announces_handled: metrics.protocol_metrics.tcp6_announces_handled, + tcp6_scrapes_handled: metrics.protocol_metrics.tcp6_scrapes_handled, + udp4_connections_handled: 
metrics.protocol_metrics.udp4_connections_handled, + udp4_announces_handled: metrics.protocol_metrics.udp4_announces_handled, + udp4_scrapes_handled: metrics.protocol_metrics.udp4_scrapes_handled, + udp6_connections_handled: metrics.protocol_metrics.udp6_connections_handled, + udp6_announces_handled: metrics.protocol_metrics.udp6_announces_handled, + udp6_scrapes_handled: metrics.protocol_metrics.udp6_scrapes_handled, + } + } +} + +#[cfg(test)] +mod tests { + use super::Stats; + use crate::tracker::services::statistics::TrackerMetrics; + use crate::tracker::statistics::Metrics; + use crate::tracker::TorrentsMetrics; + + #[test] + fn stats_resource_should_be_converted_from_tracker_metrics() { + assert_eq!( + Stats::from(TrackerMetrics { + torrents_metrics: TorrentsMetrics { + seeders: 1, + completed: 2, + leechers: 3, + torrents: 4 + }, + protocol_metrics: Metrics { + tcp4_connections_handled: 5, + tcp4_announces_handled: 6, + tcp4_scrapes_handled: 7, + tcp6_connections_handled: 8, + tcp6_announces_handled: 9, + tcp6_scrapes_handled: 10, + udp4_connections_handled: 11, + udp4_announces_handled: 12, + udp4_scrapes_handled: 13, + udp6_connections_handled: 14, + udp6_announces_handled: 15, + udp6_scrapes_handled: 16 + } + }), + Stats { + torrents: 4, + seeders: 1, + completed: 2, + leechers: 3, + tcp4_connections_handled: 5, + tcp4_announces_handled: 6, + tcp4_scrapes_handled: 7, + tcp6_connections_handled: 8, + tcp6_announces_handled: 9, + tcp6_scrapes_handled: 10, + udp4_connections_handled: 11, + udp4_announces_handled: 12, + udp4_scrapes_handled: 13, + udp6_connections_handled: 14, + udp6_announces_handled: 15, + udp6_scrapes_handled: 16 + } + ); + } +} diff --git a/src/servers/apis/v1/context/stats/responses.rs b/src/servers/apis/v1/context/stats/responses.rs new file mode 100644 index 000000000..ea9a2480a --- /dev/null +++ b/src/servers/apis/v1/context/stats/responses.rs @@ -0,0 +1,8 @@ +use axum::response::Json; + +use super::resources::Stats; +use 
crate::tracker::services::statistics::TrackerMetrics; + +pub fn stats_response(tracker_metrics: TrackerMetrics) -> Json { + Json(Stats::from(tracker_metrics)) +} diff --git a/src/servers/apis/v1/context/stats/routes.rs b/src/servers/apis/v1/context/stats/routes.rs new file mode 100644 index 000000000..07f88aa70 --- /dev/null +++ b/src/servers/apis/v1/context/stats/routes.rs @@ -0,0 +1,11 @@ +use std::sync::Arc; + +use axum::routing::get; +use axum::Router; + +use super::handlers::get_stats_handler; +use crate::tracker::Tracker; + +pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { + router.route(&format!("{prefix}/stats"), get(get_stats_handler).with_state(tracker)) +} diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs new file mode 100644 index 000000000..4032f2e9a --- /dev/null +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -0,0 +1,59 @@ +use std::fmt; +use std::str::FromStr; +use std::sync::Arc; + +use axum::extract::{Path, Query, State}; +use axum::response::{IntoResponse, Json, Response}; +use serde::{de, Deserialize, Deserializer}; + +use super::resources::torrent::ListItem; +use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; +use crate::servers::apis::v1::responses::invalid_info_hash_param_response; +use crate::servers::apis::InfoHashParam; +use crate::shared::bit_torrent::info_hash::InfoHash; +use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; +use crate::tracker::Tracker; + +pub async fn get_torrent_handler(State(tracker): State>, Path(info_hash): Path) -> Response { + match InfoHash::from_str(&info_hash.0) { + Err(_) => invalid_info_hash_param_response(&info_hash.0), + Ok(info_hash) => match get_torrent_info(tracker.clone(), &info_hash).await { + Some(info) => torrent_info_response(info).into_response(), + None => torrent_not_known_response(), + }, + } +} + +#[derive(Deserialize)] +pub 
struct PaginationParams { + #[serde(default, deserialize_with = "empty_string_as_none")] + pub offset: Option, + pub limit: Option, +} + +pub async fn get_torrents_handler( + State(tracker): State>, + pagination: Query, +) -> Json> { + torrent_list_response( + &get_torrents( + tracker.clone(), + &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), + ) + .await, + ) +} + +/// Serde deserialization decorator to map empty Strings to None, +fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> +where + D: Deserializer<'de>, + T: FromStr, + T::Err: fmt::Display, +{ + let opt = Option::::deserialize(de)?; + match opt.as_deref() { + None | Some("") => Ok(None), + Some(s) => FromStr::from_str(s).map_err(de::Error::custom).map(Some), + } +} diff --git a/src/servers/apis/v1/context/torrent/mod.rs b/src/servers/apis/v1/context/torrent/mod.rs new file mode 100644 index 000000000..746a2f064 --- /dev/null +++ b/src/servers/apis/v1/context/torrent/mod.rs @@ -0,0 +1,4 @@ +pub mod handlers; +pub mod resources; +pub mod responses; +pub mod routes; diff --git a/src/servers/apis/v1/context/torrent/resources/mod.rs b/src/servers/apis/v1/context/torrent/resources/mod.rs new file mode 100644 index 000000000..46d62aac5 --- /dev/null +++ b/src/servers/apis/v1/context/torrent/resources/mod.rs @@ -0,0 +1,2 @@ +pub mod peer; +pub mod torrent; diff --git a/src/servers/apis/v1/context/torrent/resources/peer.rs b/src/servers/apis/v1/context/torrent/resources/peer.rs new file mode 100644 index 000000000..5284d26f6 --- /dev/null +++ b/src/servers/apis/v1/context/torrent/resources/peer.rs @@ -0,0 +1,47 @@ +use serde::{Deserialize, Serialize}; + +use crate::tracker; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Peer { + pub peer_id: Id, + pub peer_addr: String, + #[deprecated(since = "2.0.0", note = "please use `updated_milliseconds_ago` instead")] + pub updated: u128, + pub updated_milliseconds_ago: u128, + pub uploaded: i64, + pub downloaded: 
i64, + pub left: i64, + pub event: String, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Id { + pub id: Option, + pub client: Option, +} + +impl From for Id { + fn from(peer_id: tracker::peer::Id) -> Self { + Id { + id: peer_id.to_hex_string(), + client: peer_id.get_client_name().map(std::string::ToString::to_string), + } + } +} + +impl From for Peer { + #[allow(deprecated)] + fn from(peer: tracker::peer::Peer) -> Self { + Peer { + peer_id: Id::from(peer.peer_id), + peer_addr: peer.peer_addr.to_string(), + updated: peer.updated.as_millis(), + updated_milliseconds_ago: peer.updated.as_millis(), + uploaded: peer.uploaded.0, + downloaded: peer.downloaded.0, + left: peer.left.0, + event: format!("{:?}", peer.event), + } + } +} diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs new file mode 100644 index 000000000..e328f80c4 --- /dev/null +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -0,0 +1,135 @@ +use serde::{Deserialize, Serialize}; + +use super::peer; +use crate::tracker::services::torrent::{BasicInfo, Info}; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Torrent { + pub info_hash: String, + pub seeders: u64, + pub completed: u64, + pub leechers: u64, + #[serde(skip_serializing_if = "Option::is_none")] + pub peers: Option>, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct ListItem { + pub info_hash: String, + pub seeders: u64, + pub completed: u64, + pub leechers: u64, + // todo: this is always None. Remove field from endpoint? 
+ pub peers: Option>, +} + +impl ListItem { + #[must_use] + pub fn new_vec(basic_info_vec: &[BasicInfo]) -> Vec { + basic_info_vec + .iter() + .map(|basic_info| ListItem::from((*basic_info).clone())) + .collect() + } +} + +#[must_use] +pub fn to_resource(basic_info_vec: &[BasicInfo]) -> Vec { + basic_info_vec + .iter() + .map(|basic_info| ListItem::from((*basic_info).clone())) + .collect() +} + +impl From for Torrent { + fn from(info: Info) -> Self { + Self { + info_hash: info.info_hash.to_string(), + seeders: info.seeders, + completed: info.completed, + leechers: info.leechers, + peers: info + .peers + .map(|peers| peers.iter().map(|peer| peer::Peer::from(*peer)).collect()), + } + } +} + +impl From for ListItem { + fn from(basic_info: BasicInfo) -> Self { + Self { + info_hash: basic_info.info_hash.to_string(), + seeders: basic_info.seeders, + completed: basic_info.completed, + leechers: basic_info.leechers, + peers: None, + } + } +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::str::FromStr; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + + use super::Torrent; + use crate::servers::apis::v1::context::torrent::resources::peer::Peer; + use crate::servers::apis::v1::context::torrent::resources::torrent::ListItem; + use crate::shared::bit_torrent::info_hash::InfoHash; + use crate::shared::clock::DurationSinceUnixEpoch; + use crate::tracker::peer; + use crate::tracker::services::torrent::{BasicInfo, Info}; + + fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + } + } + + #[test] + fn torrent_resource_should_be_converted_from_torrent_info() { + assert_eq!( + Torrent::from(Info { + info_hash: 
InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + seeders: 1, + completed: 2, + leechers: 3, + peers: Some(vec![sample_peer()]), + }), + Torrent { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 2, + leechers: 3, + peers: Some(vec![Peer::from(sample_peer())]), + } + ); + } + + #[test] + fn torrent_resource_list_item_should_be_converted_from_the_basic_torrent_info() { + assert_eq!( + ListItem::from(BasicInfo { + info_hash: InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + seeders: 1, + completed: 2, + leechers: 3, + }), + ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 2, + leechers: 3, + peers: None, + } + ); + } +} diff --git a/src/servers/apis/v1/context/torrent/responses.rs b/src/servers/apis/v1/context/torrent/responses.rs new file mode 100644 index 000000000..48e3c6e7f --- /dev/null +++ b/src/servers/apis/v1/context/torrent/responses.rs @@ -0,0 +1,18 @@ +use axum::response::{IntoResponse, Json, Response}; +use serde_json::json; + +use super::resources::torrent::{ListItem, Torrent}; +use crate::tracker::services::torrent::{BasicInfo, Info}; + +pub fn torrent_list_response(basic_infos: &[BasicInfo]) -> Json> { + Json(ListItem::new_vec(basic_infos)) +} + +pub fn torrent_info_response(info: Info) -> Json { + Json(Torrent::from(info)) +} + +#[must_use] +pub fn torrent_not_known_response() -> Response { + Json(json!("torrent not known")).into_response() +} diff --git a/src/servers/apis/v1/context/torrent/routes.rs b/src/servers/apis/v1/context/torrent/routes.rs new file mode 100644 index 000000000..00faa9665 --- /dev/null +++ b/src/servers/apis/v1/context/torrent/routes.rs @@ -0,0 +1,17 @@ +use std::sync::Arc; + +use axum::routing::get; +use axum::Router; + +use super::handlers::{get_torrent_handler, get_torrents_handler}; +use crate::tracker::Tracker; + +pub fn add(prefix: &str, router: Router, tracker: Arc) -> 
Router { + // Torrents + router + .route( + &format!("{prefix}/torrent/:info_hash"), + get(get_torrent_handler).with_state(tracker.clone()), + ) + .route(&format!("{prefix}/torrents"), get(get_torrents_handler).with_state(tracker)) +} diff --git a/src/servers/apis/v1/context/whitelist/handlers.rs b/src/servers/apis/v1/context/whitelist/handlers.rs new file mode 100644 index 000000000..25e285c0b --- /dev/null +++ b/src/servers/apis/v1/context/whitelist/handlers.rs @@ -0,0 +1,46 @@ +use std::str::FromStr; +use std::sync::Arc; + +use axum::extract::{Path, State}; +use axum::response::Response; + +use super::responses::{ + failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, +}; +use crate::servers::apis::v1::responses::{invalid_info_hash_param_response, ok_response}; +use crate::servers::apis::InfoHashParam; +use crate::shared::bit_torrent::info_hash::InfoHash; +use crate::tracker::Tracker; + +pub async fn add_torrent_to_whitelist_handler( + State(tracker): State>, + Path(info_hash): Path, +) -> Response { + match InfoHash::from_str(&info_hash.0) { + Err(_) => invalid_info_hash_param_response(&info_hash.0), + Ok(info_hash) => match tracker.add_torrent_to_whitelist(&info_hash).await { + Ok(_) => ok_response(), + Err(e) => failed_to_whitelist_torrent_response(e), + }, + } +} + +pub async fn remove_torrent_from_whitelist_handler( + State(tracker): State>, + Path(info_hash): Path, +) -> Response { + match InfoHash::from_str(&info_hash.0) { + Err(_) => invalid_info_hash_param_response(&info_hash.0), + Ok(info_hash) => match tracker.remove_torrent_from_whitelist(&info_hash).await { + Ok(_) => ok_response(), + Err(e) => failed_to_remove_torrent_from_whitelist_response(e), + }, + } +} + +pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { + match tracker.load_whitelist_from_database().await { + Ok(_) => ok_response(), + Err(e) => failed_to_reload_whitelist_response(e), + } +} diff 
--git a/src/servers/apis/v1/context/whitelist/mod.rs b/src/servers/apis/v1/context/whitelist/mod.rs new file mode 100644 index 000000000..f6f000f34 --- /dev/null +++ b/src/servers/apis/v1/context/whitelist/mod.rs @@ -0,0 +1,3 @@ +pub mod handlers; +pub mod responses; +pub mod routes; diff --git a/src/servers/apis/v1/context/whitelist/responses.rs b/src/servers/apis/v1/context/whitelist/responses.rs new file mode 100644 index 000000000..06d4a9448 --- /dev/null +++ b/src/servers/apis/v1/context/whitelist/responses.rs @@ -0,0 +1,20 @@ +use std::error::Error; + +use axum::response::Response; + +use crate::servers::apis::v1::responses::unhandled_rejection_response; + +#[must_use] +pub fn failed_to_remove_torrent_from_whitelist_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to remove torrent from whitelist: {e}")) +} + +#[must_use] +pub fn failed_to_whitelist_torrent_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to whitelist torrent: {e}")) +} + +#[must_use] +pub fn failed_to_reload_whitelist_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to reload whitelist: {e}")) +} diff --git a/src/servers/apis/v1/context/whitelist/routes.rs b/src/servers/apis/v1/context/whitelist/routes.rs new file mode 100644 index 000000000..06011b462 --- /dev/null +++ b/src/servers/apis/v1/context/whitelist/routes.rs @@ -0,0 +1,24 @@ +use std::sync::Arc; + +use axum::routing::{delete, get, post}; +use axum::Router; + +use super::handlers::{add_torrent_to_whitelist_handler, reload_whitelist_handler, remove_torrent_from_whitelist_handler}; +use crate::tracker::Tracker; + +pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { + let prefix = format!("{prefix}/whitelist"); + + router + // Whitelisted torrents + .route( + &format!("{prefix}/:info_hash"), + post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), + ) + .route( + &format!("{prefix}/:info_hash"), + 
delete(remove_torrent_from_whitelist_handler).with_state(tracker.clone()), + ) + // Whitelist commands + .route(&format!("{prefix}/reload"), get(reload_whitelist_handler).with_state(tracker)) +} diff --git a/src/servers/apis/v1/middlewares/auth.rs b/src/servers/apis/v1/middlewares/auth.rs new file mode 100644 index 000000000..f0c63250b --- /dev/null +++ b/src/servers/apis/v1/middlewares/auth.rs @@ -0,0 +1,63 @@ +use std::sync::Arc; + +use axum::extract::{Query, State}; +use axum::http::Request; +use axum::middleware::Next; +use axum::response::{IntoResponse, Response}; +use serde::Deserialize; +use torrust_tracker_configuration::{Configuration, HttpApi}; + +use crate::servers::apis::v1::responses::unhandled_rejection_response; + +#[derive(Deserialize, Debug)] +pub struct QueryParams { + pub token: Option, +} + +/// Middleware for authentication using a "token" GET param. +/// The token must be one of the tokens in the tracker HTTP API configuration. +pub async fn auth( + State(config): State>, + Query(params): Query, + request: Request, + next: Next, +) -> Response +where + B: Send, +{ + let Some(token) = params.token else { return AuthError::Unauthorized.into_response() }; + + if !authenticate(&token, &config.http_api) { + return AuthError::TokenNotValid.into_response(); + } + + next.run(request).await +} + +enum AuthError { + Unauthorized, + TokenNotValid, +} + +impl IntoResponse for AuthError { + fn into_response(self) -> Response { + match self { + AuthError::Unauthorized => unauthorized_response(), + AuthError::TokenNotValid => token_not_valid_response(), + } + } +} + +fn authenticate(token: &str, http_api_config: &HttpApi) -> bool { + http_api_config.contains_token(token) +} + +#[must_use] +pub fn unauthorized_response() -> Response { + unhandled_rejection_response("unauthorized".to_string()) +} + +#[must_use] +pub fn token_not_valid_response() -> Response { + unhandled_rejection_response("token not valid".to_string()) +} diff --git 
a/src/servers/apis/v1/middlewares/mod.rs b/src/servers/apis/v1/middlewares/mod.rs new file mode 100644 index 000000000..0e4a05d59 --- /dev/null +++ b/src/servers/apis/v1/middlewares/mod.rs @@ -0,0 +1 @@ +pub mod auth; diff --git a/src/servers/apis/v1/mod.rs b/src/servers/apis/v1/mod.rs new file mode 100644 index 000000000..e87984b8e --- /dev/null +++ b/src/servers/apis/v1/mod.rs @@ -0,0 +1,4 @@ +pub mod context; +pub mod middlewares; +pub mod responses; +pub mod routes; diff --git a/src/servers/apis/v1/responses.rs b/src/servers/apis/v1/responses.rs new file mode 100644 index 000000000..4a9c39bf9 --- /dev/null +++ b/src/servers/apis/v1/responses.rs @@ -0,0 +1,80 @@ +use axum::http::{header, StatusCode}; +use axum::response::{IntoResponse, Response}; +use serde::Serialize; + +/* code-review: + When Axum cannot parse a path or query param it shows a message like this: + + For the "seconds_valid_or_key" path param: + + "Invalid URL: Cannot parse "-1" to a `u64`" + + That message is not an informative message, especially if you have more than one param. + We should show a message similar to the one we use when we parse the value in the handler. + For example: + + "Invalid URL: invalid infohash param: string \"INVALID VALUE\", expected a 40 character long string" + + We can customize the error message by using a custom type with custom serde deserialization. + The same we are using for the "InfoHashVisitor". + + Input data from HTTP requests should use structs with primitive types (first level of validation). + We can put the second level of validation in the application and domain services.
+*/ + +#[derive(Serialize, Debug)] +#[serde(tag = "status", rename_all = "snake_case")] +pub enum ActionStatus<'a> { + Ok, + Err { reason: std::borrow::Cow<'a, str> }, +} + +// OK response + +/// # Panics +/// +/// Will panic if it can't convert the `ActionStatus` to json +#[must_use] +pub fn ok_response() -> Response { + ( + StatusCode::OK, + [(header::CONTENT_TYPE, "application/json")], + serde_json::to_string(&ActionStatus::Ok).unwrap(), + ) + .into_response() +} + +// Error responses + +#[must_use] +pub fn invalid_info_hash_param_response(info_hash: &str) -> Response { + bad_request_response(&format!( + "Invalid URL: invalid infohash param: string \"{info_hash}\", expected a 40 character long string" + )) +} + +#[must_use] +pub fn invalid_auth_key_param_response(invalid_key: &str) -> Response { + bad_request_response(&format!("Invalid auth key id param \"{invalid_key}\"")) +} + +fn bad_request_response(body: &str) -> Response { + ( + StatusCode::BAD_REQUEST, + [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], + body.to_owned(), + ) + .into_response() +} + +/// This error response is to keep backward compatibility with the old API. +/// It should be a plain text or json. +#[must_use] +pub fn unhandled_rejection_response(reason: String) -> Response { + ( + StatusCode::INTERNAL_SERVER_ERROR, + [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], + format!("Unhandled rejection: {:?}", ActionStatus::Err { reason: reason.into() }), + ) + .into_response() +} diff --git a/src/servers/apis/v1/routes.rs b/src/servers/apis/v1/routes.rs new file mode 100644 index 000000000..d45319c4b --- /dev/null +++ b/src/servers/apis/v1/routes.rs @@ -0,0 +1,23 @@ +use std::sync::Arc; + +use axum::Router; + +use super::context::{auth_key, stats, torrent, whitelist}; +use crate::tracker::Tracker; + +pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { + // Without `v1` prefix. + // We keep the old API endpoints without `v1` prefix for backward compatibility. 
+ // todo: remove when the torrust index backend is using the `v1` prefix. + let router = auth_key::routes::add(prefix, router, tracker.clone()); + let router = stats::routes::add(prefix, router, tracker.clone()); + let router = whitelist::routes::add(prefix, router, tracker.clone()); + let router = torrent::routes::add(prefix, router, tracker.clone()); + + // With `v1` prefix + let v1_prefix = format!("{prefix}/v1"); + let router = auth_key::routes::add(&v1_prefix, router, tracker.clone()); + let router = stats::routes::add(&v1_prefix, router, tracker.clone()); + let router = whitelist::routes::add(&v1_prefix, router, tracker.clone()); + torrent::routes::add(&v1_prefix, router, tracker) +} diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs new file mode 100644 index 000000000..b8aa6b19f --- /dev/null +++ b/src/servers/http/mod.rs @@ -0,0 +1,22 @@ +//! Tracker HTTP/HTTPS Protocol: +//! +//! Original specification in BEP 3 (section "Trackers"): +//! +//! +//! +//! Other resources: +//! +//! - +//! - +//! + +use serde::{Deserialize, Serialize}; + +pub mod percent_encoding; +pub mod server; +pub mod v1; + +#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] +pub enum Version { + V1, +} diff --git a/src/servers/http/percent_encoding.rs b/src/servers/http/percent_encoding.rs new file mode 100644 index 000000000..019735e0f --- /dev/null +++ b/src/servers/http/percent_encoding.rs @@ -0,0 +1,66 @@ +use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; +use crate::tracker::peer::{self, IdConversionError}; + +/// # Errors +/// +/// Will return `Err` if the decoded bytes do not represent a valid `InfoHash`. +pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result { + let bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); + InfoHash::try_from(bytes) +} + +/// # Errors +/// +/// Will return `Err` if the decoded bytes do not represent a valid `peer::Id`.
+pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result { + let bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); + peer::Id::try_from(bytes) +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use crate::servers::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; + use crate::shared::bit_torrent::info_hash::InfoHash; + use crate::tracker::peer; + + #[test] + fn it_should_decode_a_percent_encoded_info_hash() { + let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; + + let info_hash = percent_decode_info_hash(encoded_infohash).unwrap(); + + assert_eq!( + info_hash, + InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap() + ); + } + + #[test] + fn it_should_fail_decoding_an_invalid_percent_encoded_info_hash() { + let invalid_encoded_infohash = "invalid percent-encoded infohash"; + + let info_hash = percent_decode_info_hash(invalid_encoded_infohash); + + assert!(info_hash.is_err()); + } + + #[test] + fn it_should_decode_a_percent_encoded_peer_id() { + let encoded_peer_id = "%2DqB00000000000000000"; + + let peer_id = percent_decode_peer_id(encoded_peer_id).unwrap(); + + assert_eq!(peer_id, peer::Id(*b"-qB00000000000000000")); + } + + #[test] + fn it_should_fail_decoding_an_invalid_percent_encoded_peer_id() { + let invalid_encoded_peer_id = "invalid percent-encoded peer id"; + + let peer_id = percent_decode_peer_id(invalid_encoded_peer_id); + + assert!(peer_id.is_err()); + } +} diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs new file mode 100644 index 000000000..510c685d4 --- /dev/null +++ b/src/servers/http/server.rs @@ -0,0 +1,112 @@ +use std::future::Future; +use std::net::SocketAddr; +use std::sync::Arc; + +use futures::future::BoxFuture; + +use crate::servers::signals::shutdown_signal; +use crate::tracker::Tracker; + +/// Trait to be implemented by a http server launcher for the tracker. 
+#[allow(clippy::module_name_repetitions)] +pub trait HttpServerLauncher: Sync + Send { + fn new() -> Self; + + fn start_with_graceful_shutdown( + &self, + cfg: torrust_tracker_configuration::HttpTracker, + tracker: Arc, + shutdown_signal: F, + ) -> (SocketAddr, BoxFuture<'static, ()>) + where + F: Future + Send + 'static; +} + +#[derive(Debug)] +pub enum Error { + Error(String), +} + +#[allow(clippy::module_name_repetitions)] +pub type StoppedHttpServer = HttpServer>; +#[allow(clippy::module_name_repetitions)] +pub type RunningHttpServer = HttpServer>; + +#[allow(clippy::module_name_repetitions)] +pub struct HttpServer { + pub cfg: torrust_tracker_configuration::HttpTracker, + pub state: S, +} + +pub struct Stopped { + launcher: I, +} + +pub struct Running { + pub bind_addr: SocketAddr, + task_killer: tokio::sync::oneshot::Sender, + task: tokio::task::JoinHandle, +} + +impl HttpServer> { + pub fn new(cfg: torrust_tracker_configuration::HttpTracker, launcher: I) -> Self { + Self { + cfg, + state: Stopped { launcher }, + } + } + + /// # Errors + /// + /// It would return an error if no `SocketAddr` is returned after launching the server. 
+ pub async fn start(self, tracker: Arc) -> Result>, Error> { + let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::(); + let (addr_sender, addr_receiver) = tokio::sync::oneshot::channel::(); + + let configuration = self.cfg.clone(); + let launcher = self.state.launcher; + + let task = tokio::spawn(async move { + let (bind_addr, server) = + launcher.start_with_graceful_shutdown(configuration, tracker, shutdown_signal(shutdown_receiver)); + + addr_sender.send(bind_addr).expect("Could not return SocketAddr."); + + server.await; + + launcher + }); + + let bind_address = addr_receiver + .await + .map_err(|_| Error::Error("Could not receive bind_address.".to_string()))?; + + Ok(HttpServer { + cfg: self.cfg, + state: Running { + bind_addr: bind_address, + task_killer: shutdown_sender, + task, + }, + }) + } +} + +impl HttpServer> { + /// # Errors + /// + /// It would return an error if the channel for the task killer signal was closed. + pub async fn stop(self) -> Result>, Error> { + self.state + .task_killer + .send(0) + .map_err(|_| Error::Error("Task killer channel was closed.".to_string()))?; + + let launcher = self.state.task.await.map_err(|e| Error::Error(e.to_string()))?; + + Ok(HttpServer { + cfg: self.cfg, + state: Stopped { launcher }, + }) + } +} diff --git a/src/servers/http/v1/extractors/announce_request.rs b/src/servers/http/v1/extractors/announce_request.rs new file mode 100644 index 000000000..501181c8c --- /dev/null +++ b/src/servers/http/v1/extractors/announce_request.rs @@ -0,0 +1,113 @@ +use std::panic::Location; + +use axum::async_trait; +use axum::extract::FromRequestParts; +use axum::http::request::Parts; +use axum::response::{IntoResponse, Response}; + +use crate::servers::http::v1::query::Query; +use crate::servers::http::v1::requests::announce::{Announce, ParseAnnounceQueryError}; +use crate::servers::http::v1::responses; + +pub struct ExtractRequest(pub Announce); + +#[async_trait] +impl FromRequestParts for ExtractRequest 
+where + S: Send + Sync, +{ + type Rejection = Response; + + async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { + match extract_announce_from(parts.uri.query()) { + Ok(announce_request) => Ok(ExtractRequest(announce_request)), + Err(error) => Err(error.into_response()), + } + } +} + +fn extract_announce_from(maybe_raw_query: Option<&str>) -> Result { + if maybe_raw_query.is_none() { + return Err(responses::error::Error::from(ParseAnnounceQueryError::MissingParams { + location: Location::caller(), + })); + } + + let query = maybe_raw_query.unwrap().parse::(); + + if let Err(error) = query { + return Err(responses::error::Error::from(error)); + } + + let announce_request = Announce::try_from(query.unwrap()); + + if let Err(error) = announce_request { + return Err(responses::error::Error::from(error)); + } + + Ok(announce_request.unwrap()) +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use super::extract_announce_from; + use crate::servers::http::v1::requests::announce::{Announce, Compact, Event}; + use crate::servers::http::v1::responses::error::Error; + use crate::shared::bit_torrent::info_hash::InfoHash; + use crate::tracker::peer; + + fn assert_error_response(error: &Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. 
Error: {error:?}" + ); + } + + #[test] + fn it_should_extract_the_announce_request_from_the_url_query_params() { + let raw_query = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0"; + + let announce = extract_announce_from(Some(raw_query)).unwrap(); + + assert_eq!( + announce, + Announce { + info_hash: InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(), + peer_id: peer::Id(*b"-qB00000000000000001"), + port: 17548, + downloaded: Some(0), + uploaded: Some(0), + left: Some(0), + event: Some(Event::Completed), + compact: Some(Compact::NotAccepted), + } + ); + } + + #[test] + fn it_should_reject_a_request_without_query_params() { + let response = extract_announce_from(None).unwrap_err(); + + assert_error_response( + &response, + "Cannot parse query params for announce request: missing query params for announce request", + ); + } + + #[test] + fn it_should_reject_a_request_with_a_query_that_cannot_be_parsed() { + let invalid_query = "param1=value1=value2"; + let response = extract_announce_from(Some(invalid_query)).unwrap_err(); + + assert_error_response(&response, "Cannot parse query params"); + } + + #[test] + fn it_should_reject_a_request_with_a_query_that_cannot_be_parsed_into_an_announce_request() { + let response = extract_announce_from(Some("param1=value1")).unwrap_err(); + + assert_error_response(&response, "Cannot parse query params for announce request"); + } +} diff --git a/src/servers/http/v1/extractors/authentication_key.rs b/src/servers/http/v1/extractors/authentication_key.rs new file mode 100644 index 000000000..71e9b9d25 --- /dev/null +++ b/src/servers/http/v1/extractors/authentication_key.rs @@ -0,0 +1,105 @@ +//! Wrapper for Axum `Path` extractor to return custom errors. 
+use std::panic::Location; + +use axum::async_trait; +use axum::extract::rejection::PathRejection; +use axum::extract::{FromRequestParts, Path}; +use axum::http::request::Parts; +use axum::response::{IntoResponse, Response}; +use serde::Deserialize; + +use crate::servers::http::v1::handlers::common::auth; +use crate::servers::http::v1::responses; +use crate::tracker::auth::Key; + +pub struct Extract(pub Key); + +#[derive(Deserialize)] +pub struct KeyParam(String); + +impl KeyParam { + #[must_use] + pub fn value(&self) -> String { + self.0.clone() + } +} + +#[async_trait] +impl FromRequestParts for Extract +where + S: Send + Sync, +{ + type Rejection = Response; + + async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + // Extract `key` from URL path with Axum `Path` extractor + let maybe_path_with_key = Path::::from_request_parts(parts, state).await; + + match extract_key(maybe_path_with_key) { + Ok(key) => Ok(Extract(key)), + Err(error) => Err(error.into_response()), + } + } +} + +fn extract_key(path_extractor_result: Result, PathRejection>) -> Result { + match path_extractor_result { + Ok(key_param) => match parse_key(&key_param.0.value()) { + Ok(key) => Ok(key), + Err(error) => Err(error), + }, + Err(path_rejection) => Err(custom_error(&path_rejection)), + } +} + +fn parse_key(key: &str) -> Result { + let key = key.parse::(); + + match key { + Ok(key) => Ok(key), + Err(_parse_key_error) => Err(responses::error::Error::from(auth::Error::InvalidKeyFormat { + location: Location::caller(), + })), + } +} + +fn custom_error(rejection: &PathRejection) -> responses::error::Error { + match rejection { + axum::extract::rejection::PathRejection::FailedToDeserializePathParams(_) => { + responses::error::Error::from(auth::Error::InvalidKeyFormat { + location: Location::caller(), + }) + } + axum::extract::rejection::PathRejection::MissingPathParams(_) => { + responses::error::Error::from(auth::Error::MissingAuthKey { + location: Location::caller(), + }) + } + 
_ => responses::error::Error::from(auth::Error::CannotExtractKeyParam { + location: Location::caller(), + }), + } +} + +#[cfg(test)] +mod tests { + + use super::parse_key; + use crate::servers::http::v1::responses::error::Error; + + fn assert_error_response(error: &Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. Error: {error:?}" + ); + } + + #[test] + fn it_should_return_an_authentication_error_if_the_key_cannot_be_parsed() { + let invalid_key = "invalid_key"; + + let response = parse_key(invalid_key).unwrap_err(); + + assert_error_response(&response, "Authentication error: Invalid format for authentication key param"); + } +} diff --git a/src/servers/http/v1/extractors/client_ip_sources.rs b/src/servers/http/v1/extractors/client_ip_sources.rs new file mode 100644 index 000000000..b291eba12 --- /dev/null +++ b/src/servers/http/v1/extractors/client_ip_sources.rs @@ -0,0 +1,38 @@ +//! Wrapper for two Axum extractors to get the relevant information +//! to resolve the remote client IP. 
+use std::net::SocketAddr; + +use axum::async_trait; +use axum::extract::{ConnectInfo, FromRequestParts}; +use axum::http::request::Parts; +use axum::response::Response; +use axum_client_ip::RightmostXForwardedFor; + +use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; + +pub struct Extract(pub ClientIpSources); + +#[async_trait] +impl<S> FromRequestParts<S> for Extract +where + S: Send + Sync, +{ + type Rejection = Response; + + async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> { + let right_most_x_forwarded_for = match RightmostXForwardedFor::from_request_parts(parts, state).await { + Ok(right_most_x_forwarded_for) => Some(right_most_x_forwarded_for.0), + Err(_) => None, + }; + + let connection_info_ip = match ConnectInfo::<SocketAddr>::from_request_parts(parts, state).await { + Ok(connection_info_socket_addr) => Some(connection_info_socket_addr.0.ip()), + Err(_) => None, + }; + + Ok(Extract(ClientIpSources { + right_most_x_forwarded_for, + connection_info_ip, + })) + } +} diff --git a/src/servers/http/v1/extractors/mod.rs b/src/servers/http/v1/extractors/mod.rs new file mode 100644 index 000000000..557330257 --- /dev/null +++ b/src/servers/http/v1/extractors/mod.rs @@ -0,0 +1,4 @@ +pub mod announce_request; +pub mod authentication_key; +pub mod client_ip_sources; +pub mod scrape_request; diff --git a/src/servers/http/v1/extractors/scrape_request.rs b/src/servers/http/v1/extractors/scrape_request.rs new file mode 100644 index 000000000..ee2502066 --- /dev/null +++ b/src/servers/http/v1/extractors/scrape_request.rs @@ -0,0 +1,135 @@ +use std::panic::Location; + +use axum::async_trait; +use axum::extract::FromRequestParts; +use axum::http::request::Parts; +use axum::response::{IntoResponse, Response}; + +use crate::servers::http::v1::query::Query; +use crate::servers::http::v1::requests::scrape::{ParseScrapeQueryError, Scrape}; +use crate::servers::http::v1::responses; + +pub struct ExtractRequest(pub Scrape); + +#[async_trait] +impl<S> FromRequestParts<S> for ExtractRequest +where + S: Send + Sync, +{ + type Rejection = Response; + + async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result<Self, Self::Rejection> { + match extract_scrape_from(parts.uri.query()) { + Ok(scrape_request) => Ok(ExtractRequest(scrape_request)), + Err(error) => Err(error.into_response()), + } + } +} + +fn extract_scrape_from(maybe_raw_query: Option<&str>) -> Result<Scrape, responses::error::Error> { + if maybe_raw_query.is_none() { + return Err(responses::error::Error::from(ParseScrapeQueryError::MissingParams { + location: Location::caller(), + })); + } + + let query = maybe_raw_query.unwrap().parse::<Query>(); + + if let Err(error) = query { + return Err(responses::error::Error::from(error)); + } + + let scrape_request = Scrape::try_from(query.unwrap()); + + if let Err(error) = scrape_request { + return Err(responses::error::Error::from(error)); + } + + Ok(scrape_request.unwrap()) +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use super::extract_scrape_from; + use crate::servers::http::v1::requests::scrape::Scrape; + use crate::servers::http::v1::responses::error::Error; + use crate::shared::bit_torrent::info_hash::InfoHash; + + struct TestInfoHash { + pub bencoded: String, + pub value: InfoHash, + } + + fn test_info_hash() -> TestInfoHash { + TestInfoHash { + bencoded: "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0".to_owned(), + value: InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(), + } + } + + fn assert_error_response(error: &Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. 
Error: {error:?}" + ); + } + + #[test] + fn it_should_extract_the_scrape_request_from_the_url_query_params() { + let info_hash = test_info_hash(); + + let raw_query = format!("info_hash={}", info_hash.bencoded); + + let scrape = extract_scrape_from(Some(&raw_query)).unwrap(); + + assert_eq!( + scrape, + Scrape { + info_hashes: vec![info_hash.value], + } + ); + } + + #[test] + fn it_should_extract_the_scrape_request_from_the_url_query_params_with_more_than_one_info_hash() { + let info_hash = test_info_hash(); + + let raw_query = format!("info_hash={}&info_hash={}", info_hash.bencoded, info_hash.bencoded); + + let scrape = extract_scrape_from(Some(&raw_query)).unwrap(); + + assert_eq!( + scrape, + Scrape { + info_hashes: vec![info_hash.value, info_hash.value], + } + ); + } + + #[test] + fn it_should_reject_a_request_without_query_params() { + let response = extract_scrape_from(None).unwrap_err(); + + assert_error_response( + &response, + "Cannot parse query params for scrape request: missing query params for scrape request", + ); + } + + #[test] + fn it_should_reject_a_request_with_a_query_that_cannot_be_parsed() { + let invalid_query = "param1=value1=value2"; + let response = extract_scrape_from(Some(invalid_query)).unwrap_err(); + + assert_error_response(&response, "Cannot parse query params"); + } + + #[test] + fn it_should_reject_a_request_with_a_query_that_cannot_be_parsed_into_a_scrape_request() { + let response = extract_scrape_from(Some("param1=value1")).unwrap_err(); + + assert_error_response(&response, "Cannot parse query params for scrape request"); + } +} diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs new file mode 100644 index 000000000..db41388ab --- /dev/null +++ b/src/servers/http/v1/handlers/announce.rs @@ -0,0 +1,321 @@ +use std::net::{IpAddr, SocketAddr}; +use std::panic::Location; +use std::sync::Arc; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use axum::extract::State; +use 
axum::response::{IntoResponse, Response}; +use log::debug; + +use crate::servers::http::v1::extractors::announce_request::ExtractRequest; +use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; +use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; +use crate::servers::http::v1::handlers::common::auth; +use crate::servers::http::v1::requests::announce::{Announce, Compact, Event}; +use crate::servers::http::v1::responses::{self, announce}; +use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; +use crate::servers::http::v1::services::{self, peer_ip_resolver}; +use crate::shared::clock::{Current, Time}; +use crate::tracker::auth::Key; +use crate::tracker::peer::Peer; +use crate::tracker::{AnnounceData, Tracker}; + +#[allow(clippy::unused_async)] +pub async fn handle_without_key( + State(tracker): State>, + ExtractRequest(announce_request): ExtractRequest, + ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, +) -> Response { + debug!("http announce request: {:#?}", announce_request); + + handle(&tracker, &announce_request, &client_ip_sources, None).await +} + +#[allow(clippy::unused_async)] +pub async fn handle_with_key( + State(tracker): State>, + ExtractRequest(announce_request): ExtractRequest, + ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, + ExtractKey(key): ExtractKey, +) -> Response { + debug!("http announce request: {:#?}", announce_request); + + handle(&tracker, &announce_request, &client_ip_sources, Some(key)).await +} + +async fn handle( + tracker: &Arc, + announce_request: &Announce, + client_ip_sources: &ClientIpSources, + maybe_key: Option, +) -> Response { + let announce_data = match handle_announce(tracker, announce_request, client_ip_sources, maybe_key).await { + Ok(announce_data) => announce_data, + Err(error) => return error.into_response(), + }; + build_response(announce_request, announce_data) +} + +/* code-review: 
authentication, authorization and peer IP resolution could be moved + from the handler (Axum) layer into the app layer `services::announce::invoke`. + That would make the handler even simpler and the code more reusable and decoupled from Axum. +*/ + +async fn handle_announce( + tracker: &Arc, + announce_request: &Announce, + client_ip_sources: &ClientIpSources, + maybe_key: Option, +) -> Result { + // Authentication + if tracker.requires_authentication() { + match maybe_key { + Some(key) => match tracker.authenticate(&key).await { + Ok(_) => (), + Err(error) => return Err(responses::error::Error::from(error)), + }, + None => { + return Err(responses::error::Error::from(auth::Error::MissingAuthKey { + location: Location::caller(), + })) + } + } + } + + // Authorization + match tracker.authorize(&announce_request.info_hash).await { + Ok(_) => (), + Err(error) => return Err(responses::error::Error::from(error)), + } + + let peer_ip = match peer_ip_resolver::invoke(tracker.config.on_reverse_proxy, client_ip_sources) { + Ok(peer_ip) => peer_ip, + Err(error) => return Err(responses::error::Error::from(error)), + }; + + let mut peer = peer_from_request(announce_request, &peer_ip); + + let announce_data = services::announce::invoke(tracker.clone(), announce_request.info_hash, &mut peer).await; + + Ok(announce_data) +} + +fn build_response(announce_request: &Announce, announce_data: AnnounceData) -> Response { + match &announce_request.compact { + Some(compact) => match compact { + Compact::Accepted => announce::Compact::from(announce_data).into_response(), + Compact::NotAccepted => announce::NonCompact::from(announce_data).into_response(), + }, + // Default response format non compact + None => announce::NonCompact::from(announce_data).into_response(), + } +} + +/// It ignores the peer address in the announce request params. 
+#[must_use] +fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> Peer { + Peer { + peer_id: announce_request.peer_id, + peer_addr: SocketAddr::new(*peer_ip, announce_request.port), + updated: Current::now(), + uploaded: NumberOfBytes(announce_request.uploaded.unwrap_or(0)), + downloaded: NumberOfBytes(announce_request.downloaded.unwrap_or(0)), + left: NumberOfBytes(announce_request.left.unwrap_or(0)), + event: map_to_aquatic_event(&announce_request.event), + } +} + +fn map_to_aquatic_event(event: &Option) -> AnnounceEvent { + match event { + Some(event) => match &event { + Event::Started => aquatic_udp_protocol::AnnounceEvent::Started, + Event::Stopped => aquatic_udp_protocol::AnnounceEvent::Stopped, + Event::Completed => aquatic_udp_protocol::AnnounceEvent::Completed, + }, + None => aquatic_udp_protocol::AnnounceEvent::None, + } +} + +#[cfg(test)] +mod tests { + + use torrust_tracker_test_helpers::configuration; + + use crate::servers::http::v1::requests::announce::Announce; + use crate::servers::http::v1::responses; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; + use crate::shared::bit_torrent::info_hash::InfoHash; + use crate::tracker::services::tracker_factory; + use crate::tracker::{peer, Tracker}; + + fn private_tracker() -> Tracker { + tracker_factory(configuration::ephemeral_mode_private().into()) + } + + fn whitelisted_tracker() -> Tracker { + tracker_factory(configuration::ephemeral_mode_whitelisted().into()) + } + + fn tracker_on_reverse_proxy() -> Tracker { + tracker_factory(configuration::ephemeral_with_reverse_proxy().into()) + } + + fn tracker_not_on_reverse_proxy() -> Tracker { + tracker_factory(configuration::ephemeral_without_reverse_proxy().into()) + } + + fn sample_announce_request() -> Announce { + Announce { + info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + peer_id: "-qB00000000000000001".parse::().unwrap(), + port: 17548, + downloaded: None, + uploaded: None, + 
left: None, + event: None, + compact: None, + } + } + + fn sample_client_ip_sources() -> ClientIpSources { + ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + } + } + + fn assert_error_response(error: &responses::error::Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. Error: {error:?}" + ); + } + + mod with_tracker_in_private_mode { + + use std::str::FromStr; + use std::sync::Arc; + + use super::{private_tracker, sample_announce_request, sample_client_ip_sources}; + use crate::servers::http::v1::handlers::announce::handle_announce; + use crate::servers::http::v1::handlers::announce::tests::assert_error_response; + use crate::tracker::auth; + + #[tokio::test] + async fn it_should_fail_when_the_authentication_key_is_missing() { + let tracker = Arc::new(private_tracker()); + + let maybe_key = None; + + let response = handle_announce(&tracker, &sample_announce_request(), &sample_client_ip_sources(), maybe_key) + .await + .unwrap_err(); + + assert_error_response( + &response, + "Authentication error: Missing authentication key param for private tracker", + ); + } + + #[tokio::test] + async fn it_should_fail_when_the_authentication_key_is_invalid() { + let tracker = Arc::new(private_tracker()); + + let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let maybe_key = Some(unregistered_key); + + let response = handle_announce(&tracker, &sample_announce_request(), &sample_client_ip_sources(), maybe_key) + .await + .unwrap_err(); + + assert_error_response(&response, "Authentication error: Failed to read key"); + } + } + + mod with_tracker_in_listed_mode { + + use std::sync::Arc; + + use super::{sample_announce_request, sample_client_ip_sources, whitelisted_tracker}; + use crate::servers::http::v1::handlers::announce::handle_announce; + use 
crate::servers::http::v1::handlers::announce::tests::assert_error_response; + + #[tokio::test] + async fn it_should_fail_when_the_announced_torrent_is_not_whitelisted() { + let tracker = Arc::new(whitelisted_tracker()); + + let announce_request = sample_announce_request(); + + let response = handle_announce(&tracker, &announce_request, &sample_client_ip_sources(), None) + .await + .unwrap_err(); + + assert_error_response( + &response, + &format!( + "Tracker error: The torrent: {}, is not whitelisted", + announce_request.info_hash + ), + ); + } + } + + mod with_tracker_on_reverse_proxy { + + use std::sync::Arc; + + use super::{sample_announce_request, tracker_on_reverse_proxy}; + use crate::servers::http::v1::handlers::announce::handle_announce; + use crate::servers::http::v1::handlers::announce::tests::assert_error_response; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; + + #[tokio::test] + async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { + let tracker = Arc::new(tracker_on_reverse_proxy()); + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }; + + let response = handle_announce(&tracker, &sample_announce_request(), &client_ip_sources, None) + .await + .unwrap_err(); + + assert_error_response( + &response, + "Error resolving peer IP: missing or invalid the right most X-Forwarded-For IP", + ); + } + } + + mod with_tracker_not_on_reverse_proxy { + + use std::sync::Arc; + + use super::{sample_announce_request, tracker_not_on_reverse_proxy}; + use crate::servers::http::v1::handlers::announce::handle_announce; + use crate::servers::http::v1::handlers::announce::tests::assert_error_response; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; + + #[tokio::test] + async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { + let tracker = Arc::new(tracker_not_on_reverse_proxy()); + + let 
client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }; + + let response = handle_announce(&tracker, &sample_announce_request(), &client_ip_sources, None) + .await + .unwrap_err(); + + assert_error_response( + &response, + "Error resolving peer IP: cannot get the client IP from the connection info", + ); + } + } +} diff --git a/src/servers/http/v1/handlers/common/auth.rs b/src/servers/http/v1/handlers/common/auth.rs new file mode 100644 index 000000000..644556e95 --- /dev/null +++ b/src/servers/http/v1/handlers/common/auth.rs @@ -0,0 +1,32 @@ +use std::panic::Location; + +use thiserror::Error; + +use crate::servers::http::v1::responses; +use crate::tracker::auth; + +#[derive(Debug, Error)] +pub enum Error { + #[error("Missing authentication key param for private tracker. Error in {location}")] + MissingAuthKey { location: &'static Location<'static> }, + #[error("Invalid format for authentication key param. Error in {location}")] + InvalidKeyFormat { location: &'static Location<'static> }, + #[error("Cannot extract authentication key param from URL path. 
Error in {location}")] + CannotExtractKeyParam { location: &'static Location<'static> }, +} + +impl From<Error> for responses::error::Error { + fn from(err: Error) -> Self { + responses::error::Error { + failure_reason: format!("Authentication error: {err}"), + } + } +} + +impl From<auth::Error> for responses::error::Error { + fn from(err: auth::Error) -> Self { + responses::error::Error { + failure_reason: format!("Authentication error: {err}"), + } + } +} diff --git a/src/servers/http/v1/handlers/common/mod.rs b/src/servers/http/v1/handlers/common/mod.rs new file mode 100644 index 000000000..dc028cabf --- /dev/null +++ b/src/servers/http/v1/handlers/common/mod.rs @@ -0,0 +1,2 @@ +pub mod auth; +pub mod peer_ip; diff --git a/src/servers/http/v1/handlers/common/peer_ip.rs b/src/servers/http/v1/handlers/common/peer_ip.rs new file mode 100644 index 000000000..685324b4a --- /dev/null +++ b/src/servers/http/v1/handlers/common/peer_ip.rs @@ -0,0 +1,34 @@ +use crate::servers::http::v1::responses; +use crate::servers::http::v1::services::peer_ip_resolver::PeerIpResolutionError; + +impl From<PeerIpResolutionError> for responses::error::Error { + fn from(err: PeerIpResolutionError) -> Self { + responses::error::Error { + failure_reason: format!("Error resolving peer IP: {err}"), + } + } +} + +#[cfg(test)] +mod tests { + use std::panic::Location; + + use crate::servers::http::v1::responses; + use crate::servers::http::v1::services::peer_ip_resolver::PeerIpResolutionError; + + fn assert_error_response(error: &responses::error::Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. 
Error: {error:?}" + ); + } + + #[test] + fn it_should_map_a_peer_ip_resolution_error_into_an_error_response() { + let response = responses::error::Error::from(PeerIpResolutionError::MissingRightMostXForwardedForIp { + location: Location::caller(), + }); + + assert_error_response(&response, "Error resolving peer IP"); + } +} diff --git a/src/servers/http/v1/handlers/mod.rs b/src/servers/http/v1/handlers/mod.rs new file mode 100644 index 000000000..69b69127e --- /dev/null +++ b/src/servers/http/v1/handlers/mod.rs @@ -0,0 +1,14 @@ +use super::responses; +use crate::tracker::error::Error; + +pub mod announce; +pub mod common; +pub mod scrape; + +impl From for responses::error::Error { + fn from(err: Error) -> Self { + responses::error::Error { + failure_reason: format!("Tracker error: {err}"), + } + } +} diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs new file mode 100644 index 000000000..f55194810 --- /dev/null +++ b/src/servers/http/v1/handlers/scrape.rs @@ -0,0 +1,266 @@ +use std::sync::Arc; + +use axum::extract::State; +use axum::response::{IntoResponse, Response}; +use log::debug; + +use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; +use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; +use crate::servers::http::v1::extractors::scrape_request::ExtractRequest; +use crate::servers::http::v1::requests::scrape::Scrape; +use crate::servers::http::v1::services::peer_ip_resolver::{self, ClientIpSources}; +use crate::servers::http::v1::{responses, services}; +use crate::tracker::auth::Key; +use crate::tracker::{ScrapeData, Tracker}; + +#[allow(clippy::unused_async)] +pub async fn handle_without_key( + State(tracker): State>, + ExtractRequest(scrape_request): ExtractRequest, + ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, +) -> Response { + debug!("http scrape request: {:#?}", &scrape_request); + + handle(&tracker, &scrape_request, 
&client_ip_sources, None).await +} + +#[allow(clippy::unused_async)] +pub async fn handle_with_key( + State(tracker): State>, + ExtractRequest(scrape_request): ExtractRequest, + ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, + ExtractKey(key): ExtractKey, +) -> Response { + debug!("http scrape request: {:#?}", &scrape_request); + + handle(&tracker, &scrape_request, &client_ip_sources, Some(key)).await +} + +async fn handle( + tracker: &Arc, + scrape_request: &Scrape, + client_ip_sources: &ClientIpSources, + maybe_key: Option, +) -> Response { + let scrape_data = match handle_scrape(tracker, scrape_request, client_ip_sources, maybe_key).await { + Ok(scrape_data) => scrape_data, + Err(error) => return error.into_response(), + }; + build_response(scrape_data) +} + +/* code-review: authentication, authorization and peer IP resolution could be moved + from the handler (Axum) layer into the app layer `services::announce::invoke`. + That would make the handler even simpler and the code more reusable and decoupled from Axum. +*/ + +async fn handle_scrape( + tracker: &Arc, + scrape_request: &Scrape, + client_ip_sources: &ClientIpSources, + maybe_key: Option, +) -> Result { + // Authentication + let return_real_scrape_data = if tracker.requires_authentication() { + match maybe_key { + Some(key) => match tracker.authenticate(&key).await { + Ok(_) => true, + Err(_error) => false, + }, + None => false, + } + } else { + true + }; + + // Authorization for scrape requests is handled at the `Tracker` level + // for each torrent. 
+ + let peer_ip = match peer_ip_resolver::invoke(tracker.config.on_reverse_proxy, client_ip_sources) { + Ok(peer_ip) => peer_ip, + Err(error) => return Err(responses::error::Error::from(error)), + }; + + if return_real_scrape_data { + Ok(services::scrape::invoke(tracker, &scrape_request.info_hashes, &peer_ip).await) + } else { + Ok(services::scrape::fake(tracker, &scrape_request.info_hashes, &peer_ip).await) + } +} + +fn build_response(scrape_data: ScrapeData) -> Response { + responses::scrape::Bencoded::from(scrape_data).into_response() +} + +#[cfg(test)] +mod tests { + use std::net::IpAddr; + use std::str::FromStr; + + use torrust_tracker_test_helpers::configuration; + + use crate::servers::http::v1::requests::scrape::Scrape; + use crate::servers::http::v1::responses; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; + use crate::shared::bit_torrent::info_hash::InfoHash; + use crate::tracker::services::tracker_factory; + use crate::tracker::Tracker; + + fn private_tracker() -> Tracker { + tracker_factory(configuration::ephemeral_mode_private().into()) + } + + fn whitelisted_tracker() -> Tracker { + tracker_factory(configuration::ephemeral_mode_whitelisted().into()) + } + + fn tracker_on_reverse_proxy() -> Tracker { + tracker_factory(configuration::ephemeral_with_reverse_proxy().into()) + } + + fn tracker_not_on_reverse_proxy() -> Tracker { + tracker_factory(configuration::ephemeral_without_reverse_proxy().into()) + } + + fn sample_scrape_request() -> Scrape { + Scrape { + info_hashes: vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()], + } + } + + fn sample_client_ip_sources() -> ClientIpSources { + ClientIpSources { + right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), + connection_info_ip: Some(IpAddr::from_str("203.0.113.196").unwrap()), + } + } + + fn assert_error_response(error: &responses::error::Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + 
"Error response does not contain message: '{error_message}'. Error: {error:?}" + ); + } + + mod with_tracker_in_private_mode { + use std::str::FromStr; + use std::sync::Arc; + + use super::{private_tracker, sample_client_ip_sources, sample_scrape_request}; + use crate::servers::http::v1::handlers::scrape::handle_scrape; + use crate::tracker::{auth, ScrapeData}; + + #[tokio::test] + async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_missing() { + let tracker = Arc::new(private_tracker()); + + let scrape_request = sample_scrape_request(); + let maybe_key = None; + + let scrape_data = handle_scrape(&tracker, &scrape_request, &sample_client_ip_sources(), maybe_key) + .await + .unwrap(); + + let expected_scrape_data = ScrapeData::zeroed(&scrape_request.info_hashes); + + assert_eq!(scrape_data, expected_scrape_data); + } + + #[tokio::test] + async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_invalid() { + let tracker = Arc::new(private_tracker()); + + let scrape_request = sample_scrape_request(); + let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let maybe_key = Some(unregistered_key); + + let scrape_data = handle_scrape(&tracker, &scrape_request, &sample_client_ip_sources(), maybe_key) + .await + .unwrap(); + + let expected_scrape_data = ScrapeData::zeroed(&scrape_request.info_hashes); + + assert_eq!(scrape_data, expected_scrape_data); + } + } + + mod with_tracker_in_listed_mode { + + use std::sync::Arc; + + use super::{sample_client_ip_sources, sample_scrape_request, whitelisted_tracker}; + use crate::servers::http::v1::handlers::scrape::handle_scrape; + use crate::tracker::ScrapeData; + + #[tokio::test] + async fn it_should_return_zeroed_swarm_metadata_when_the_torrent_is_not_whitelisted() { + let tracker = Arc::new(whitelisted_tracker()); + + let scrape_request = sample_scrape_request(); + + let scrape_data = handle_scrape(&tracker, &scrape_request, 
&sample_client_ip_sources(), None) + .await + .unwrap(); + + let expected_scrape_data = ScrapeData::zeroed(&scrape_request.info_hashes); + + assert_eq!(scrape_data, expected_scrape_data); + } + } + + mod with_tracker_on_reverse_proxy { + use std::sync::Arc; + + use super::{sample_scrape_request, tracker_on_reverse_proxy}; + use crate::servers::http::v1::handlers::scrape::handle_scrape; + use crate::servers::http::v1::handlers::scrape::tests::assert_error_response; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; + + #[tokio::test] + async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { + let tracker = Arc::new(tracker_on_reverse_proxy()); + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }; + + let response = handle_scrape(&tracker, &sample_scrape_request(), &client_ip_sources, None) + .await + .unwrap_err(); + + assert_error_response( + &response, + "Error resolving peer IP: missing or invalid the right most X-Forwarded-For IP", + ); + } + } + + mod with_tracker_not_on_reverse_proxy { + use std::sync::Arc; + + use super::{sample_scrape_request, tracker_not_on_reverse_proxy}; + use crate::servers::http::v1::handlers::scrape::handle_scrape; + use crate::servers::http::v1::handlers::scrape::tests::assert_error_response; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; + + #[tokio::test] + async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { + let tracker = Arc::new(tracker_not_on_reverse_proxy()); + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }; + + let response = handle_scrape(&tracker, &sample_scrape_request(), &client_ip_sources, None) + .await + .unwrap_err(); + + assert_error_response( + &response, + "Error resolving peer IP: cannot get the client IP from the connection info", + ); + } + } +} diff --git 
a/src/servers/http/v1/launcher.rs b/src/servers/http/v1/launcher.rs new file mode 100644 index 000000000..4cfa4295d --- /dev/null +++ b/src/servers/http/v1/launcher.rs @@ -0,0 +1,146 @@ +use std::future::Future; +use std::net::SocketAddr; +use std::str::FromStr; +use std::sync::Arc; + +use async_trait::async_trait; +use axum_server::tls_rustls::RustlsConfig; +use axum_server::Handle; +use futures::future::BoxFuture; +use log::info; + +use super::routes::router; +use crate::servers::http::server::HttpServerLauncher; +use crate::tracker::Tracker; + +#[derive(Debug)] +pub enum Error { + Error(String), +} + +pub struct Launcher; + +impl Launcher { + pub fn start_from_tcp_listener_with_graceful_shutdown( + tcp_listener: std::net::TcpListener, + tracker: Arc, + shutdown_signal: F, + ) -> BoxFuture<'static, ()> + where + F: Future + Send + 'static, + { + let app = router(tracker); + + Box::pin(async { + axum::Server::from_tcp(tcp_listener) + .expect("Could not bind to tcp listener.") + .serve(app.into_make_service_with_connect_info::()) + .with_graceful_shutdown(shutdown_signal) + .await + .expect("Axum server crashed."); + }) + } + + pub fn start_tls_from_tcp_listener_with_graceful_shutdown( + tcp_listener: std::net::TcpListener, + (ssl_cert_path, ssl_key_path): (String, String), + tracker: Arc, + shutdown_signal: F, + ) -> BoxFuture<'static, ()> + where + F: Future + Send + 'static, + { + let app = router(tracker); + + let handle = Handle::new(); + + let cloned_handle = handle.clone(); + + tokio::task::spawn_local(async move { + shutdown_signal.await; + cloned_handle.shutdown(); + }); + + Box::pin(async { + let tls_config = RustlsConfig::from_pem_file(ssl_cert_path, ssl_key_path) + .await + .expect("Could not read tls cert."); + + axum_server::from_tcp_rustls(tcp_listener, tls_config) + .handle(handle) + .serve(app.into_make_service_with_connect_info::()) + .await + .expect("Axum server crashed."); + }) + } +} + +#[async_trait] +impl HttpServerLauncher for Launcher { + 
fn new() -> Self { + Self {} + } + + fn start_with_graceful_shutdown( + &self, + cfg: torrust_tracker_configuration::HttpTracker, + tracker: Arc, + shutdown_signal: F, + ) -> (SocketAddr, BoxFuture<'static, ()>) + where + F: Future + Send + 'static, + { + let addr = SocketAddr::from_str(&cfg.bind_address).expect("bind_address is not a valid SocketAddr."); + let tcp_listener = std::net::TcpListener::bind(addr).expect("Could not bind tcp_listener to address."); + let bind_addr = tcp_listener + .local_addr() + .expect("Could not get local_addr from tcp_listener."); + + if let (true, Some(ssl_cert_path), Some(ssl_key_path)) = (cfg.ssl_enabled, &cfg.ssl_cert_path, &cfg.ssl_key_path) { + let server = Self::start_tls_from_tcp_listener_with_graceful_shutdown( + tcp_listener, + (ssl_cert_path.to_string(), ssl_key_path.to_string()), + tracker, + shutdown_signal, + ); + + (bind_addr, server) + } else { + let server = Self::start_from_tcp_listener_with_graceful_shutdown(tcp_listener, tracker, shutdown_signal); + + (bind_addr, server) + } + } +} + +pub fn start(socket_addr: std::net::SocketAddr, tracker: Arc) -> impl Future> { + let app = router(tracker); + + let server = axum::Server::bind(&socket_addr).serve(app.into_make_service_with_connect_info::()); + + server.with_graceful_shutdown(async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + info!("Stopping Torrust HTTP tracker server on http://{} ...", socket_addr); + }) +} + +pub fn start_tls( + socket_addr: std::net::SocketAddr, + ssl_config: RustlsConfig, + tracker: Arc, +) -> impl Future> { + let app = router(tracker); + + let handle = Handle::new(); + let shutdown_handle = handle.clone(); + + tokio::spawn(async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + info!("Stopping Torrust HTTP tracker server on https://{} ...", socket_addr); + shutdown_handle.shutdown(); + }); + + axum_server::bind_rustls(socket_addr, ssl_config) + .handle(handle) 
+ .serve(app.into_make_service_with_connect_info::()) +} diff --git a/src/servers/http/v1/mod.rs b/src/servers/http/v1/mod.rs new file mode 100644 index 000000000..79d230255 --- /dev/null +++ b/src/servers/http/v1/mod.rs @@ -0,0 +1,8 @@ +pub mod extractors; +pub mod handlers; +pub mod launcher; +pub mod query; +pub mod requests; +pub mod responses; +pub mod routes; +pub mod services; diff --git a/src/servers/http/v1/query.rs b/src/servers/http/v1/query.rs new file mode 100644 index 000000000..c40e7949f --- /dev/null +++ b/src/servers/http/v1/query.rs @@ -0,0 +1,305 @@ +use std::panic::Location; +use std::str::FromStr; + +use multimap::MultiMap; +use thiserror::Error; + +type ParamName = String; +type ParamValue = String; + +/// Represent a URL query component: +/// +/// ```text +/// URI = scheme ":" ["//" authority] path ["?" query] ["#" fragment] +/// ``` +#[derive(Debug)] +pub struct Query { + /* code-review: + - Consider using a third-party crate. + - Conversion from/to string is not deterministic. Params can be in a different order in the query string. + */ + params: MultiMap, +} + +impl Query { + /// Returns only the first param value even if it has multiple values like this: + /// + /// ```text + /// param1=value1¶m1=value2 + /// ``` + /// + /// In that case `get_param("param1")` will return `value1`. + #[must_use] + pub fn get_param(&self, name: &str) -> Option { + self.params.get(name).map(|pair| pair.value.clone()) + } + + /// Returns all the param values as a vector even if it has only one value. 
+ #[must_use] + pub fn get_param_vec(&self, name: &str) -> Option> { + self.params.get_vec(name).map(|pairs| { + let mut param_values = vec![]; + for pair in pairs { + param_values.push(pair.value.to_string()); + } + param_values + }) + } +} + +#[derive(Error, Debug)] +pub enum ParseQueryError { + #[error("invalid param {raw_param} in {location}")] + InvalidParam { + location: &'static Location<'static>, + raw_param: String, + }, +} + +impl FromStr for Query { + type Err = ParseQueryError; + + fn from_str(raw_query: &str) -> Result { + let mut params: MultiMap = MultiMap::new(); + + let raw_params = raw_query.trim().trim_start_matches('?').split('&').collect::>(); + + for raw_param in raw_params { + let pair: NameValuePair = raw_param.parse()?; + let param_name = pair.name.clone(); + params.insert(param_name, pair); + } + + Ok(Self { params }) + } +} + +impl From> for Query { + fn from(raw_params: Vec<(&str, &str)>) -> Self { + let mut params: MultiMap = MultiMap::new(); + + for raw_param in raw_params { + params.insert(raw_param.0.to_owned(), NameValuePair::new(raw_param.0, raw_param.1)); + } + + Self { params } + } +} + +impl std::fmt::Display for Query { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let query = self + .params + .iter_all() + .map(|param| format!("{}", FieldValuePairSet::from_vec(param.1))) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +#[derive(Debug, PartialEq, Clone)] +struct NameValuePair { + name: ParamName, + value: ParamValue, +} + +impl NameValuePair { + pub fn new(name: &str, value: &str) -> Self { + Self { + name: name.to_owned(), + value: value.to_owned(), + } + } +} + +impl FromStr for NameValuePair { + type Err = ParseQueryError; + + fn from_str(raw_param: &str) -> Result { + let pair = raw_param.split('=').collect::>(); + + if pair.len() != 2 { + return Err(ParseQueryError::InvalidParam { + location: Location::caller(), + raw_param: raw_param.to_owned(), + }); + } + + Ok(Self { + name: 
pair[0].to_owned(), + value: pair[1].to_owned(), + }) + } +} + +impl std::fmt::Display for NameValuePair { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}={}", self.name, self.value) + } +} + +#[derive(Debug, PartialEq)] +struct FieldValuePairSet { + pairs: Vec, +} + +impl FieldValuePairSet { + fn from_vec(pair_vec: &Vec) -> Self { + let mut pairs: Vec = vec![]; + + for pair in pair_vec { + pairs.push(pair.clone()); + } + + Self { pairs } + } +} + +impl std::fmt::Display for FieldValuePairSet { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let query = self + .pairs + .iter() + .map(|pair| format!("{pair}")) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +#[cfg(test)] +mod tests { + + mod url_query { + use crate::servers::http::v1::query::Query; + + #[test] + fn should_parse_the_query_params_from_an_url_query_string() { + let raw_query = + "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548"; + + let query = raw_query.parse::().unwrap(); + + assert_eq!( + query.get_param("info_hash").unwrap(), + "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" + ); + assert_eq!(query.get_param("peer_id").unwrap(), "-qB00000000000000001"); + assert_eq!(query.get_param("port").unwrap(), "17548"); + } + + #[test] + fn should_be_instantiated_from_a_string_pair_vector() { + let query = Query::from(vec![("param1", "value1"), ("param2", "value2")]); + + assert_eq!(query.get_param("param1"), Some("value1".to_string())); + assert_eq!(query.get_param("param2"), Some("value2".to_string())); + } + + #[test] + fn should_fail_parsing_an_invalid_query_string() { + let invalid_raw_query = "name=value=value"; + + let query = invalid_raw_query.parse::(); + + assert!(query.is_err()); + } + + #[test] + fn should_ignore_the_preceding_question_mark_if_it_exists() { + let raw_query = "?name=value"; + + let query = raw_query.parse::().unwrap(); + + 
assert_eq!(query.get_param("name"), Some("value".to_string())); + } + + #[test] + fn should_trim_whitespaces() { + let raw_query = " name=value "; + + let query = raw_query.parse::().unwrap(); + + assert_eq!(query.get_param("name"), Some("value".to_string())); + } + + mod should_allow_more_than_one_value_for_the_same_param { + use crate::servers::http::v1::query::Query; + + #[test] + fn instantiated_from_a_vector() { + let query1 = Query::from(vec![("param1", "value1"), ("param1", "value2")]); + assert_eq!( + query1.get_param_vec("param1"), + Some(vec!["value1".to_string(), "value2".to_string()]) + ); + } + + #[test] + fn parsed_from_an_string() { + let query2 = "param1=value1¶m1=value2".parse::().unwrap(); + assert_eq!( + query2.get_param_vec("param1"), + Some(vec!["value1".to_string(), "value2".to_string()]) + ); + } + } + + mod should_be_displayed { + use crate::servers::http::v1::query::Query; + + #[test] + fn with_one_param() { + assert_eq!("param1=value1".parse::().unwrap().to_string(), "param1=value1"); + } + + #[test] + fn with_multiple_params() { + let query = "param1=value1¶m2=value2".parse::().unwrap().to_string(); + assert!(query == "param1=value1¶m2=value2" || query == "param2=value2¶m1=value1"); + } + + #[test] + fn with_multiple_values_for_the_same_param() { + let query = "param1=value1¶m1=value2".parse::().unwrap().to_string(); + assert!(query == "param1=value1¶m1=value2" || query == "param1=value2¶m1=value1"); + } + } + + mod param_name_value_pair { + use crate::servers::http::v1::query::NameValuePair; + + #[test] + fn should_parse_a_single_query_param() { + let raw_param = "name=value"; + + let param = raw_param.parse::().unwrap(); + + assert_eq!( + param, + NameValuePair { + name: "name".to_string(), + value: "value".to_string(), + } + ); + } + + #[test] + fn should_fail_parsing_an_invalid_query_param() { + let invalid_raw_param = "name=value=value"; + + let query = invalid_raw_param.parse::(); + + assert!(query.is_err()); + } + + #[test] + fn 
should_be_displayed() { + assert_eq!("name=value".parse::().unwrap().to_string(), "name=value"); + } + } + } +} diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs new file mode 100644 index 000000000..7ab260d99 --- /dev/null +++ b/src/servers/http/v1/requests/announce.rs @@ -0,0 +1,476 @@ +use std::fmt; +use std::panic::Location; +use std::str::FromStr; + +use thiserror::Error; +use torrust_tracker_located_error::{Located, LocatedError}; + +use crate::servers::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; +use crate::servers::http::v1::query::{ParseQueryError, Query}; +use crate::servers::http::v1::responses; +use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; +use crate::tracker::peer::{self, IdConversionError}; + +pub type NumberOfBytes = i64; + +// Query param names +const INFO_HASH: &str = "info_hash"; +const PEER_ID: &str = "peer_id"; +const PORT: &str = "port"; +const DOWNLOADED: &str = "downloaded"; +const UPLOADED: &str = "uploaded"; +const LEFT: &str = "left"; +const EVENT: &str = "event"; +const COMPACT: &str = "compact"; + +#[derive(Debug, PartialEq)] +pub struct Announce { + // Mandatory params + pub info_hash: InfoHash, + pub peer_id: peer::Id, + pub port: u16, + // Optional params + pub downloaded: Option, + pub uploaded: Option, + pub left: Option, + pub event: Option, + pub compact: Option, +} + +#[derive(Error, Debug)] +pub enum ParseAnnounceQueryError { + #[error("missing query params for announce request in {location}")] + MissingParams { location: &'static Location<'static> }, + #[error("missing param {param_name} in {location}")] + MissingParam { + location: &'static Location<'static>, + param_name: String, + }, + #[error("invalid param value {param_value} for {param_name} in {location}")] + InvalidParam { + param_name: String, + param_value: String, + location: &'static Location<'static>, + }, + #[error("param value overflow {param_value} for 
{param_name} in {location}")] + NumberOfBytesOverflow { + param_name: String, + param_value: String, + location: &'static Location<'static>, + }, + #[error("invalid param value {param_value} for {param_name} in {source}")] + InvalidInfoHashParam { + param_name: String, + param_value: String, + source: LocatedError<'static, ConversionError>, + }, + #[error("invalid param value {param_value} for {param_name} in {source}")] + InvalidPeerIdParam { + param_name: String, + param_value: String, + source: LocatedError<'static, IdConversionError>, + }, +} + +#[derive(PartialEq, Debug)] +pub enum Event { + Started, + Stopped, + Completed, +} + +impl FromStr for Event { + type Err = ParseAnnounceQueryError; + + fn from_str(raw_param: &str) -> Result { + match raw_param { + "started" => Ok(Self::Started), + "stopped" => Ok(Self::Stopped), + "completed" => Ok(Self::Completed), + _ => Err(ParseAnnounceQueryError::InvalidParam { + param_name: EVENT.to_owned(), + param_value: raw_param.to_owned(), + location: Location::caller(), + }), + } + } +} + +impl fmt::Display for Event { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Event::Started => write!(f, "started"), + Event::Stopped => write!(f, "stopped"), + Event::Completed => write!(f, "completed"), + } + } +} + +#[derive(PartialEq, Debug)] +pub enum Compact { + Accepted = 1, + NotAccepted = 0, +} + +impl fmt::Display for Compact { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Compact::Accepted => write!(f, "1"), + Compact::NotAccepted => write!(f, "0"), + } + } +} + +impl FromStr for Compact { + type Err = ParseAnnounceQueryError; + + fn from_str(raw_param: &str) -> Result { + match raw_param { + "1" => Ok(Self::Accepted), + "0" => Ok(Self::NotAccepted), + _ => Err(ParseAnnounceQueryError::InvalidParam { + param_name: COMPACT.to_owned(), + param_value: raw_param.to_owned(), + location: Location::caller(), + }), + } + } +} + +impl From for responses::error::Error { + fn 
from(err: ParseQueryError) -> Self { + responses::error::Error { + failure_reason: format!("Cannot parse query params: {err}"), + } + } +} + +impl From for responses::error::Error { + fn from(err: ParseAnnounceQueryError) -> Self { + responses::error::Error { + failure_reason: format!("Cannot parse query params for announce request: {err}"), + } + } +} + +impl TryFrom for Announce { + type Error = ParseAnnounceQueryError; + + fn try_from(query: Query) -> Result { + Ok(Self { + info_hash: extract_info_hash(&query)?, + peer_id: extract_peer_id(&query)?, + port: extract_port(&query)?, + downloaded: extract_downloaded(&query)?, + uploaded: extract_uploaded(&query)?, + left: extract_left(&query)?, + event: extract_event(&query)?, + compact: extract_compact(&query)?, + }) + } +} + +// Mandatory params + +fn extract_info_hash(query: &Query) -> Result { + match query.get_param(INFO_HASH) { + Some(raw_param) => { + Ok( + percent_decode_info_hash(&raw_param).map_err(|err| ParseAnnounceQueryError::InvalidInfoHashParam { + param_name: INFO_HASH.to_owned(), + param_value: raw_param.clone(), + source: Located(err).into(), + })?, + ) + } + None => { + return Err(ParseAnnounceQueryError::MissingParam { + location: Location::caller(), + param_name: INFO_HASH.to_owned(), + }) + } + } +} + +fn extract_peer_id(query: &Query) -> Result { + match query.get_param(PEER_ID) { + Some(raw_param) => Ok( + percent_decode_peer_id(&raw_param).map_err(|err| ParseAnnounceQueryError::InvalidPeerIdParam { + param_name: PEER_ID.to_owned(), + param_value: raw_param.clone(), + source: Located(err).into(), + })?, + ), + None => { + return Err(ParseAnnounceQueryError::MissingParam { + location: Location::caller(), + param_name: PEER_ID.to_owned(), + }) + } + } +} + +fn extract_port(query: &Query) -> Result { + match query.get_param(PORT) { + Some(raw_param) => Ok(u16::from_str(&raw_param).map_err(|_e| ParseAnnounceQueryError::InvalidParam { + param_name: PORT.to_owned(), + param_value: raw_param.clone(), 
+ location: Location::caller(), + })?), + None => { + return Err(ParseAnnounceQueryError::MissingParam { + location: Location::caller(), + param_name: PORT.to_owned(), + }) + } + } +} + +// Optional params + +fn extract_downloaded(query: &Query) -> Result, ParseAnnounceQueryError> { + extract_number_of_bytes_from_param(DOWNLOADED, query) +} + +fn extract_uploaded(query: &Query) -> Result, ParseAnnounceQueryError> { + extract_number_of_bytes_from_param(UPLOADED, query) +} + +fn extract_left(query: &Query) -> Result, ParseAnnounceQueryError> { + extract_number_of_bytes_from_param(LEFT, query) +} + +fn extract_number_of_bytes_from_param(param_name: &str, query: &Query) -> Result, ParseAnnounceQueryError> { + match query.get_param(param_name) { + Some(raw_param) => { + let number_of_bytes = u64::from_str(&raw_param).map_err(|_e| ParseAnnounceQueryError::InvalidParam { + param_name: param_name.to_owned(), + param_value: raw_param.clone(), + location: Location::caller(), + })?; + + Ok(Some(i64::try_from(number_of_bytes).map_err(|_e| { + ParseAnnounceQueryError::NumberOfBytesOverflow { + param_name: param_name.to_owned(), + param_value: raw_param.clone(), + location: Location::caller(), + } + })?)) + } + None => Ok(None), + } +} + +fn extract_event(query: &Query) -> Result, ParseAnnounceQueryError> { + match query.get_param(EVENT) { + Some(raw_param) => Ok(Some(Event::from_str(&raw_param)?)), + None => Ok(None), + } +} + +fn extract_compact(query: &Query) -> Result, ParseAnnounceQueryError> { + match query.get_param(COMPACT) { + Some(raw_param) => Ok(Some(Compact::from_str(&raw_param)?)), + None => Ok(None), + } +} + +#[cfg(test)] +mod tests { + + mod announce_request { + + use crate::servers::http::v1::query::Query; + use crate::servers::http::v1::requests::announce::{ + Announce, Compact, Event, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, + }; + use crate::shared::bit_torrent::info_hash::InfoHash; + use crate::tracker::peer; + + #[test] + fn 
should_be_instantiated_from_the_url_query_with_only_the_mandatory_params() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + ]) + .to_string(); + + let query = raw_query.parse::().unwrap(); + + let announce_request = Announce::try_from(query).unwrap(); + + assert_eq!( + announce_request, + Announce { + info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + peer_id: "-qB00000000000000001".parse::().unwrap(), + port: 17548, + downloaded: None, + uploaded: None, + left: None, + event: None, + compact: None, + } + ); + } + + #[test] + fn should_be_instantiated_from_the_url_query_params() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (DOWNLOADED, "1"), + (UPLOADED, "2"), + (LEFT, "3"), + (EVENT, "started"), + (COMPACT, "0"), + ]) + .to_string(); + + let query = raw_query.parse::().unwrap(); + + let announce_request = Announce::try_from(query).unwrap(); + + assert_eq!( + announce_request, + Announce { + info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + peer_id: "-qB00000000000000001".parse::().unwrap(), + port: 17548, + downloaded: Some(1), + uploaded: Some(2), + left: Some(3), + event: Some(Event::Started), + compact: Some(Compact::NotAccepted), + } + ); + } + + mod when_it_is_instantiated_from_the_url_query_params { + + use crate::servers::http::v1::query::Query; + use crate::servers::http::v1::requests::announce::{ + Announce, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, + }; + + #[test] + fn it_should_fail_if_the_query_does_not_include_all_the_mandatory_params() { + let raw_query_without_info_hash = "peer_id=-qB00000000000000001&port=17548"; + + assert!(Announce::try_from(raw_query_without_info_hash.parse::().unwrap()).is_err()); + + let raw_query_without_peer_id 
= "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&port=17548"; + + assert!(Announce::try_from(raw_query_without_peer_id.parse::().unwrap()).is_err()); + + let raw_query_without_port = + "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001"; + + assert!(Announce::try_from(raw_query_without_port.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_info_hash_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "INVALID_INFO_HASH_VALUE"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_peer_id_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "INVALID_PEER_ID_VALUE"), + (PORT, "17548"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_port_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "INVALID_PORT_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_downloaded_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (DOWNLOADED, "INVALID_DOWNLOADED_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_uploaded_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (UPLOADED, 
"INVALID_UPLOADED_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_left_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (LEFT, "INVALID_LEFT_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_event_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (EVENT, "INVALID_EVENT_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_compact_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (COMPACT, "INVALID_COMPACT_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + } + } +} diff --git a/src/servers/http/v1/requests/mod.rs b/src/servers/http/v1/requests/mod.rs new file mode 100644 index 000000000..776d2dfbf --- /dev/null +++ b/src/servers/http/v1/requests/mod.rs @@ -0,0 +1,2 @@ +pub mod announce; +pub mod scrape; diff --git a/src/servers/http/v1/requests/scrape.rs b/src/servers/http/v1/requests/scrape.rs new file mode 100644 index 000000000..a7ec962e2 --- /dev/null +++ b/src/servers/http/v1/requests/scrape.rs @@ -0,0 +1,128 @@ +use std::panic::Location; + +use thiserror::Error; +use torrust_tracker_located_error::{Located, LocatedError}; + +use crate::servers::http::percent_encoding::percent_decode_info_hash; +use crate::servers::http::v1::query::Query; +use crate::servers::http::v1::responses; +use 
crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; + +pub type NumberOfBytes = i64; + +// Query param names +const INFO_HASH: &str = "info_hash"; + +#[derive(Debug, PartialEq)] +pub struct Scrape { + pub info_hashes: Vec, +} + +#[derive(Error, Debug)] +pub enum ParseScrapeQueryError { + #[error("missing query params for scrape request in {location}")] + MissingParams { location: &'static Location<'static> }, + #[error("missing param {param_name} in {location}")] + MissingParam { + location: &'static Location<'static>, + param_name: String, + }, + #[error("invalid param value {param_value} for {param_name} in {source}")] + InvalidInfoHashParam { + param_name: String, + param_value: String, + source: LocatedError<'static, ConversionError>, + }, +} + +impl From for responses::error::Error { + fn from(err: ParseScrapeQueryError) -> Self { + responses::error::Error { + failure_reason: format!("Cannot parse query params for scrape request: {err}"), + } + } +} + +impl TryFrom for Scrape { + type Error = ParseScrapeQueryError; + + fn try_from(query: Query) -> Result { + Ok(Self { + info_hashes: extract_info_hashes(&query)?, + }) + } +} + +fn extract_info_hashes(query: &Query) -> Result, ParseScrapeQueryError> { + match query.get_param_vec(INFO_HASH) { + Some(raw_params) => { + let mut info_hashes = vec![]; + + for raw_param in raw_params { + let info_hash = + percent_decode_info_hash(&raw_param).map_err(|err| ParseScrapeQueryError::InvalidInfoHashParam { + param_name: INFO_HASH.to_owned(), + param_value: raw_param.clone(), + source: Located(err).into(), + })?; + + info_hashes.push(info_hash); + } + + Ok(info_hashes) + } + None => { + return Err(ParseScrapeQueryError::MissingParam { + location: Location::caller(), + param_name: INFO_HASH.to_owned(), + }) + } + } +} + +#[cfg(test)] +mod tests { + + mod scrape_request { + + use crate::servers::http::v1::query::Query; + use crate::servers::http::v1::requests::scrape::{Scrape, INFO_HASH}; + use 
crate::shared::bit_torrent::info_hash::InfoHash; + + #[test] + fn should_be_instantiated_from_the_url_query_with_only_one_infohash() { + let raw_query = Query::from(vec![(INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0")]).to_string(); + + let query = raw_query.parse::().unwrap(); + + let scrape_request = Scrape::try_from(query).unwrap(); + + assert_eq!( + scrape_request, + Scrape { + info_hashes: vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()], + } + ); + } + + mod when_it_is_instantiated_from_the_url_query_params { + + use crate::servers::http::v1::query::Query; + use crate::servers::http::v1::requests::scrape::{Scrape, INFO_HASH}; + + #[test] + fn it_should_fail_if_the_query_does_not_include_the_info_hash_param() { + let raw_query_without_info_hash = "another_param=NOT_RELEVANT"; + + assert!(Scrape::try_from(raw_query_without_info_hash.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_info_hash_param_is_invalid() { + let raw_query = Query::from(vec![(INFO_HASH, "INVALID_INFO_HASH_VALUE")]).to_string(); + + assert!(Scrape::try_from(raw_query.parse::().unwrap()).is_err()); + } + } + } +} diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs new file mode 100644 index 000000000..4902e0d62 --- /dev/null +++ b/src/servers/http/v1/responses/announce.rs @@ -0,0 +1,333 @@ +use std::io::Write; +use std::net::IpAddr; +use std::panic::Location; + +use axum::http::StatusCode; +use axum::response::{IntoResponse, Response}; +use bip_bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut}; +use serde::{self, Deserialize, Serialize}; +use thiserror::Error; + +use crate::servers::http::v1::responses; +use crate::tracker::{self, AnnounceData}; + +/// Normal (non compact) "announce" response +/// +/// BEP 03: The ``BitTorrent`` Protocol Specification +/// +/// +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct NonCompact { + pub interval: u32, + 
#[serde(rename = "min interval")] + pub interval_min: u32, + pub complete: u32, + pub incomplete: u32, + pub peers: Vec, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Peer { + pub peer_id: [u8; 20], + pub ip: IpAddr, + pub port: u16, +} + +impl Peer { + #[must_use] + pub fn ben_map(&self) -> BencodeMut { + ben_map! { + "peer id" => ben_bytes!(self.peer_id.clone().to_vec()), + "ip" => ben_bytes!(self.ip.to_string()), + "port" => ben_int!(i64::from(self.port)) + } + } +} + +impl From for Peer { + fn from(peer: tracker::peer::Peer) -> Self { + Peer { + peer_id: peer.peer_id.to_bytes(), + ip: peer.peer_addr.ip(), + port: peer.peer_addr.port(), + } + } +} + +impl NonCompact { + /// # Panics + /// + /// Will return an error if it can't access the bencode as a mutable `BListAccess`. + #[must_use] + pub fn body(&self) -> Vec { + let mut peers_list = ben_list!(); + let peers_list_mut = peers_list.list_mut().unwrap(); + for peer in &self.peers { + peers_list_mut.push(peer.ben_map()); + } + + (ben_map! 
{ + "complete" => ben_int!(i64::from(self.complete)), + "incomplete" => ben_int!(i64::from(self.incomplete)), + "interval" => ben_int!(i64::from(self.interval)), + "min interval" => ben_int!(i64::from(self.interval_min)), + "peers" => peers_list.clone() + }) + .encode() + } +} + +impl IntoResponse for NonCompact { + fn into_response(self) -> Response { + (StatusCode::OK, self.body()).into_response() + } +} + +impl From for NonCompact { + fn from(domain_announce_response: AnnounceData) -> Self { + let peers: Vec = domain_announce_response.peers.iter().map(|peer| Peer::from(*peer)).collect(); + + Self { + interval: domain_announce_response.interval, + interval_min: domain_announce_response.interval_min, + complete: domain_announce_response.swarm_stats.seeders, + incomplete: domain_announce_response.swarm_stats.leechers, + peers, + } + } +} + +/// Compact "announce" response +/// +/// BEP 23: Tracker Returns Compact Peer Lists +/// +/// +/// BEP 07: IPv6 Tracker Extension +/// +/// +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Compact { + pub interval: u32, + #[serde(rename = "min interval")] + pub interval_min: u32, + pub complete: u32, + pub incomplete: u32, + pub peers: Vec, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct CompactPeer { + pub ip: IpAddr, + pub port: u16, +} + +impl CompactPeer { + /// # Errors + /// + /// Will return `Err` if internally interrupted. + pub fn bytes(&self) -> Result, Box> { + let mut bytes: Vec = Vec::new(); + match self.ip { + IpAddr::V4(ip) => { + bytes.write_all(&u32::from(ip).to_be_bytes())?; + } + IpAddr::V6(ip) => { + bytes.write_all(&u128::from(ip).to_be_bytes())?; + } + } + bytes.write_all(&self.port.to_be_bytes())?; + Ok(bytes) + } +} + +impl From for CompactPeer { + fn from(peer: tracker::peer::Peer) -> Self { + CompactPeer { + ip: peer.peer_addr.ip(), + port: peer.peer_addr.port(), + } + } +} + +impl Compact { + /// # Errors + /// + /// Will return `Err` if internally interrupted. 
+ pub fn body(&self) -> Result, Box> { + let bytes = (ben_map! { + "complete" => ben_int!(i64::from(self.complete)), + "incomplete" => ben_int!(i64::from(self.incomplete)), + "interval" => ben_int!(i64::from(self.interval)), + "min interval" => ben_int!(i64::from(self.interval_min)), + "peers" => ben_bytes!(self.peers_v4_bytes()?), + "peers6" => ben_bytes!(self.peers_v6_bytes()?) + }) + .encode(); + + Ok(bytes) + } + + fn peers_v4_bytes(&self) -> Result, Box> { + let mut bytes: Vec = Vec::new(); + for compact_peer in &self.peers { + match compact_peer.ip { + IpAddr::V4(_ip) => { + let peer_bytes = compact_peer.bytes()?; + bytes.write_all(&peer_bytes)?; + } + IpAddr::V6(_) => {} + } + } + Ok(bytes) + } + + fn peers_v6_bytes(&self) -> Result, Box> { + let mut bytes: Vec = Vec::new(); + for compact_peer in &self.peers { + match compact_peer.ip { + IpAddr::V6(_ip) => { + let peer_bytes = compact_peer.bytes()?; + bytes.write_all(&peer_bytes)?; + } + IpAddr::V4(_) => {} + } + } + Ok(bytes) + } +} + +#[derive(Error, Debug)] +pub enum CompactSerializationError { + #[error("cannot write bytes: {inner_error} in {location}")] + CannotWriteBytes { + location: &'static Location<'static>, + inner_error: String, + }, +} + +impl From for responses::error::Error { + fn from(err: CompactSerializationError) -> Self { + responses::error::Error { + failure_reason: format!("{err}"), + } + } +} + +impl IntoResponse for Compact { + fn into_response(self) -> Response { + match self.body() { + Ok(bytes) => (StatusCode::OK, bytes).into_response(), + Err(err) => responses::error::Error::from(CompactSerializationError::CannotWriteBytes { + location: Location::caller(), + inner_error: format!("{err}"), + }) + .into_response(), + } + } +} + +impl From for Compact { + fn from(domain_announce_response: AnnounceData) -> Self { + let peers: Vec = domain_announce_response + .peers + .iter() + .map(|peer| CompactPeer::from(*peer)) + .collect(); + + Self { + interval: domain_announce_response.interval, 
+ interval_min: domain_announce_response.interval_min, + complete: domain_announce_response.swarm_stats.seeders, + incomplete: domain_announce_response.swarm_stats.leechers, + peers, + } + } +} + +#[cfg(test)] +mod tests { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + + use super::{NonCompact, Peer}; + use crate::servers::http::v1::responses::announce::{Compact, CompactPeer}; + + // Some ascii values used in tests: + // + // +-----------------+ + // | Dec | Hex | Chr | + // +-----------------+ + // | 105 | 69 | i | + // | 112 | 70 | p | + // +-----------------+ + // + // IP addresses and port numbers used in tests are chosen so that their bencoded representation + // is also a valid string which makes asserts more readable. + + #[test] + fn non_compact_announce_response_can_be_bencoded() { + let response = NonCompact { + interval: 111, + interval_min: 222, + complete: 333, + incomplete: 444, + peers: vec![ + // IPV4 + Peer { + peer_id: *b"-qB00000000000000001", + ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 + port: 0x7070, // 28784 + }, + // IPV6 + Peer { + peer_id: *b"-qB00000000000000002", + ip: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + port: 0x7070, // 28784 + }, + ], + }; + + let bytes = response.body(); + + // cspell:disable-next-line + let expected_bytes = b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peersld2:ip15:105.105.105.1057:peer id20:-qB000000000000000014:porti28784eed2:ip39:6969:6969:6969:6969:6969:6969:6969:69697:peer id20:-qB000000000000000024:porti28784eeee"; + + assert_eq!( + String::from_utf8(bytes).unwrap(), + String::from_utf8(expected_bytes.to_vec()).unwrap() + ); + } + + #[test] + fn compact_announce_response_can_be_bencoded() { + let response = Compact { + interval: 111, + interval_min: 222, + complete: 333, + incomplete: 444, + peers: vec![ + // IPV4 + CompactPeer { + ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 
105.105.105.105 + port: 0x7070, // 28784 + }, + // IPV6 + CompactPeer { + ip: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + port: 0x7070, // 28784 + }, + ], + }; + + let bytes = response.body().unwrap(); + + let expected_bytes = + // cspell:disable-next-line + b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peers6:iiiipp6:peers618:iiiiiiiiiiiiiiiippe"; + + assert_eq!( + String::from_utf8(bytes).unwrap(), + String::from_utf8(expected_bytes.to_vec()).unwrap() + ); + } +} diff --git a/src/servers/http/v1/responses/error.rs b/src/servers/http/v1/responses/error.rs new file mode 100644 index 000000000..0bcdbd9fb --- /dev/null +++ b/src/servers/http/v1/responses/error.rs @@ -0,0 +1,40 @@ +use axum::http::StatusCode; +use axum::response::{IntoResponse, Response}; +use serde::{self, Serialize}; + +#[derive(Serialize, Debug, PartialEq)] +pub struct Error { + #[serde(rename = "failure reason")] + pub failure_reason: String, +} + +impl Error { + /// # Panics + /// + /// It would panic if the `Error` struct contained an inappropriate type. 
+ #[must_use] + pub fn write(&self) -> String { + serde_bencode::to_string(&self).unwrap() + } +} + +impl IntoResponse for Error { + fn into_response(self) -> Response { + (StatusCode::OK, self.write()).into_response() + } +} + +#[cfg(test)] +mod tests { + + use super::Error; + + #[test] + fn http_tracker_errors_can_be_bencoded() { + let err = Error { + failure_reason: "error message".to_owned(), + }; + + assert_eq!(err.write(), "d14:failure reason13:error messagee"); // cspell:disable-line + } +} diff --git a/src/servers/http/v1/responses/mod.rs b/src/servers/http/v1/responses/mod.rs new file mode 100644 index 000000000..bdc689056 --- /dev/null +++ b/src/servers/http/v1/responses/mod.rs @@ -0,0 +1,3 @@ +pub mod announce; +pub mod error; +pub mod scrape; diff --git a/src/servers/http/v1/responses/scrape.rs b/src/servers/http/v1/responses/scrape.rs new file mode 100644 index 000000000..36e4f3282 --- /dev/null +++ b/src/servers/http/v1/responses/scrape.rs @@ -0,0 +1,106 @@ +use std::borrow::Cow; + +use axum::http::StatusCode; +use axum::response::{IntoResponse, Response}; +use bip_bencode::{ben_int, ben_map, BMutAccess}; + +use crate::tracker::ScrapeData; + +#[derive(Debug, PartialEq, Default)] +pub struct Bencoded { + scrape_data: ScrapeData, +} + +impl Bencoded { + /// # Panics + /// + /// Will return an error if it can't access the bencode as a mutable `BDictAccess`. + #[must_use] + pub fn body(&self) -> Vec { + let mut scrape_list = ben_map!(); + + let scrape_list_mut = scrape_list.dict_mut().unwrap(); + + for (info_hash, value) in &self.scrape_data.files { + scrape_list_mut.insert( + Cow::from(info_hash.bytes().to_vec()), + ben_map! { + "complete" => ben_int!(i64::from(value.complete)), + "downloaded" => ben_int!(i64::from(value.downloaded)), + "incomplete" => ben_int!(i64::from(value.incomplete)) + }, + ); + } + + (ben_map! 
{ + "files" => scrape_list + }) + .encode() + } +} + +impl From for Bencoded { + fn from(scrape_data: ScrapeData) -> Self { + Self { scrape_data } + } +} + +impl IntoResponse for Bencoded { + fn into_response(self) -> Response { + (StatusCode::OK, self.body()).into_response() + } +} + +#[cfg(test)] +mod tests { + + mod scrape_response { + use crate::servers::http::v1::responses::scrape::Bencoded; + use crate::shared::bit_torrent::info_hash::InfoHash; + use crate::tracker::torrent::SwarmMetadata; + use crate::tracker::ScrapeData; + + fn sample_scrape_data() -> ScrapeData { + let info_hash = InfoHash([0x69; 20]); + let mut scrape_data = ScrapeData::empty(); + scrape_data.add_file( + &info_hash, + SwarmMetadata { + complete: 1, + downloaded: 2, + incomplete: 3, + }, + ); + scrape_data + } + + #[test] + fn should_be_converted_from_scrape_data() { + let response = Bencoded::from(sample_scrape_data()); + + assert_eq!( + response, + Bencoded { + scrape_data: sample_scrape_data() + } + ); + } + + #[test] + fn should_be_bencoded() { + let response = Bencoded { + scrape_data: sample_scrape_data(), + }; + + let bytes = response.body(); + + // cspell:disable-next-line + let expected_bytes = b"d5:filesd20:iiiiiiiiiiiiiiiiiiiid8:completei1e10:downloadedi2e10:incompletei3eeee"; + + assert_eq!( + String::from_utf8(bytes).unwrap(), + String::from_utf8(expected_bytes.to_vec()).unwrap() + ); + } + } +} diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs new file mode 100644 index 000000000..a8e740f69 --- /dev/null +++ b/src/servers/http/v1/routes.rs @@ -0,0 +1,21 @@ +use std::sync::Arc; + +use axum::routing::get; +use axum::Router; +use axum_client_ip::SecureClientIpSource; + +use super::handlers::{announce, scrape}; +use crate::tracker::Tracker; + +#[allow(clippy::needless_pass_by_value)] +pub fn router(tracker: Arc) -> Router { + Router::new() + // Announce request + .route("/announce", get(announce::handle_without_key).with_state(tracker.clone())) + 
.route("/announce/:key", get(announce::handle_with_key).with_state(tracker.clone())) + // Scrape request + .route("/scrape", get(scrape::handle_without_key).with_state(tracker.clone())) + .route("/scrape/:key", get(scrape::handle_with_key).with_state(tracker)) + // Add extension to get the client IP from the connection info + .layer(SecureClientIpSource::ConnectInfo.into_extension()) +} diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs new file mode 100644 index 000000000..116dc1e95 --- /dev/null +++ b/src/servers/http/v1/services/announce.rs @@ -0,0 +1,196 @@ +use std::net::IpAddr; +use std::sync::Arc; + +use crate::shared::bit_torrent::info_hash::InfoHash; +use crate::tracker::peer::Peer; +use crate::tracker::{statistics, AnnounceData, Tracker}; + +pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) -> AnnounceData { + let original_peer_ip = peer.peer_addr.ip(); + + // The tracker could change the original peer ip + let announce_data = tracker.announce(&info_hash, peer, &original_peer_ip).await; + + match original_peer_ip { + IpAddr::V4(_) => { + tracker.send_stats_event(statistics::Event::Tcp4Announce).await; + } + IpAddr::V6(_) => { + tracker.send_stats_event(statistics::Event::Tcp6Announce).await; + } + } + + announce_data +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_test_helpers::configuration; + + use crate::shared::bit_torrent::info_hash::InfoHash; + use crate::shared::clock::DurationSinceUnixEpoch; + use crate::tracker::services::tracker_factory; + use crate::tracker::{peer, Tracker}; + + fn public_tracker() -> Tracker { + tracker_factory(configuration::ephemeral_mode_public().into()) + } + + fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() + } + + fn sample_peer_using_ipv4() -> peer::Peer { + sample_peer() + } + 
+ fn sample_peer_using_ipv6() -> peer::Peer { + let mut peer = sample_peer(); + peer.peer_addr = SocketAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + 8080, + ); + peer + } + + fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + } + } + + mod with_tracker_in_any_mode { + use std::future; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + + use mockall::predicate::eq; + use torrust_tracker_test_helpers::configuration; + + use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; + use crate::servers::http::v1::services::announce::invoke; + use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; + use crate::tracker::peer::Peer; + use crate::tracker::torrent::SwarmStats; + use crate::tracker::{statistics, AnnounceData, Tracker}; + + #[tokio::test] + async fn it_should_return_the_announce_data() { + let tracker = Arc::new(public_tracker()); + + let mut peer = sample_peer(); + + let announce_data = invoke(tracker.clone(), sample_info_hash(), &mut peer).await; + + let expected_announce_data = AnnounceData { + peers: vec![], + swarm_stats: SwarmStats { + completed: 0, + seeders: 1, + leechers: 0, + }, + interval: tracker.config.announce_interval, + interval_min: tracker.config.min_announce_interval, + }; + + assert_eq!(announce_data, expected_announce_data); + } + + #[tokio::test] + async fn it_should_send_the_tcp_4_announce_event_when_the_peer_uses_ipv4() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp4Announce)) + .times(1) 
+ .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + Arc::new(configuration::ephemeral()), + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let mut peer = sample_peer_using_ipv4(); + + let _announce_data = invoke(tracker, sample_info_hash(), &mut peer).await; + } + + fn tracker_with_an_ipv6_external_ip(stats_event_sender: Box) -> Tracker { + let mut configuration = configuration::ephemeral(); + configuration.external_ip = + Some(IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)).to_string()); + + Tracker::new(Arc::new(configuration), Some(stats_event_sender), statistics::Repo::new()).unwrap() + } + + fn peer_with_the_ipv4_loopback_ip() -> Peer { + let loopback_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + let mut peer = sample_peer(); + peer.peer_addr = SocketAddr::new(loopback_ip, 8080); + peer + } + + #[tokio::test] + async fn it_should_send_the_tcp_4_announce_event_when_the_peer_uses_ipv4_even_if_the_tracker_changes_the_peer_ip_to_ipv6() + { + // Tracker changes the peer IP to the tracker external IP when the peer is using the loopback IP. 
+ + // Assert that the event sent is a TCP4 event + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp4Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let mut peer = peer_with_the_ipv4_loopback_ip(); + + let _announce_data = invoke( + tracker_with_an_ipv6_external_ip(stats_event_sender).into(), + sample_info_hash(), + &mut peer, + ) + .await; + } + + #[tokio::test] + async fn it_should_send_the_tcp_6_announce_event_when_the_peer_uses_ipv6_even_if_the_tracker_changes_the_peer_ip_to_ipv4() + { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp6Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + Arc::new(configuration::ephemeral()), + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let mut peer = sample_peer_using_ipv6(); + + let _announce_data = invoke(tracker, sample_info_hash(), &mut peer).await; + } + } +} diff --git a/src/servers/http/v1/services/mod.rs b/src/servers/http/v1/services/mod.rs new file mode 100644 index 000000000..5d1acd67d --- /dev/null +++ b/src/servers/http/v1/services/mod.rs @@ -0,0 +1,3 @@ +pub mod announce; +pub mod peer_ip_resolver; +pub mod scrape; diff --git a/src/servers/http/v1/services/peer_ip_resolver.rs b/src/servers/http/v1/services/peer_ip_resolver.rs new file mode 100644 index 000000000..ac5b8c79f --- /dev/null +++ b/src/servers/http/v1/services/peer_ip_resolver.rs @@ -0,0 +1,149 @@ +//! Given this request chain: +//! +//! client <-> http proxy 1 <-> http proxy 2 <-> server +//! ip: 126.0.0.1 ip: 126.0.0.2 ip: 126.0.0.3 ip: 126.0.0.4 +//! 
X-Forwarded-For: 126.0.0.1 X-Forwarded-For: 126.0.0.1,126.0.0.2 +//! +//! This service resolves the peer IP from these values: +//! +//! `right_most_x_forwarded_for` = 126.0.0.2 +//! `connection_info_ip` = 126.0.0.3 +//! +//! Depending on the tracker configuration. +use std::net::IpAddr; +use std::panic::Location; + +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] +pub struct ClientIpSources { + pub right_most_x_forwarded_for: Option, + pub connection_info_ip: Option, +} + +#[derive(Error, Debug)] +pub enum PeerIpResolutionError { + #[error( + "missing or invalid the right most X-Forwarded-For IP (mandatory on reverse proxy tracker configuration) in {location}" + )] + MissingRightMostXForwardedForIp { location: &'static Location<'static> }, + #[error("cannot get the client IP from the connection info in {location}")] + MissingClientIp { location: &'static Location<'static> }, +} + +/// # Errors +/// +/// Will return an error if the peer IP cannot be obtained according to the configuration. +/// For example, if the IP is extracted from an HTTP header which is missing in the request. 
+pub fn invoke(on_reverse_proxy: bool, client_ip_sources: &ClientIpSources) -> Result { + if on_reverse_proxy { + resolve_peer_ip_on_reverse_proxy(client_ip_sources) + } else { + resolve_peer_ip_without_reverse_proxy(client_ip_sources) + } +} + +fn resolve_peer_ip_without_reverse_proxy(remote_client_ip: &ClientIpSources) -> Result { + if let Some(ip) = remote_client_ip.connection_info_ip { + Ok(ip) + } else { + Err(PeerIpResolutionError::MissingClientIp { + location: Location::caller(), + }) + } +} + +fn resolve_peer_ip_on_reverse_proxy(remote_client_ip: &ClientIpSources) -> Result { + if let Some(ip) = remote_client_ip.right_most_x_forwarded_for { + Ok(ip) + } else { + Err(PeerIpResolutionError::MissingRightMostXForwardedForIp { + location: Location::caller(), + }) + } +} + +#[cfg(test)] +mod tests { + use super::invoke; + + mod working_without_reverse_proxy { + use std::net::IpAddr; + use std::str::FromStr; + + use super::invoke; + use crate::servers::http::v1::services::peer_ip_resolver::{ClientIpSources, PeerIpResolutionError}; + + #[test] + fn it_should_get_the_peer_ip_from_the_connection_info() { + let on_reverse_proxy = false; + + let ip = invoke( + on_reverse_proxy, + &ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: Some(IpAddr::from_str("203.0.113.195").unwrap()), + }, + ) + .unwrap(); + + assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); + } + + #[test] + fn it_should_return_an_error_if_it_cannot_get_the_peer_ip_from_the_connection_info() { + let on_reverse_proxy = false; + + let error = invoke( + on_reverse_proxy, + &ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }, + ) + .unwrap_err(); + + assert!(matches!(error, PeerIpResolutionError::MissingClientIp { .. 
})); + } + } + + mod working_on_reverse_proxy { + use std::net::IpAddr; + use std::str::FromStr; + + use crate::servers::http::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; + + #[test] + fn it_should_get_the_peer_ip_from_the_right_most_ip_in_the_x_forwarded_for_header() { + let on_reverse_proxy = true; + + let ip = invoke( + on_reverse_proxy, + &ClientIpSources { + right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), + connection_info_ip: None, + }, + ) + .unwrap(); + + assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); + } + + #[test] + fn it_should_return_an_error_if_it_cannot_get_the_right_most_ip_from_the_x_forwarded_for_header() { + let on_reverse_proxy = true; + + let error = invoke( + on_reverse_proxy, + &ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }, + ) + .unwrap_err(); + + assert!(matches!(error, PeerIpResolutionError::MissingRightMostXForwardedForIp { .. })); + } + } +} diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs new file mode 100644 index 000000000..82ecc72e0 --- /dev/null +++ b/src/servers/http/v1/services/scrape.rs @@ -0,0 +1,245 @@ +use std::net::IpAddr; +use std::sync::Arc; + +use crate::shared::bit_torrent::info_hash::InfoHash; +use crate::tracker::{statistics, ScrapeData, Tracker}; + +pub async fn invoke(tracker: &Arc, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { + let scrape_data = tracker.scrape(info_hashes).await; + + send_scrape_event(original_peer_ip, tracker).await; + + scrape_data +} + +/// When the peer is not authenticated and the tracker is running in `private` mode, +/// the tracker returns empty stats for all the torrents. 
+pub async fn fake(tracker: &Arc, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { + send_scrape_event(original_peer_ip, tracker).await; + + ScrapeData::zeroed(info_hashes) +} + +async fn send_scrape_event(original_peer_ip: &IpAddr, tracker: &Arc) { + match original_peer_ip { + IpAddr::V4(_) => { + tracker.send_stats_event(statistics::Event::Tcp4Scrape).await; + } + IpAddr::V6(_) => { + tracker.send_stats_event(statistics::Event::Tcp6Scrape).await; + } + } +} + +#[cfg(test)] +mod tests { + + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_test_helpers::configuration; + + use crate::shared::bit_torrent::info_hash::InfoHash; + use crate::shared::clock::DurationSinceUnixEpoch; + use crate::tracker::services::tracker_factory; + use crate::tracker::{peer, Tracker}; + + fn public_tracker() -> Tracker { + tracker_factory(configuration::ephemeral_mode_public().into()) + } + + fn sample_info_hashes() -> Vec { + vec![sample_info_hash()] + } + + fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() + } + + fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + } + } + + mod with_real_data { + + use std::future; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::sync::Arc; + + use mockall::predicate::eq; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::http::v1::services::scrape::invoke; + use crate::servers::http::v1::services::scrape::tests::{ + public_tracker, sample_info_hash, sample_info_hashes, sample_peer, + }; + use crate::tracker::torrent::SwarmMetadata; + use crate::tracker::{statistics, ScrapeData, 
Tracker}; + + #[tokio::test] + async fn it_should_return_the_scrape_data_for_a_torrent() { + let tracker = Arc::new(public_tracker()); + + let info_hash = sample_info_hash(); + let info_hashes = vec![info_hash]; + + // Announce a new peer to force scrape data to contain not zeroed data + let mut peer = sample_peer(); + let original_peer_ip = peer.ip(); + tracker.announce(&info_hash, &mut peer, &original_peer_ip).await; + + let scrape_data = invoke(&tracker, &info_hashes, &original_peer_ip).await; + + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file( + &info_hash, + SwarmMetadata { + complete: 1, + downloaded: 0, + incomplete: 0, + }, + ); + + assert_eq!(scrape_data, expected_scrape_data); + } + + #[tokio::test] + async fn it_should_send_the_tcp_4_scrape_event_when_the_peer_uses_ipv4() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp4Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + Arc::new(configuration::ephemeral()), + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); + + invoke(&tracker, &sample_info_hashes(), &peer_ip).await; + } + + #[tokio::test] + async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp6Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + Arc::new(configuration::ephemeral()), + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let peer_ip = 
IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); + + invoke(&tracker, &sample_info_hashes(), &peer_ip).await; + } + } + + mod with_zeroed_data { + + use std::future; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::sync::Arc; + + use mockall::predicate::eq; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::http::v1::services::scrape::fake; + use crate::servers::http::v1::services::scrape::tests::{ + public_tracker, sample_info_hash, sample_info_hashes, sample_peer, + }; + use crate::tracker::{statistics, ScrapeData, Tracker}; + + #[tokio::test] + async fn it_should_always_return_the_zeroed_scrape_data_for_a_torrent() { + let tracker = Arc::new(public_tracker()); + + let info_hash = sample_info_hash(); + let info_hashes = vec![info_hash]; + + // Announce a new peer to force scrape data to contain not zeroed data + let mut peer = sample_peer(); + let original_peer_ip = peer.ip(); + tracker.announce(&info_hash, &mut peer, &original_peer_ip).await; + + let scrape_data = fake(&tracker, &info_hashes, &original_peer_ip).await; + + let expected_scrape_data = ScrapeData::zeroed(&info_hashes); + + assert_eq!(scrape_data, expected_scrape_data); + } + + #[tokio::test] + async fn it_should_send_the_tcp_4_scrape_event_when_the_peer_uses_ipv4() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp4Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + Arc::new(configuration::ephemeral()), + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); + + fake(&tracker, &sample_info_hashes(), &peer_ip).await; + } + + #[tokio::test] + async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() 
{ + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp6Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + Arc::new(configuration::ephemeral()), + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); + + fake(&tracker, &sample_info_hashes(), &peer_ip).await; + } + } +} diff --git a/src/servers/mod.rs b/src/servers/mod.rs new file mode 100644 index 000000000..a71b3f029 --- /dev/null +++ b/src/servers/mod.rs @@ -0,0 +1,4 @@ +pub mod apis; +pub mod http; +pub mod signals; +pub mod udp; diff --git a/src/servers/signals.rs b/src/servers/signals.rs new file mode 100644 index 000000000..b5a25ded7 --- /dev/null +++ b/src/servers/signals.rs @@ -0,0 +1,41 @@ +use log::info; + +/// Resolves on `ctrl_c` or the `terminate` signal. +pub async fn global_shutdown_signal() { + let ctrl_c = async { + tokio::signal::ctrl_c().await.expect("failed to install Ctrl+C handler"); + }; + + #[cfg(unix)] + let terminate = async { + tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate()) + .expect("failed to install signal handler") + .recv() + .await; + }; + + #[cfg(not(unix))] + let terminate = std::future::pending::<()>(); + + tokio::select! { + _ = ctrl_c => {}, + _ = terminate => {} + } +} + +/// Resolves when the `stop_receiver` or the `global_shutdown_signal()` resolves. +pub async fn shutdown_signal(stop_receiver: tokio::sync::oneshot::Receiver) { + let stop = async { stop_receiver.await.expect("Failed to install stop signal.") }; + + tokio::select! { + _ = stop => {}, + _ = global_shutdown_signal() => {} + } +} + +/// Same as `shutdown_signal()`, but shows a message when it resolves. 
+pub async fn shutdown_signal_with_message(stop_receiver: tokio::sync::oneshot::Receiver, message: String) { + shutdown_signal(stop_receiver).await; + + info!("{message}"); +} diff --git a/src/servers/udp/connection_cookie.rs b/src/servers/udp/connection_cookie.rs new file mode 100644 index 000000000..4a75145c1 --- /dev/null +++ b/src/servers/udp/connection_cookie.rs @@ -0,0 +1,254 @@ +use std::net::SocketAddr; +use std::panic::Location; + +use aquatic_udp_protocol::ConnectionId; + +use super::error::Error; +use crate::shared::clock::time_extent::{Extent, TimeExtent}; + +pub type Cookie = [u8; 8]; + +pub type SinceUnixEpochTimeExtent = TimeExtent; + +pub const COOKIE_LIFETIME: TimeExtent = TimeExtent::from_sec(2, &60); + +#[must_use] +pub fn from_connection_id(connection_id: &ConnectionId) -> Cookie { + connection_id.0.to_le_bytes() +} + +#[must_use] +pub fn into_connection_id(connection_cookie: &Cookie) -> ConnectionId { + ConnectionId(i64::from_le_bytes(*connection_cookie)) +} + +#[must_use] +pub fn make(remote_address: &SocketAddr) -> Cookie { + let time_extent = cookie_builder::get_last_time_extent(); + + //println!("remote_address: {remote_address:?}, time_extent: {time_extent:?}, cookie: {cookie:?}"); + cookie_builder::build(remote_address, &time_extent) +} + +/// # Panics +/// +/// It would panic if the `COOKIE_LIFETIME` constant would be an unreasonably large number. +/// +/// # Errors +/// +/// Will return a `ServerError::InvalidConnectionId` if the supplied `connection_cookie` fails to verify. +pub fn check(remote_address: &SocketAddr, connection_cookie: &Cookie) -> Result { + // we loop backwards testing each time_extent until we find one that matches. 
+ // (or the lifetime of time_extents is exhausted) + for offset in 0..=COOKIE_LIFETIME.amount { + let checking_time_extent = cookie_builder::get_last_time_extent().decrease(offset).unwrap(); + + let checking_cookie = cookie_builder::build(remote_address, &checking_time_extent); + //println!("remote_address: {remote_address:?}, time_extent: {checking_time_extent:?}, cookie: {checking_cookie:?}"); + + if *connection_cookie == checking_cookie { + return Ok(checking_time_extent); + } + } + Err(Error::InvalidConnectionId { + location: Location::caller(), + }) +} + +mod cookie_builder { + use std::collections::hash_map::DefaultHasher; + use std::hash::{Hash, Hasher}; + use std::net::SocketAddr; + + use super::{Cookie, SinceUnixEpochTimeExtent, COOKIE_LIFETIME}; + use crate::shared::clock::time_extent::{DefaultTimeExtentMaker, Extent, Make, TimeExtent}; + use crate::shared::crypto::keys::seeds::{Current, Keeper}; + + pub(super) fn get_last_time_extent() -> SinceUnixEpochTimeExtent { + DefaultTimeExtentMaker::now(&COOKIE_LIFETIME.increment) + .unwrap() + .unwrap() + .increase(COOKIE_LIFETIME.amount) + .unwrap() + } + + pub(super) fn build(remote_address: &SocketAddr, time_extent: &TimeExtent) -> Cookie { + let seed = Current::get_seed(); + + let mut hasher = DefaultHasher::new(); + + remote_address.hash(&mut hasher); + time_extent.hash(&mut hasher); + seed.hash(&mut hasher); + + hasher.finish().to_le_bytes() + } +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + + use super::cookie_builder::{self}; + use crate::servers::udp::connection_cookie::{check, make, Cookie, COOKIE_LIFETIME}; + use crate::shared::clock::time_extent::{self, Extent}; + use crate::shared::clock::{Stopped, StoppedTime}; + + // #![feature(const_socketaddr)] + // const REMOTE_ADDRESS_IPV4_ZERO: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + #[test] + fn it_should_make_a_connection_cookie() { + // Note: This constant may need to be updated 
 in the future as the hash is not guaranteed to be stable between versions. + const ID_COOKIE: Cookie = [23, 204, 198, 29, 48, 180, 62, 19]; + + let cookie = make(&SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)); + + assert_eq!(cookie, ID_COOKIE); + } + + #[test] + fn it_should_make_the_same_connection_cookie_for_the_same_input_data() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + let time_extent_zero = time_extent::ZERO; + + let cookie = cookie_builder::build(&remote_address, &time_extent_zero); + let cookie_2 = cookie_builder::build(&remote_address, &time_extent_zero); + + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie_2:?}"); + + //remote_address: 127.0.0.1:8080, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [212, 9, 204, 223, 176, 190, 150, 153] + //remote_address: 127.0.0.1:8080, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [212, 9, 204, 223, 176, 190, 150, 153] + + assert_eq!(cookie, cookie_2); + } + + #[test] + fn it_should_make_the_different_connection_cookie_for_different_ip() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + let remote_address_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::BROADCAST), 0); + let time_extent_zero = time_extent::ZERO; + + let cookie = cookie_builder::build(&remote_address, &time_extent_zero); + let cookie_2 = cookie_builder::build(&remote_address_2, &time_extent_zero); + + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); + println!("remote_address: {remote_address_2:?}, time_extent: {time_extent_zero:?}, cookie: {cookie_2:?}"); + + //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] + //remote_address: 255.255.255.255:0, time_extent: 
TimeExtent { increment: 0ns, amount: 0 }, cookie: [217, 87, 239, 178, 182, 126, 66, 166] + + assert_ne!(cookie, cookie_2); + } + + #[test] + fn it_should_make_the_different_connection_cookie_for_different_ip_version() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + let remote_address_2 = SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0); + let time_extent_zero = time_extent::ZERO; + + let cookie = cookie_builder::build(&remote_address, &time_extent_zero); + let cookie_2 = cookie_builder::build(&remote_address_2, &time_extent_zero); + + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); + println!("remote_address: {remote_address_2:?}, time_extent: {time_extent_zero:?}, cookie: {cookie_2:?}"); + + //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] + //remote_address: [::]:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [99, 119, 230, 177, 20, 220, 163, 187] + + assert_ne!(cookie, cookie_2); + } + + #[test] + fn it_should_make_the_different_connection_cookie_for_different_socket() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + let remote_address_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 1); + let time_extent_zero = time_extent::ZERO; + + let cookie = cookie_builder::build(&remote_address, &time_extent_zero); + let cookie_2 = cookie_builder::build(&remote_address_2, &time_extent_zero); + + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); + println!("remote_address: {remote_address_2:?}, time_extent: {time_extent_zero:?}, cookie: {cookie_2:?}"); + + //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] + //remote_address: 0.0.0.0:1, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [38, 8, 0, 
102, 92, 170, 220, 11] + + assert_ne!(cookie, cookie_2); + } + + #[test] + fn it_should_make_the_different_connection_cookie_for_different_time_extents() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + let time_extent_zero = time_extent::ZERO; + let time_extent_max = time_extent::MAX; + + let cookie = cookie_builder::build(&remote_address, &time_extent_zero); + let cookie_2 = cookie_builder::build(&remote_address, &time_extent_max); + + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); + println!("remote_address: {remote_address:?}, time_extent: {time_extent_max:?}, cookie: {cookie_2:?}"); + + //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] + //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 18446744073709551615.999999999s, amount: 18446744073709551615 }, cookie: [87, 111, 109, 125, 182, 206, 3, 201] + + assert_ne!(cookie, cookie_2); + } + + #[test] + fn it_should_make_different_cookies_for_the_next_time_extent() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + let cookie = make(&remote_address); + + Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); + + let cookie_next = make(&remote_address); + + assert_ne!(cookie, cookie_next); + } + + #[test] + fn it_should_be_valid_for_this_time_extent() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + let cookie = make(&remote_address); + + check(&remote_address, &cookie).unwrap(); + } + + #[test] + fn it_should_be_valid_for_the_next_time_extent() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + let cookie = make(&remote_address); + + Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); + + check(&remote_address, &cookie).unwrap(); + } + + #[test] + fn it_should_be_valid_for_the_last_time_extent() { + let remote_address = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + let cookie = make(&remote_address); + + Stopped::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); + + check(&remote_address, &cookie).unwrap(); + } + + #[test] + #[should_panic] + fn it_should_be_not_valid_after_their_last_time_extent() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + let cookie = make(&remote_address); + + Stopped::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap()); + + check(&remote_address, &cookie).unwrap(); + } +} diff --git a/src/servers/udp/error.rs b/src/servers/udp/error.rs new file mode 100644 index 000000000..a6381cc78 --- /dev/null +++ b/src/servers/udp/error.rs @@ -0,0 +1,26 @@ +use std::panic::Location; + +use thiserror::Error; +use torrust_tracker_located_error::LocatedError; + +#[derive(Error, Debug)] +pub enum Error { + #[error("tracker server error: {source}")] + TrackerError { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + + #[error("internal server error: {message}, {location}")] + InternalServer { + location: &'static Location<'static>, + message: String, + }, + + #[error("connection id could not be verified")] + InvalidConnectionId { location: &'static Location<'static> }, + + #[error("bad request: {source}")] + BadRequest { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, +} diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs new file mode 100644 index 000000000..e00203cfc --- /dev/null +++ b/src/servers/udp/handlers.rs @@ -0,0 +1,1255 @@ +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; +use std::panic::Location; +use std::sync::Arc; + +use aquatic_udp_protocol::{ + AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, + NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, +}; +use log::debug; + +use 
super::connection_cookie::{check, from_connection_id, into_connection_id, make}; +use crate::servers::udp::error::Error; +use crate::servers::udp::peer_builder; +use crate::servers::udp::request::AnnounceWrapper; +use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; +use crate::shared::bit_torrent::info_hash::InfoHash; +use crate::tracker::{statistics, Tracker}; + +pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: &Tracker) -> Response { + match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|e| Error::InternalServer { + message: format!("{e:?}"), + location: Location::caller(), + }) { + Ok(request) => { + let transaction_id = match &request { + Request::Connect(connect_request) => connect_request.transaction_id, + Request::Announce(announce_request) => announce_request.transaction_id, + Request::Scrape(scrape_request) => scrape_request.transaction_id, + }; + + match handle_request(request, remote_addr, tracker).await { + Ok(response) => response, + Err(e) => handle_error(&e, transaction_id), + } + } + // bad request + Err(e) => handle_error( + &Error::BadRequest { + source: (Arc::new(e) as Arc).into(), + }, + TransactionId(0), + ), + } +} + +/// # Errors +/// +/// If a error happens in the `handle_request` function, it will just return the `ServerError`. +pub async fn handle_request(request: Request, remote_addr: SocketAddr, tracker: &Tracker) -> Result { + match request { + Request::Connect(connect_request) => handle_connect(remote_addr, &connect_request, tracker).await, + Request::Announce(announce_request) => handle_announce(remote_addr, &announce_request, tracker).await, + Request::Scrape(scrape_request) => handle_scrape(remote_addr, &scrape_request, tracker).await, + } +} + +/// # Errors +/// +/// This function dose not ever return an error. 
+pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, tracker: &Tracker) -> Result { + let connection_cookie = make(&remote_addr); + let connection_id = into_connection_id(&connection_cookie); + + let response = Response::from(ConnectResponse { + transaction_id: request.transaction_id, + connection_id, + }); + + // send stats event + match remote_addr { + SocketAddr::V4(_) => { + tracker.send_stats_event(statistics::Event::Udp4Connect).await; + } + SocketAddr::V6(_) => { + tracker.send_stats_event(statistics::Event::Udp6Connect).await; + } + } + + Ok(response) +} + +/// # Errors +/// +/// Will return `Error` if unable to `authenticate_request`. +pub async fn authenticate(info_hash: &InfoHash, tracker: &Tracker) -> Result<(), Error> { + tracker + .authenticate_request(info_hash, &None) + .await + .map_err(|e| Error::TrackerError { + source: (Arc::new(e) as Arc).into(), + }) +} + +/// # Errors +/// +/// If a error happens in the `handle_announce` function, it will just return the `ServerError`. 
+pub async fn handle_announce( + remote_addr: SocketAddr, + announce_request: &AnnounceRequest, + tracker: &Tracker, +) -> Result { + debug!("udp announce request: {:#?}", announce_request); + + check(&remote_addr, &from_connection_id(&announce_request.connection_id))?; + + let wrapped_announce_request = AnnounceWrapper::new(announce_request); + + let info_hash = wrapped_announce_request.info_hash; + let remote_client_ip = remote_addr.ip(); + + authenticate(&info_hash, tracker).await?; + + let mut peer = peer_builder::from_request(&wrapped_announce_request, &remote_client_ip); + + let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip).await; + + match remote_client_ip { + IpAddr::V4(_) => { + tracker.send_stats_event(statistics::Event::Udp4Announce).await; + } + IpAddr::V6(_) => { + tracker.send_stats_event(statistics::Event::Udp6Announce).await; + } + } + + #[allow(clippy::cast_possible_truncation)] + let announce_response = if remote_addr.is_ipv4() { + Response::from(AnnounceResponse { + transaction_id: wrapped_announce_request.announce_request.transaction_id, + announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), + leechers: NumberOfPeers(i64::from(response.swarm_stats.leechers) as i32), + seeders: NumberOfPeers(i64::from(response.swarm_stats.seeders) as i32), + peers: response + .peers + .iter() + .filter_map(|peer| { + if let IpAddr::V4(ip) = peer.peer_addr.ip() { + Some(ResponsePeer:: { + ip_address: ip, + port: Port(peer.peer_addr.port()), + }) + } else { + None + } + }) + .collect(), + }) + } else { + Response::from(AnnounceResponse { + transaction_id: wrapped_announce_request.announce_request.transaction_id, + announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), + leechers: NumberOfPeers(i64::from(response.swarm_stats.leechers) as i32), + seeders: NumberOfPeers(i64::from(response.swarm_stats.seeders) as i32), + peers: response + .peers + .iter() + .filter_map(|peer| 
{ + if let IpAddr::V6(ip) = peer.peer_addr.ip() { + Some(ResponsePeer:: { + ip_address: ip, + port: Port(peer.peer_addr.port()), + }) + } else { + None + } + }) + .collect(), + }) + }; + + Ok(announce_response) +} + +/// # Errors +/// +/// This function dose not ever return an error. +pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tracker: &Tracker) -> Result { + // Convert from aquatic infohashes + let mut info_hashes = vec![]; + for info_hash in &request.info_hashes { + info_hashes.push(InfoHash(info_hash.0)); + } + + let scrape_data = tracker.scrape(&info_hashes).await; + + let mut torrent_stats: Vec = Vec::new(); + + for file in &scrape_data.files { + let info_hash = file.0; + let swarm_metadata = file.1; + + let scrape_entry = if tracker.authenticate_request(info_hash, &None).await.is_ok() { + #[allow(clippy::cast_possible_truncation)] + TorrentScrapeStatistics { + seeders: NumberOfPeers(i64::from(swarm_metadata.complete) as i32), + completed: NumberOfDownloads(i64::from(swarm_metadata.downloaded) as i32), + leechers: NumberOfPeers(i64::from(swarm_metadata.incomplete) as i32), + } + } else { + TorrentScrapeStatistics { + seeders: NumberOfPeers(0), + completed: NumberOfDownloads(0), + leechers: NumberOfPeers(0), + } + }; + + torrent_stats.push(scrape_entry); + } + + // send stats event + match remote_addr { + SocketAddr::V4(_) => { + tracker.send_stats_event(statistics::Event::Udp4Scrape).await; + } + SocketAddr::V6(_) => { + tracker.send_stats_event(statistics::Event::Udp6Scrape).await; + } + } + + Ok(Response::from(ScrapeResponse { + transaction_id: request.transaction_id, + torrent_stats, + })) +} + +fn handle_error(e: &Error, transaction_id: TransactionId) -> Response { + let message = e.to_string(); + Response::from(ErrorResponse { + transaction_id, + message: message.into(), + }) +} + +#[cfg(test)] +mod tests { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + + use 
aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_test_helpers::configuration; + + use crate::shared::clock::{Current, Time}; + use crate::tracker::services::tracker_factory; + use crate::tracker::{peer, Tracker}; + + fn tracker_configuration() -> Arc { + Arc::new(default_testing_tracker_configuration()) + } + + fn default_testing_tracker_configuration() -> Configuration { + configuration::ephemeral() + } + + fn public_tracker() -> Arc { + initialized_tracker(configuration::ephemeral_mode_public().into()) + } + + fn private_tracker() -> Arc { + initialized_tracker(configuration::ephemeral_mode_private().into()) + } + + fn whitelisted_tracker() -> Arc { + initialized_tracker(configuration::ephemeral_mode_whitelisted().into()) + } + + fn initialized_tracker(configuration: Arc) -> Arc { + tracker_factory(configuration).into() + } + + fn sample_ipv4_remote_addr() -> SocketAddr { + sample_ipv4_socket_address() + } + + fn sample_ipv6_remote_addr() -> SocketAddr { + sample_ipv6_socket_address() + } + + fn sample_ipv4_socket_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + } + + fn sample_ipv6_socket_address() -> SocketAddr { + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + } + + struct TorrentPeerBuilder { + peer: peer::Peer, + } + + impl TorrentPeerBuilder { + pub fn default() -> TorrentPeerBuilder { + let default_peer = peer::Peer { + peer_id: peer::Id([255u8; 20]), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: Current::now(), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + TorrentPeerBuilder { peer: default_peer } + } + + pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { + self.peer.peer_id = peer_id; + self + } + + pub fn with_peer_addr(mut self, peer_addr: SocketAddr) -> Self { + 
self.peer.peer_addr = peer_addr; + self + } + + pub fn with_bytes_left(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes(left); + self + } + + pub fn into(self) -> peer::Peer { + self.peer + } + } + + struct TrackerConfigurationBuilder { + configuration: Configuration, + } + + impl TrackerConfigurationBuilder { + pub fn default() -> TrackerConfigurationBuilder { + let default_configuration = default_testing_tracker_configuration(); + TrackerConfigurationBuilder { + configuration: default_configuration, + } + } + + pub fn with_external_ip(mut self, external_ip: &str) -> Self { + self.configuration.external_ip = Some(external_ip.to_owned()); + self + } + + pub fn into(self) -> Configuration { + self.configuration + } + } + + mod connect_request { + + use std::future; + use std::sync::Arc; + + use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; + use mockall::predicate::eq; + + use super::{sample_ipv4_socket_address, sample_ipv6_remote_addr, tracker_configuration}; + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::handle_connect; + use crate::servers::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; + use crate::tracker::{self, statistics}; + + fn sample_connect_request() -> ConnectRequest { + ConnectRequest { + transaction_id: TransactionId(0i32), + } + } + + #[tokio::test] + async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { + let request = ConnectRequest { + transaction_id: TransactionId(0i32), + }; + + let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker()) + .await + .unwrap(); + + assert_eq!( + response, + Response::Connect(ConnectResponse { + connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), + transaction_id: request.transaction_id + }) + ); + } + + #[tokio::test] + async fn a_connect_response_should_contain_a_new_connection_id() { + let request = 
ConnectRequest { + transaction_id: TransactionId(0i32), + }; + + let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker()) + .await + .unwrap(); + + assert_eq!( + response, + Response::Connect(ConnectResponse { + connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), + transaction_id: request.transaction_id + }) + ); + } + + #[tokio::test] + async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Udp4Connect)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let client_socket_address = sample_ipv4_socket_address(); + + let torrent_tracker = Arc::new( + tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + ); + handle_connect(client_socket_address, &sample_connect_request(), &torrent_tracker) + .await + .unwrap(); + } + + #[tokio::test] + async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Udp6Connect)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let torrent_tracker = Arc::new( + tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + ); + handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), &torrent_tracker) + .await + .unwrap(); + } + } + + mod announce_request { + + use std::net::Ipv4Addr; + + use aquatic_udp_protocol::{ + AnnounceEvent, AnnounceRequest, ConnectionId, NumberOfBytes, NumberOfPeers, PeerId as 
AquaticPeerId, PeerKey, Port, + TransactionId, + }; + + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::tests::sample_ipv4_remote_addr; + + struct AnnounceRequestBuilder { + request: AnnounceRequest, + } + + impl AnnounceRequestBuilder { + pub fn default() -> AnnounceRequestBuilder { + let client_ip = Ipv4Addr::new(126, 0, 0, 1); + let client_port = 8080; + let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); + + let default_request = AnnounceRequest { + connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), + transaction_id: TransactionId(0i32), + info_hash: info_hash_aquatic, + peer_id: AquaticPeerId([255u8; 20]), + bytes_downloaded: NumberOfBytes(0i64), + bytes_uploaded: NumberOfBytes(0i64), + bytes_left: NumberOfBytes(0i64), + event: AnnounceEvent::Started, + ip_address: Some(client_ip), + key: PeerKey(0u32), + peers_wanted: NumberOfPeers(1i32), + port: Port(client_port), + }; + AnnounceRequestBuilder { + request: default_request, + } + } + + pub fn with_connection_id(mut self, connection_id: ConnectionId) -> Self { + self.request.connection_id = connection_id; + self + } + + pub fn with_info_hash(mut self, info_hash: aquatic_udp_protocol::InfoHash) -> Self { + self.request.info_hash = info_hash; + self + } + + pub fn with_peer_id(mut self, peer_id: AquaticPeerId) -> Self { + self.request.peer_id = peer_id; + self + } + + pub fn with_ip_address(mut self, ip_address: Ipv4Addr) -> Self { + self.request.ip_address = Some(ip_address); + self + } + + pub fn with_port(mut self, port: u16) -> Self { + self.request.port = Port(port); + self + } + + pub fn into(self) -> AnnounceRequest { + self.request + } + } + + mod using_ipv4 { + + use std::future; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{ + AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, NumberOfPeers, PeerId as AquaticPeerId, + Response, 
ResponsePeer, + }; + use mockall::predicate::eq; + + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::handle_announce; + use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::{ + public_tracker, sample_ipv4_socket_address, tracker_configuration, TorrentPeerBuilder, + }; + use crate::tracker::{self, peer, statistics}; + + #[tokio::test] + async fn an_announced_peer_should_be_added_to_the_tracker() { + let tracker = public_tracker(); + + let client_ip = Ipv4Addr::new(126, 0, 0, 1); + let client_port = 8080; + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip) + .with_port(client_port) + .into(); + + handle_announce(remote_addr, &request, &tracker).await.unwrap(); + + let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + + let expected_peer = TorrentPeerBuilder::default() + .with_peer_id(peer::Id(peer_id.0)) + .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip), client_port)) + .into(); + + assert_eq!(peers[0], expected_peer); + } + + #[tokio::test] + async fn the_announced_peer_should_not_be_included_in_the_response() { + let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .into(); + + let response = handle_announce(remote_addr, &request, &public_tracker()).await.unwrap(); + + let empty_peer_vector: Vec> = vec![]; + assert_eq!( + response, + Response::from(AnnounceResponse { + transaction_id: request.transaction_id, + announce_interval: AnnounceInterval(120i32), + 
leechers: NumberOfPeers(0i32), + seeders: NumberOfPeers(1i32), + peers: empty_peer_vector + }) + ); + } + + #[tokio::test] + async fn the_tracker_should_always_use_the_remote_client_ip_but_not_the_port_in_the_udp_request_header_instead_of_the_peer_address_in_the_announce_request( + ) { + // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): + // "Do note that most trackers will only honor the IP address field under limited circumstances." + + let tracker = public_tracker(); + + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + let client_port = 8080; + + let remote_client_ip = Ipv4Addr::new(126, 0, 0, 1); + let remote_client_port = 8081; + let peer_address = Ipv4Addr::new(126, 0, 0, 2); + + let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(peer_address) + .with_port(client_port) + .into(); + + handle_announce(remote_addr, &request, &tracker).await.unwrap(); + + let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + + assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); + } + + async fn add_a_torrent_peer_using_ipv6(tracker: Arc) { + let info_hash = AquaticInfoHash([0u8; 20]); + + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let client_port = 8080; + let peer_id = AquaticPeerId([255u8; 20]); + + let peer_using_ipv6 = TorrentPeerBuilder::default() + .with_peer_id(peer::Id(peer_id.0)) + .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) + .into(); + + tracker + .update_torrent_with_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv6) + .await; + } + + async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { + let remote_addr = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + let request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .into(); + + handle_announce(remote_addr, &request, &tracker).await.unwrap() + } + + #[tokio::test] + async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { + let tracker = public_tracker(); + + add_a_torrent_peer_using_ipv6(tracker.clone()).await; + + let response = announce_a_new_peer_using_ipv4(tracker.clone()).await; + + // The response should not contain the peer using IPV6 + let peers: Option>> = match response { + Response::AnnounceIpv6(announce_response) => Some(announce_response.peers), + _ => None, + }; + let no_ipv6_peers = peers.is_none(); + assert!(no_ipv6_peers); + } + + #[tokio::test] + async fn should_send_the_upd4_announce_event() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Udp4Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + ); + + handle_announce( + sample_ipv4_socket_address(), + &AnnounceRequestBuilder::default().into(), + &tracker, + ) + .await + .unwrap(); + } + + mod from_a_loopback_ip { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::handle_announce; + use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::{public_tracker, TorrentPeerBuilder}; + use crate::tracker::peer; + + #[tokio::test] + async fn 
the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { + let tracker = public_tracker(); + + let client_ip = Ipv4Addr::new(127, 0, 0, 1); + let client_port = 8080; + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip) + .with_port(client_port) + .into(); + + handle_announce(remote_addr, &request, &tracker).await.unwrap(); + + let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + + let external_ip_in_tracker_configuration = + tracker.config.external_ip.clone().unwrap().parse::().unwrap(); + + let expected_peer = TorrentPeerBuilder::default() + .with_peer_id(peer::Id(peer_id.0)) + .with_peer_addr(SocketAddr::new(IpAddr::V4(external_ip_in_tracker_configuration), client_port)) + .into(); + + assert_eq!(peers[0], expected_peer); + } + } + } + + mod using_ipv6 { + + use std::future; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{ + AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, NumberOfPeers, PeerId as AquaticPeerId, + Response, ResponsePeer, + }; + use mockall::predicate::eq; + + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::handle_announce; + use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::{ + public_tracker, sample_ipv6_remote_addr, tracker_configuration, TorrentPeerBuilder, + }; + use crate::tracker::{self, peer, statistics}; + + #[tokio::test] + async fn an_announced_peer_should_be_added_to_the_tracker() { + let tracker = public_tracker(); + + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 
1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let client_port = 8080; + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip_v4) + .with_port(client_port) + .into(); + + handle_announce(remote_addr, &request, &tracker).await.unwrap(); + + let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + + let expected_peer = TorrentPeerBuilder::default() + .with_peer_id(peer::Id(peer_id.0)) + .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) + .into(); + + assert_eq!(peers[0], expected_peer); + } + + #[tokio::test] + async fn the_announced_peer_should_not_be_included_in_the_response() { + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .into(); + + let response = handle_announce(remote_addr, &request, &public_tracker()).await.unwrap(); + + let empty_peer_vector: Vec> = vec![]; + assert_eq!( + response, + Response::from(AnnounceResponse { + transaction_id: request.transaction_id, + announce_interval: AnnounceInterval(120i32), + leechers: NumberOfPeers(0i32), + seeders: NumberOfPeers(1i32), + peers: empty_peer_vector + }) + ); + } + + #[tokio::test] + async fn the_tracker_should_always_use_the_remote_client_ip_but_not_the_port_in_the_udp_request_header_instead_of_the_peer_address_in_the_announce_request( + ) { + // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): + // "Do note that most trackers will only honor the IP address field under limited circumstances." 
+ + let tracker = public_tracker(); + + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + let client_port = 8080; + + let remote_client_ip = "::100".parse().unwrap(); // IPV4 ::0.0.1.0 -> IPV6 = ::100 = ::ffff:0:100 = 0:0:0:0:0:ffff:0:0100 + let remote_client_port = 8081; + let peer_address = "126.0.0.1".parse().unwrap(); + + let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(peer_address) + .with_port(client_port) + .into(); + + handle_announce(remote_addr, &request, &tracker).await.unwrap(); + + let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + + // When using IPv6 the tracker converts the remote client ip into a IPv4 address + assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); + } + + async fn add_a_torrent_peer_using_ipv4(tracker: Arc) { + let info_hash = AquaticInfoHash([0u8; 20]); + + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_port = 8080; + let peer_id = AquaticPeerId([255u8; 20]); + + let peer_using_ipv4 = TorrentPeerBuilder::default() + .with_peer_id(peer::Id(peer_id.0)) + .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) + .into(); + + tracker + .update_torrent_with_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv4) + .await; + } + + async fn announce_a_new_peer_using_ipv6(tracker: Arc) -> Response { + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let client_port = 8080; + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + let request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .into(); + + handle_announce(remote_addr, &request, 
&tracker).await.unwrap() + } + + #[tokio::test] + async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { + let tracker = public_tracker(); + + add_a_torrent_peer_using_ipv4(tracker.clone()).await; + + let response = announce_a_new_peer_using_ipv6(tracker.clone()).await; + + // The response should not contain the peer using IPV4 + let peers: Option>> = match response { + Response::AnnounceIpv4(announce_response) => Some(announce_response.peers), + _ => None, + }; + let no_ipv4_peers = peers.is_none(); + assert!(no_ipv4_peers); + } + + #[tokio::test] + async fn should_send_the_upd6_announce_event() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Udp6Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + ); + + let remote_addr = sample_ipv6_remote_addr(); + + let announce_request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .into(); + + handle_announce(remote_addr, &announce_request, &tracker).await.unwrap(); + } + + mod from_a_loopback_ip { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::handle_announce; + use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::TrackerConfigurationBuilder; + use crate::tracker; + use crate::tracker::statistics::Keeper; + + #[tokio::test] + async fn 
the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + let tracker = + Arc::new(tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()); + + let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); + let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + + let client_ip_v4 = loopback_ipv4; + let client_ip_v6 = loopback_ipv6; + let client_port = 8080; + + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip_v4) + .with_port(client_port) + .into(); + + handle_announce(remote_addr, &request, &tracker).await.unwrap(); + + let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + + let _external_ip_in_tracker_configuration = + tracker.config.external_ip.clone().unwrap().parse::().unwrap(); + + // There's a special type of IPv6 addresses that provide compatibility with IPv4. + // The last 32 bits of these addresses represent an IPv4, and are represented like this: + // 1111:2222:3333:4444:5555:6666:1.2.3.4 + // + // ::127.0.0.1 is the IPV6 representation for the IPV4 address 127.0.0.1. 
+ assert_eq!(Ok(peers[0].peer_addr.ip()), "::126.0.0.1".parse()); + } + } + } + } + + mod scrape_request { + use std::net::SocketAddr; + use std::sync::Arc; + + use aquatic_udp_protocol::{ + InfoHash, NumberOfDownloads, NumberOfPeers, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, + TransactionId, + }; + + use super::TorrentPeerBuilder; + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; + use crate::tracker::{self, peer}; + + fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { + TorrentScrapeStatistics { + seeders: NumberOfPeers(0), + completed: NumberOfDownloads(0), + leechers: NumberOfPeers(0), + } + } + + #[tokio::test] + async fn should_return_no_stats_when_the_tracker_does_not_have_any_torrent() { + let remote_addr = sample_ipv4_remote_addr(); + + let info_hash = InfoHash([0u8; 20]); + let info_hashes = vec![info_hash]; + + let request = ScrapeRequest { + connection_id: into_connection_id(&make(&remote_addr)), + transaction_id: TransactionId(0i32), + info_hashes, + }; + + let response = handle_scrape(remote_addr, &request, &public_tracker()).await.unwrap(); + + let expected_torrent_stats = vec![zeroed_torrent_statistics()]; + + assert_eq!( + response, + Response::from(ScrapeResponse { + transaction_id: request.transaction_id, + torrent_stats: expected_torrent_stats + }) + ); + } + + async fn add_a_seeder(tracker: Arc, remote_addr: &SocketAddr, info_hash: &InfoHash) { + let peer_id = peer::Id([255u8; 20]); + + let peer = TorrentPeerBuilder::default() + .with_peer_id(peer::Id(peer_id.0)) + .with_peer_addr(*remote_addr) + .with_bytes_left(0) + .into(); + + tracker + .update_torrent_with_peer_and_get_stats(&info_hash.0.into(), &peer) + .await; + } + + fn build_scrape_request(remote_addr: &SocketAddr, info_hash: &InfoHash) -> ScrapeRequest { + let info_hashes = vec![*info_hash]; 
+ + ScrapeRequest { + connection_id: into_connection_id(&make(remote_addr)), + transaction_id: TransactionId(0i32), + info_hashes, + } + } + + async fn add_a_sample_seeder_and_scrape(tracker: Arc) -> Response { + let remote_addr = sample_ipv4_remote_addr(); + let info_hash = InfoHash([0u8; 20]); + + add_a_seeder(tracker.clone(), &remote_addr, &info_hash).await; + + let request = build_scrape_request(&remote_addr, &info_hash); + + handle_scrape(remote_addr, &request, &tracker).await.unwrap() + } + + fn match_scrape_response(response: Response) -> Option { + match response { + Response::Scrape(scrape_response) => Some(scrape_response), + _ => None, + } + } + + mod with_a_public_tracker { + use aquatic_udp_protocol::{NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; + + use crate::servers::udp::handlers::tests::public_tracker; + use crate::servers::udp::handlers::tests::scrape_request::{add_a_sample_seeder_and_scrape, match_scrape_response}; + + #[tokio::test] + async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { + let tracker = public_tracker(); + + let torrent_stats = match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone()).await); + + let expected_torrent_stats = vec![TorrentScrapeStatistics { + seeders: NumberOfPeers(1), + completed: NumberOfDownloads(0), + leechers: NumberOfPeers(0), + }]; + + assert_eq!(torrent_stats.unwrap().torrent_stats, expected_torrent_stats); + } + } + + mod with_a_private_tracker { + + use aquatic_udp_protocol::InfoHash; + + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::scrape_request::{ + add_a_sample_seeder_and_scrape, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, + }; + use crate::servers::udp::handlers::tests::{private_tracker, sample_ipv4_remote_addr}; + + #[tokio::test] + async fn should_return_zeroed_statistics_when_the_tracker_does_not_have_the_requested_torrent() { + let tracker = 
private_tracker(); + + let remote_addr = sample_ipv4_remote_addr(); + let non_existing_info_hash = InfoHash([0u8; 20]); + + let request = build_scrape_request(&remote_addr, &non_existing_info_hash); + + let torrent_stats = match_scrape_response(handle_scrape(remote_addr, &request, &tracker).await.unwrap()).unwrap(); + + let expected_torrent_stats = vec![zeroed_torrent_statistics()]; + + assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); + } + + #[tokio::test] + async fn should_return_zeroed_statistics_when_the_tracker_has_the_requested_torrent_because_authenticated_requests_are_not_supported_in_udp_tracker( + ) { + let tracker = private_tracker(); + + let torrent_stats = match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone()).await).unwrap(); + + let expected_torrent_stats = vec![zeroed_torrent_statistics()]; + + assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); + } + } + + mod with_a_whitelisted_tracker { + use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; + + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::scrape_request::{ + add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, + }; + use crate::servers::udp::handlers::tests::{sample_ipv4_remote_addr, whitelisted_tracker}; + + #[tokio::test] + async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { + let tracker = whitelisted_tracker(); + + let remote_addr = sample_ipv4_remote_addr(); + let info_hash = InfoHash([0u8; 20]); + + add_a_seeder(tracker.clone(), &remote_addr, &info_hash).await; + + tracker.add_torrent_to_memory_whitelist(&info_hash.0.into()).await; + + let request = build_scrape_request(&remote_addr, &info_hash); + + let torrent_stats = match_scrape_response(handle_scrape(remote_addr, &request, &tracker).await.unwrap()).unwrap(); + + let expected_torrent_stats = vec![TorrentScrapeStatistics { + 
seeders: NumberOfPeers(1), + completed: NumberOfDownloads(0), + leechers: NumberOfPeers(0), + }]; + + assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); + } + + #[tokio::test] + async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { + let tracker = whitelisted_tracker(); + + let remote_addr = sample_ipv4_remote_addr(); + let info_hash = InfoHash([0u8; 20]); + + add_a_seeder(tracker.clone(), &remote_addr, &info_hash).await; + + let request = build_scrape_request(&remote_addr, &info_hash); + + let torrent_stats = match_scrape_response(handle_scrape(remote_addr, &request, &tracker).await.unwrap()).unwrap(); + + let expected_torrent_stats = vec![zeroed_torrent_statistics()]; + + assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); + } + } + + fn sample_scrape_request(remote_addr: &SocketAddr) -> ScrapeRequest { + let info_hash = InfoHash([0u8; 20]); + let info_hashes = vec![info_hash]; + + ScrapeRequest { + connection_id: into_connection_id(&make(remote_addr)), + transaction_id: TransactionId(0i32), + info_hashes, + } + } + + mod using_ipv4 { + use std::future; + use std::sync::Arc; + + use mockall::predicate::eq; + + use super::sample_scrape_request; + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::{sample_ipv4_remote_addr, tracker_configuration}; + use crate::tracker::{self, statistics}; + + #[tokio::test] + async fn should_send_the_upd4_scrape_event() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Udp4Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let remote_addr = sample_ipv4_remote_addr(); + let tracker = Arc::new( + tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + ); + + handle_scrape(remote_addr, 
&sample_scrape_request(&remote_addr), &tracker) + .await + .unwrap(); + } + } + + mod using_ipv6 { + use std::future; + use std::sync::Arc; + + use mockall::predicate::eq; + + use super::sample_scrape_request; + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::{sample_ipv6_remote_addr, tracker_configuration}; + use crate::tracker::{self, statistics}; + + #[tokio::test] + async fn should_send_the_upd6_scrape_event() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Udp6Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let remote_addr = sample_ipv6_remote_addr(); + let tracker = Arc::new( + tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + ); + + handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), &tracker) + .await + .unwrap(); + } + } + } +} diff --git a/src/torrust_udp_tracker/mod.rs b/src/servers/udp/mod.rs similarity index 50% rename from src/torrust_udp_tracker/mod.rs rename to src/servers/udp/mod.rs index 6aa5fbce0..7b755a20b 100644 --- a/src/torrust_udp_tracker/mod.rs +++ b/src/servers/udp/mod.rs @@ -1,16 +1,13 @@ -pub mod errors; +pub mod connection_cookie; +pub mod error; +pub mod handlers; +pub mod peer_builder; pub mod request; pub mod server; -pub mod handlers; - -pub use self::errors::*; -pub use self::request::*; -pub use self::server::*; -pub use self::handlers::*; pub type Bytes = u64; pub type Port = u16; pub type TransactionId = i64; pub const MAX_PACKET_SIZE: usize = 1496; -pub const PROTOCOL_ID: i64 = 0x41727101980; +pub const PROTOCOL_ID: i64 = 0x0417_2710_1980; diff --git a/src/servers/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs new file mode 100644 index 000000000..8d8852dc7 --- /dev/null +++ 
b/src/servers/udp/peer_builder.rs @@ -0,0 +1,18 @@ +use std::net::{IpAddr, SocketAddr}; + +use super::request::AnnounceWrapper; +use crate::shared::clock::{Current, Time}; +use crate::tracker::peer::{Id, Peer}; + +#[must_use] +pub fn from_request(announce_wrapper: &AnnounceWrapper, peer_ip: &IpAddr) -> Peer { + Peer { + peer_id: Id(announce_wrapper.announce_request.peer_id.0), + peer_addr: SocketAddr::new(*peer_ip, announce_wrapper.announce_request.port.0), + updated: Current::now(), + uploaded: announce_wrapper.announce_request.bytes_uploaded, + downloaded: announce_wrapper.announce_request.bytes_downloaded, + left: announce_wrapper.announce_request.bytes_left, + event: announce_wrapper.announce_request.event, + } +} diff --git a/src/servers/udp/request.rs b/src/servers/udp/request.rs new file mode 100644 index 000000000..4be99e6d0 --- /dev/null +++ b/src/servers/udp/request.rs @@ -0,0 +1,18 @@ +use aquatic_udp_protocol::AnnounceRequest; + +use crate::shared::bit_torrent::info_hash::InfoHash; + +pub struct AnnounceWrapper { + pub announce_request: AnnounceRequest, + pub info_hash: InfoHash, +} + +impl AnnounceWrapper { + #[must_use] + pub fn new(announce_request: &AnnounceRequest) -> Self { + AnnounceWrapper { + announce_request: announce_request.clone(), + info_hash: InfoHash(announce_request.info_hash.0), + } + } +} diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs new file mode 100644 index 000000000..9eb9836fe --- /dev/null +++ b/src/servers/udp/server.rs @@ -0,0 +1,201 @@ +use std::future::Future; +use std::io::Cursor; +use std::net::SocketAddr; +use std::sync::Arc; + +use aquatic_udp_protocol::Response; +use futures::pin_mut; +use log::{debug, error, info}; +use tokio::net::UdpSocket; +use tokio::task::JoinHandle; + +use crate::servers::signals::shutdown_signal; +use crate::servers::udp::handlers::handle_packet; +use crate::servers::udp::MAX_PACKET_SIZE; +use crate::tracker::Tracker; + +#[derive(Debug)] +pub enum Error { + Error(String), +} 
+ +#[allow(clippy::module_name_repetitions)] +pub type StoppedUdpServer = UdpServer; +#[allow(clippy::module_name_repetitions)] +pub type RunningUdpServer = UdpServer; + +#[allow(clippy::module_name_repetitions)] +pub struct UdpServer { + pub cfg: torrust_tracker_configuration::UdpTracker, + pub state: S, +} + +pub struct Stopped; + +pub struct Running { + pub bind_address: SocketAddr, + stop_job_sender: tokio::sync::oneshot::Sender, + job: JoinHandle<()>, +} + +impl UdpServer { + #[must_use] + pub fn new(cfg: torrust_tracker_configuration::UdpTracker) -> Self { + Self { cfg, state: Stopped {} } + } + + /// # Errors + /// + /// Will return `Err` if UDP can't bind to given bind address. + pub async fn start(self, tracker: Arc) -> Result, Error> { + let udp = Udp::new(&self.cfg.bind_address) + .await + .map_err(|e| Error::Error(e.to_string()))?; + + let bind_address = udp.socket.local_addr().map_err(|e| Error::Error(e.to_string()))?; + + let (sender, receiver) = tokio::sync::oneshot::channel::(); + + let job = tokio::spawn(async move { + udp.start_with_graceful_shutdown(tracker, shutdown_signal(receiver)).await; + }); + + let running_udp_server: UdpServer = UdpServer { + cfg: self.cfg, + state: Running { + bind_address, + stop_job_sender: sender, + job, + }, + }; + + Ok(running_udp_server) + } +} + +impl UdpServer { + /// # Errors + /// + /// Will return `Err` if the oneshot channel to send the stop signal + /// has already been called once. + pub async fn stop(self) -> Result, Error> { + self.state.stop_job_sender.send(1).map_err(|e| Error::Error(e.to_string()))?; + + let _ = self.state.job.await; + + let stopped_api_server: UdpServer = UdpServer { + cfg: self.cfg, + state: Stopped {}, + }; + + Ok(stopped_api_server) + } +} + +pub struct Udp { + socket: Arc, +} + +impl Udp { + /// # Errors + /// + /// Will return `Err` unable to bind to the supplied `bind_address`. 
+ pub async fn new(bind_address: &str) -> tokio::io::Result { + let socket = UdpSocket::bind(bind_address).await?; + + Ok(Udp { + socket: Arc::new(socket), + }) + } + + /// # Panics + /// + /// It would panic if unable to resolve the `local_addr` from the supplied ´socket´. + pub async fn start(&self, tracker: Arc) { + loop { + let mut data = [0; MAX_PACKET_SIZE]; + let socket = self.socket.clone(); + + tokio::select! { + _ = tokio::signal::ctrl_c() => { + info!("Stopping UDP server: {}..", socket.local_addr().unwrap()); + break; + } + Ok((valid_bytes, remote_addr)) = socket.recv_from(&mut data) => { + let payload = data[..valid_bytes].to_vec(); + + info!("Received {} bytes", payload.len()); + debug!("From: {}", &remote_addr); + debug!("Payload: {:?}", payload); + + let response = handle_packet(remote_addr, payload, &tracker).await; + + Udp::send_response(socket, remote_addr, response).await; + } + } + } + } + + /// # Panics + /// + /// It would panic if unable to resolve the `local_addr` from the supplied ´socket´. + async fn start_with_graceful_shutdown(&self, tracker: Arc, shutdown_signal: F) + where + F: Future, + { + // Pin the future so that it doesn't move to the first loop iteration. + pin_mut!(shutdown_signal); + + loop { + let mut data = [0; MAX_PACKET_SIZE]; + let socket = self.socket.clone(); + + tokio::select! 
{ + _ = &mut shutdown_signal => { + info!("Stopping UDP server: {}..", self.socket.local_addr().unwrap()); + break; + } + Ok((valid_bytes, remote_addr)) = socket.recv_from(&mut data) => { + let payload = data[..valid_bytes].to_vec(); + + info!("Received {} bytes", payload.len()); + debug!("From: {}", &remote_addr); + debug!("Payload: {:?}", payload); + + let response = handle_packet(remote_addr, payload, &tracker).await; + + Udp::send_response(socket, remote_addr, response).await; + } + } + } + } + + async fn send_response(socket: Arc, remote_addr: SocketAddr, response: Response) { + let buffer = vec![0u8; MAX_PACKET_SIZE]; + let mut cursor = Cursor::new(buffer); + + match response.write(&mut cursor) { + Ok(_) => { + #[allow(clippy::cast_possible_truncation)] + let position = cursor.position() as usize; + let inner = cursor.get_ref(); + + info!("Sending {} bytes ...", &inner[..position].len()); + debug!("To: {:?}", &remote_addr); + debug!("Payload: {:?}", &inner[..position]); + + Udp::send_packet(socket, &remote_addr, &inner[..position]).await; + + info!("{} bytes sent", &inner[..position].len()); + } + Err(_) => { + error!("could not write response to bytes."); + } + } + } + + async fn send_packet(socket: Arc, remote_addr: &SocketAddr, payload: &[u8]) { + // doesn't matter if it reaches or not + drop(socket.send_to(payload, remote_addr).await); + } +} diff --git a/src/shared/bit_torrent/common.rs b/src/shared/bit_torrent/common.rs new file mode 100644 index 000000000..527ae9ebc --- /dev/null +++ b/src/shared/bit_torrent/common.rs @@ -0,0 +1,27 @@ +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use serde::{Deserialize, Serialize}; + +pub const MAX_SCRAPE_TORRENTS: u8 = 74; +pub const AUTH_KEY_LENGTH: usize = 32; + +#[repr(u32)] +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] +pub enum Actions { + Connect = 0, + Announce = 1, + Scrape = 2, + Error = 3, +} + +#[derive(Serialize, Deserialize)] +#[serde(remote = "AnnounceEvent")] +pub enum 
AnnounceEventDef { + Started, + Stopped, + Completed, + None, +} + +#[derive(Serialize, Deserialize)] +#[serde(remote = "NumberOfBytes")] +pub struct NumberOfBytesDef(pub i64); diff --git a/src/shared/bit_torrent/info_hash.rs b/src/shared/bit_torrent/info_hash.rs new file mode 100644 index 000000000..fd7602cdd --- /dev/null +++ b/src/shared/bit_torrent/info_hash.rs @@ -0,0 +1,281 @@ +use std::panic::Location; + +use thiserror::Error; + +#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] +pub struct InfoHash(pub [u8; 20]); + +const INFO_HASH_BYTES_LEN: usize = 20; + +impl InfoHash { + /// # Panics + /// + /// Will panic if byte slice does not contains the exact amount of bytes need for the `InfoHash`. + #[must_use] + pub fn from_bytes(bytes: &[u8]) -> Self { + assert_eq!(bytes.len(), INFO_HASH_BYTES_LEN); + let mut ret = Self([0u8; INFO_HASH_BYTES_LEN]); + ret.0.clone_from_slice(bytes); + ret + } + + /// For readability, when accessing the bytes array + #[must_use] + pub fn bytes(&self) -> [u8; 20] { + self.0 + } + + #[must_use] + pub fn to_hex_string(&self) -> String { + self.to_string() + } +} + +impl std::fmt::Display for InfoHash { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let mut chars = [0u8; 40]; + binascii::bin2hex(&self.0, &mut chars).expect("failed to hexlify"); + write!(f, "{}", std::str::from_utf8(&chars).unwrap()) + } +} + +impl std::str::FromStr for InfoHash { + type Err = binascii::ConvertError; + + fn from_str(s: &str) -> Result { + let mut i = Self([0u8; 20]); + if s.len() != 40 { + return Err(binascii::ConvertError::InvalidInputLength); + } + binascii::hex2bin(s.as_bytes(), &mut i.0)?; + Ok(i) + } +} + +impl Ord for InfoHash { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl std::cmp::PartialOrd for InfoHash { + fn partial_cmp(&self, other: &InfoHash) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl std::convert::From<&[u8]> for InfoHash { + fn from(data: &[u8]) -> 
InfoHash { + assert_eq!(data.len(), 20); + let mut ret = InfoHash([0u8; 20]); + ret.0.clone_from_slice(data); + ret + } +} + +impl std::convert::From<[u8; 20]> for InfoHash { + fn from(val: [u8; 20]) -> Self { + InfoHash(val) + } +} + +#[derive(Error, Debug)] +pub enum ConversionError { + #[error("not enough bytes for infohash: {message} {location}")] + NotEnoughBytes { + location: &'static Location<'static>, + message: String, + }, + #[error("too many bytes for infohash: {message} {location}")] + TooManyBytes { + location: &'static Location<'static>, + message: String, + }, +} + +impl TryFrom> for InfoHash { + type Error = ConversionError; + + fn try_from(bytes: Vec) -> Result { + if bytes.len() < INFO_HASH_BYTES_LEN { + return Err(ConversionError::NotEnoughBytes { + location: Location::caller(), + message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, + }); + } + if bytes.len() > INFO_HASH_BYTES_LEN { + return Err(ConversionError::TooManyBytes { + location: Location::caller(), + message: format! 
{"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, + }); + } + Ok(Self::from_bytes(&bytes)) + } +} + +impl serde::ser::Serialize for InfoHash { + fn serialize(&self, serializer: S) -> Result { + let mut buffer = [0u8; 40]; + let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); + let str_out = std::str::from_utf8(bytes_out).unwrap(); + serializer.serialize_str(str_out) + } +} + +impl<'de> serde::de::Deserialize<'de> for InfoHash { + fn deserialize>(des: D) -> Result { + des.deserialize_str(InfoHashVisitor) + } +} + +struct InfoHashVisitor; + +impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { + type Value = InfoHash; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(formatter, "a 40 character long hash") + } + + fn visit_str(self, v: &str) -> Result { + if v.len() != 40 { + return Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(v), + &"a 40 character long string", + )); + } + + let mut res = InfoHash([0u8; 20]); + + if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { + return Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(v), + &"a hexadecimal string", + )); + }; + Ok(res) + } +} + +#[cfg(test)] +mod tests { + + use std::str::FromStr; + + use serde::{Deserialize, Serialize}; + use serde_json::json; + + use super::InfoHash; + + #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] + struct ContainingInfoHash { + pub info_hash: InfoHash, + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_40_utf8_char_string_representing_an_hexadecimal_value() { + let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"); + assert!(info_hash.is_ok()); + } + + #[test] + fn an_info_hash_can_not_be_created_from_a_utf8_string_representing_a_not_valid_hexadecimal_value() { + let info_hash = InfoHash::from_str("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"); + assert!(info_hash.is_err()); + } + + #[test] + fn 
an_info_hash_can_only_be_created_from_a_40_utf8_char_string() { + let info_hash = InfoHash::from_str(&"F".repeat(39)); + assert!(info_hash.is_err()); + + let info_hash = InfoHash::from_str(&"F".repeat(41)); + assert!(info_hash.is_err()); + } + + #[test] + fn an_info_hash_should_by_displayed_like_a_40_utf8_lowercased_char_hex_string() { + let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + + let output = format!("{info_hash}"); + + assert_eq!(output, "ffffffffffffffffffffffffffffffffffffffff"); + } + + #[test] + fn an_info_hash_should_return_its_a_40_utf8_lowercased_char_hex_representations_as_string() { + let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + + assert_eq!(info_hash.to_hex_string(), "ffffffffffffffffffffffffffffffffffffffff"); + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_20_byte_array_slice() { + let info_hash: InfoHash = [255u8; 20].as_slice().into(); + + assert_eq!( + info_hash, + InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + ); + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_20_byte_array() { + let info_hash: InfoHash = [255u8; 20].into(); + + assert_eq!( + info_hash, + InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + ); + } + + #[test] + fn an_info_hash_can_be_created_from_a_byte_vector() { + let info_hash: InfoHash = [255u8; 20].to_vec().try_into().unwrap(); + + assert_eq!( + info_hash, + InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + ); + } + + #[test] + fn it_should_fail_trying_to_create_an_info_hash_from_a_byte_vector_with_less_than_20_bytes() { + assert!(InfoHash::try_from([255u8; 19].to_vec()).is_err()); + } + + #[test] + fn it_should_fail_trying_to_create_an_info_hash_from_a_byte_vector_with_more_than_20_bytes() { + assert!(InfoHash::try_from([255u8; 21].to_vec()).is_err()); + } + + #[test] + fn an_info_hash_can_be_serialized() { + let s = 
ContainingInfoHash { + info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(), + }; + + let json_serialized_value = serde_json::to_string(&s).unwrap(); + + assert_eq!( + json_serialized_value, + r#"{"info_hash":"ffffffffffffffffffffffffffffffffffffffff"}"# + ); + } + + #[test] + fn an_info_hash_can_be_deserialized() { + let json = json!({ + "info_hash": "ffffffffffffffffffffffffffffffffffffffff", + }); + + let s: ContainingInfoHash = serde_json::from_value(json).unwrap(); + + assert_eq!( + s, + ContainingInfoHash { + info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + } + ); + } +} diff --git a/src/shared/bit_torrent/mod.rs b/src/shared/bit_torrent/mod.rs new file mode 100644 index 000000000..7579a0780 --- /dev/null +++ b/src/shared/bit_torrent/mod.rs @@ -0,0 +1,2 @@ +pub mod common; +pub mod info_hash; diff --git a/src/shared/clock/mod.rs b/src/shared/clock/mod.rs new file mode 100644 index 000000000..b5001e10e --- /dev/null +++ b/src/shared/clock/mod.rs @@ -0,0 +1,331 @@ +pub mod static_time; +pub mod time_extent; +pub mod utils; + +use std::num::IntErrorKind; +use std::str::FromStr; +use std::time::Duration; + +use chrono::{DateTime, NaiveDateTime, Utc}; + +pub type DurationSinceUnixEpoch = Duration; + +#[derive(Debug)] +pub enum Type { + WorkingClock, + StoppedClock, +} + +#[derive(Debug)] +pub struct Clock; + +pub type Working = Clock<{ Type::WorkingClock as usize }>; +pub type Stopped = Clock<{ Type::StoppedClock as usize }>; + +#[cfg(not(test))] +pub type Current = Working; + +#[cfg(test)] +pub type Current = Stopped; + +pub trait Time: Sized { + fn now() -> DurationSinceUnixEpoch; +} + +pub trait TimeNow: Time { + #[must_use] + fn add(add_time: &Duration) -> Option { + Self::now().checked_add(*add_time) + } + #[must_use] + fn sub(sub_time: &Duration) -> Option { + Self::now().checked_sub(*sub_time) + } +} + +/// # Panics +/// +/// Will panic if the input time cannot be converted to `DateTime::`. 
+/// +#[must_use] +pub fn convert_from_iso_8601_to_timestamp(iso_8601: &str) -> DurationSinceUnixEpoch { + convert_from_datetime_utc_to_timestamp(&DateTime::::from_str(iso_8601).unwrap()) +} + +/// # Panics +/// +/// Will panic if the input time overflows the u64 type. +/// +#[must_use] +pub fn convert_from_datetime_utc_to_timestamp(datetime_utc: &DateTime) -> DurationSinceUnixEpoch { + DurationSinceUnixEpoch::from_secs(u64::try_from(datetime_utc.timestamp()).expect("Overflow of u64 seconds, very future!")) +} + +/// # Panics +/// +/// Will panic if the input time overflows the i64 type. +/// +#[must_use] +pub fn convert_from_timestamp_to_datetime_utc(duration: DurationSinceUnixEpoch) -> DateTime { + DateTime::::from_utc( + NaiveDateTime::from_timestamp_opt( + i64::try_from(duration.as_secs()).expect("Overflow of i64 seconds, very future!"), + duration.subsec_nanos(), + ) + .unwrap(), + Utc, + ) +} + +#[cfg(test)] +mod tests { + use std::any::TypeId; + + use crate::shared::clock::{Current, Stopped, Time, Working}; + + #[test] + fn it_should_be_the_stopped_clock_as_default_when_testing() { + // We are testing, so we should default to the fixed time. 
+ assert_eq!(TypeId::of::(), TypeId::of::()); + assert_eq!(Stopped::now(), Current::now()); + } + + #[test] + fn it_should_have_different_times() { + assert_ne!(TypeId::of::(), TypeId::of::()); + assert_ne!(Stopped::now(), Working::now()); + } + + mod timestamp { + use chrono::{DateTime, NaiveDateTime, Utc}; + + use crate::shared::clock::{ + convert_from_datetime_utc_to_timestamp, convert_from_iso_8601_to_timestamp, convert_from_timestamp_to_datetime_utc, + DurationSinceUnixEpoch, + }; + + #[test] + fn should_be_converted_to_datetime_utc() { + let timestamp = DurationSinceUnixEpoch::ZERO; + assert_eq!( + convert_from_timestamp_to_datetime_utc(timestamp), + DateTime::::from_utc(NaiveDateTime::from_timestamp_opt(0, 0).unwrap(), Utc) + ); + } + + #[test] + fn should_be_converted_from_datetime_utc() { + let datetime = DateTime::::from_utc(NaiveDateTime::from_timestamp_opt(0, 0).unwrap(), Utc); + assert_eq!( + convert_from_datetime_utc_to_timestamp(&datetime), + DurationSinceUnixEpoch::ZERO + ); + } + + #[test] + fn should_be_converted_from_datetime_utc_in_iso_8601() { + let iso_8601 = "1970-01-01T00:00:00.000Z".to_string(); + assert_eq!(convert_from_iso_8601_to_timestamp(&iso_8601), DurationSinceUnixEpoch::ZERO); + } + } +} + +mod working_clock { + use std::time::SystemTime; + + use super::{DurationSinceUnixEpoch, Time, TimeNow, Working}; + + impl Time for Working { + fn now() -> DurationSinceUnixEpoch { + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap() + } + } + + impl TimeNow for Working {} +} + +pub trait StoppedTime: TimeNow { + fn local_set(unix_time: &DurationSinceUnixEpoch); + fn local_set_to_unix_epoch() { + Self::local_set(&DurationSinceUnixEpoch::ZERO); + } + fn local_set_to_app_start_time(); + fn local_set_to_system_time_now(); + + /// # Errors + /// + /// Will return `IntErrorKind` if `duration` would overflow the internal `Duration`. 
+ fn local_add(duration: &Duration) -> Result<(), IntErrorKind>; + + /// # Errors + /// + /// Will return `IntErrorKind` if `duration` would underflow the internal `Duration`. + fn local_sub(duration: &Duration) -> Result<(), IntErrorKind>; + fn local_reset(); +} + +mod stopped_clock { + use std::num::IntErrorKind; + use std::time::Duration; + + use super::{DurationSinceUnixEpoch, Stopped, StoppedTime, Time, TimeNow}; + + impl Time for Stopped { + fn now() -> DurationSinceUnixEpoch { + detail::FIXED_TIME.with(|time| { + return *time.borrow(); + }) + } + } + + impl TimeNow for Stopped {} + + impl StoppedTime for Stopped { + fn local_set(unix_time: &DurationSinceUnixEpoch) { + detail::FIXED_TIME.with(|time| { + *time.borrow_mut() = *unix_time; + }); + } + + fn local_set_to_app_start_time() { + Self::local_set(&detail::get_app_start_time()); + } + + fn local_set_to_system_time_now() { + Self::local_set(&detail::get_app_start_time()); + } + + fn local_add(duration: &Duration) -> Result<(), IntErrorKind> { + detail::FIXED_TIME.with(|time| { + let time_borrowed = *time.borrow(); + *time.borrow_mut() = match time_borrowed.checked_add(*duration) { + Some(time) => time, + None => { + return Err(IntErrorKind::PosOverflow); + } + }; + Ok(()) + }) + } + + fn local_sub(duration: &Duration) -> Result<(), IntErrorKind> { + detail::FIXED_TIME.with(|time| { + let time_borrowed = *time.borrow(); + *time.borrow_mut() = match time_borrowed.checked_sub(*duration) { + Some(time) => time, + None => { + return Err(IntErrorKind::NegOverflow); + } + }; + Ok(()) + }) + } + + fn local_reset() { + Self::local_set(&detail::get_default_fixed_time()); + } + } + + #[cfg(test)] + mod tests { + use std::thread; + use std::time::Duration; + + use crate::shared::clock::{DurationSinceUnixEpoch, Stopped, StoppedTime, Time, TimeNow, Working}; + + #[test] + fn it_should_default_to_zero_when_testing() { + assert_eq!(Stopped::now(), DurationSinceUnixEpoch::ZERO); + } + + #[test] + fn 
it_should_possible_to_set_the_time() { + // Check we start with ZERO. + assert_eq!(Stopped::now(), Duration::ZERO); + + // Set to Current Time and Check + let timestamp = Working::now(); + Stopped::local_set(×tamp); + assert_eq!(Stopped::now(), timestamp); + + // Elapse the Current Time and Check + Stopped::local_add(×tamp).unwrap(); + assert_eq!(Stopped::now(), timestamp + timestamp); + + // Reset to ZERO and Check + Stopped::local_reset(); + assert_eq!(Stopped::now(), Duration::ZERO); + } + + #[test] + fn it_should_default_to_zero_on_thread_exit() { + assert_eq!(Stopped::now(), Duration::ZERO); + let after5 = Working::add(&Duration::from_secs(5)).unwrap(); + Stopped::local_set(&after5); + assert_eq!(Stopped::now(), after5); + + let t = thread::spawn(move || { + // each thread starts out with the initial value of ZERO + assert_eq!(Stopped::now(), Duration::ZERO); + + // and gets set to the current time. + let timestamp = Working::now(); + Stopped::local_set(×tamp); + assert_eq!(Stopped::now(), timestamp); + }); + + // wait for the thread to complete and bail out on panic + t.join().unwrap(); + + // we retain our original value of current time + 5sec despite the child thread + assert_eq!(Stopped::now(), after5); + + // Reset to ZERO and Check + Stopped::local_reset(); + assert_eq!(Stopped::now(), Duration::ZERO); + } + } + + mod detail { + use std::cell::RefCell; + use std::time::SystemTime; + + use crate::shared::clock::{static_time, DurationSinceUnixEpoch}; + + pub fn get_app_start_time() -> DurationSinceUnixEpoch { + (*static_time::TIME_AT_APP_START) + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + } + + #[cfg(not(test))] + pub fn get_default_fixed_time() -> DurationSinceUnixEpoch { + get_app_start_time() + } + + #[cfg(test)] + pub fn get_default_fixed_time() -> DurationSinceUnixEpoch { + DurationSinceUnixEpoch::ZERO + } + + thread_local!(pub static FIXED_TIME: RefCell = RefCell::new(get_default_fixed_time())); + + #[cfg(test)] + mod tests { + use 
std::time::Duration; + + use crate::shared::clock::stopped_clock::detail::{get_app_start_time, get_default_fixed_time}; + + #[test] + fn it_should_get_the_zero_start_time_when_testing() { + assert_eq!(get_default_fixed_time(), Duration::ZERO); + } + + #[test] + fn it_should_get_app_start_time() { + const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1_662_983_731, 22312); + assert!(get_app_start_time() > TIME_AT_WRITING_THIS_TEST); + } + } + } +} diff --git a/src/shared/clock/static_time.rs b/src/shared/clock/static_time.rs new file mode 100644 index 000000000..f916cec9c --- /dev/null +++ b/src/shared/clock/static_time.rs @@ -0,0 +1,5 @@ +use std::time::SystemTime; + +lazy_static! { + pub static ref TIME_AT_APP_START: SystemTime = SystemTime::now(); +} diff --git a/src/shared/clock/time_extent.rs b/src/shared/clock/time_extent.rs new file mode 100644 index 000000000..64142c404 --- /dev/null +++ b/src/shared/clock/time_extent.rs @@ -0,0 +1,549 @@ +use std::num::{IntErrorKind, TryFromIntError}; +use std::time::Duration; + +use super::{Stopped, TimeNow, Type, Working}; + +pub trait Extent: Sized + Default { + type Base; + type Multiplier; + type Product; + + fn new(unit: &Self::Base, count: &Self::Multiplier) -> Self; + + /// # Errors + /// + /// Will return `IntErrorKind` if `add` would overflow the internal `Duration`. + fn increase(&self, add: Self::Multiplier) -> Result; + + /// # Errors + /// + /// Will return `IntErrorKind` if `sub` would underflow the internal `Duration`. 
+ fn decrease(&self, sub: Self::Multiplier) -> Result; + + fn total(&self) -> Option>; + fn total_next(&self) -> Option>; +} + +pub type Base = Duration; +pub type Multiplier = u64; +pub type Product = Base; + +#[derive(Debug, Default, Hash, PartialEq, Eq)] +pub struct TimeExtent { + pub increment: Base, + pub amount: Multiplier, +} + +pub const ZERO: TimeExtent = TimeExtent { + increment: Base::ZERO, + amount: Multiplier::MIN, +}; +pub const MAX: TimeExtent = TimeExtent { + increment: Base::MAX, + amount: Multiplier::MAX, +}; + +impl TimeExtent { + #[must_use] + pub const fn from_sec(seconds: u64, amount: &Multiplier) -> Self { + Self { + increment: Base::from_secs(seconds), + amount: *amount, + } + } +} + +fn checked_duration_from_nanos(time: u128) -> Result { + const NANOS_PER_SEC: u32 = 1_000_000_000; + + let secs = time.div_euclid(u128::from(NANOS_PER_SEC)); + let nanos = time.rem_euclid(u128::from(NANOS_PER_SEC)); + + assert!(nanos < u128::from(NANOS_PER_SEC)); + + match u64::try_from(secs) { + Err(error) => Err(error), + Ok(secs) => Ok(Duration::new(secs, nanos.try_into().unwrap())), + } +} + +impl Extent for TimeExtent { + type Base = Base; + type Multiplier = Multiplier; + type Product = Product; + + fn new(increment: &Self::Base, amount: &Self::Multiplier) -> Self { + Self { + increment: *increment, + amount: *amount, + } + } + + fn increase(&self, add: Self::Multiplier) -> Result { + match self.amount.checked_add(add) { + None => Err(IntErrorKind::PosOverflow), + Some(amount) => Ok(Self { + increment: self.increment, + amount, + }), + } + } + + fn decrease(&self, sub: Self::Multiplier) -> Result { + match self.amount.checked_sub(sub) { + None => Err(IntErrorKind::NegOverflow), + Some(amount) => Ok(Self { + increment: self.increment, + amount, + }), + } + } + + fn total(&self) -> Option> { + self.increment + .as_nanos() + .checked_mul(u128::from(self.amount)) + .map(checked_duration_from_nanos) + } + + fn total_next(&self) -> Option> { + self.increment + 
.as_nanos() + .checked_mul(u128::from(self.amount) + 1) + .map(checked_duration_from_nanos) + } +} + +pub trait Make: Sized +where + Clock: TimeNow, +{ + #[must_use] + fn now(increment: &Base) -> Option> { + Clock::now() + .as_nanos() + .checked_div((*increment).as_nanos()) + .map(|amount| match Multiplier::try_from(amount) { + Err(error) => Err(error), + Ok(amount) => Ok(TimeExtent::new(increment, &amount)), + }) + } + + #[must_use] + fn now_after(increment: &Base, add_time: &Duration) -> Option> { + match Clock::add(add_time) { + None => None, + Some(time) => time + .as_nanos() + .checked_div(increment.as_nanos()) + .map(|amount| match Multiplier::try_from(amount) { + Err(error) => Err(error), + Ok(amount) => Ok(TimeExtent::new(increment, &amount)), + }), + } + } + + #[must_use] + fn now_before(increment: &Base, sub_time: &Duration) -> Option> { + match Clock::sub(sub_time) { + None => None, + Some(time) => time + .as_nanos() + .checked_div(increment.as_nanos()) + .map(|amount| match Multiplier::try_from(amount) { + Err(error) => Err(error), + Ok(amount) => Ok(TimeExtent::new(increment, &amount)), + }), + } + } +} + +#[derive(Debug)] +pub struct Maker {} + +pub type WorkingTimeExtentMaker = Maker<{ Type::WorkingClock as usize }>; +pub type StoppedTimeExtentMaker = Maker<{ Type::StoppedClock as usize }>; + +impl Make for WorkingTimeExtentMaker {} +impl Make for StoppedTimeExtentMaker {} + +#[cfg(not(test))] +pub type DefaultTimeExtentMaker = WorkingTimeExtentMaker; + +#[cfg(test)] +pub type DefaultTimeExtentMaker = StoppedTimeExtentMaker; + +#[cfg(test)] +mod test { + + use crate::shared::clock::time_extent::{ + checked_duration_from_nanos, Base, DefaultTimeExtentMaker, Extent, Make, Multiplier, Product, TimeExtent, MAX, ZERO, + }; + use crate::shared::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; + + const TIME_EXTENT_VAL: TimeExtent = TimeExtent::from_sec(2, &239_812_388_723); + + mod fn_checked_duration_from_nanos { + use std::time::Duration; + + use 
super::*; + + const NANOS_PER_SEC: u32 = 1_000_000_000; + + #[test] + fn it_should_give_zero_for_zero_input() { + assert_eq!(checked_duration_from_nanos(0).unwrap(), Duration::ZERO); + } + + #[test] + fn it_should_be_the_same_as_duration_implementation_for_u64_numbers() { + assert_eq!( + checked_duration_from_nanos(1_232_143_214_343_432).unwrap(), + Duration::from_nanos(1_232_143_214_343_432) + ); + assert_eq!( + checked_duration_from_nanos(u128::from(u64::MAX)).unwrap(), + Duration::from_nanos(u64::MAX) + ); + } + + #[test] + fn it_should_work_for_some_numbers_larger_than_u64() { + assert_eq!( + checked_duration_from_nanos(u128::from(TIME_EXTENT_VAL.amount) * u128::from(NANOS_PER_SEC)).unwrap(), + Duration::from_secs(TIME_EXTENT_VAL.amount) + ); + } + + #[test] + fn it_should_fail_for_numbers_that_are_too_large() { + assert_eq!( + checked_duration_from_nanos(u128::MAX).unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } + } + + mod time_extent { + use super::*; + + mod fn_default { + + use super::*; + + #[test] + fn it_should_default_initialize_to_zero() { + assert_eq!(TimeExtent::default(), ZERO); + } + } + + mod fn_from_sec { + use super::*; + + #[test] + fn it_should_make_empty_for_zero() { + assert_eq!(TimeExtent::from_sec(u64::MIN, &Multiplier::MIN), ZERO); + } + #[test] + fn it_should_make_from_seconds() { + assert_eq!( + TimeExtent::from_sec(TIME_EXTENT_VAL.increment.as_secs(), &TIME_EXTENT_VAL.amount), + TIME_EXTENT_VAL + ); + } + } + + mod fn_new { + use super::*; + + #[test] + fn it_should_make_empty_for_zero() { + assert_eq!(TimeExtent::new(&Base::ZERO, &Multiplier::MIN), ZERO); + } + + #[test] + fn it_should_make_new() { + assert_eq!( + TimeExtent::new(&Base::from_millis(2), &TIME_EXTENT_VAL.amount), + TimeExtent { + increment: Base::from_millis(2), + amount: TIME_EXTENT_VAL.amount + } + ); + } + } + + mod fn_increase { + use std::num::IntErrorKind; + + use super::*; + + #[test] + fn it_should_not_increase_for_zero() { + 
assert_eq!(ZERO.increase(0).unwrap(), ZERO); + } + + #[test] + fn it_should_increase() { + assert_eq!( + TIME_EXTENT_VAL.increase(50).unwrap(), + TimeExtent { + increment: TIME_EXTENT_VAL.increment, + amount: TIME_EXTENT_VAL.amount + 50, + } + ); + } + + #[test] + fn it_should_fail_when_attempting_to_increase_beyond_bounds() { + assert_eq!(TIME_EXTENT_VAL.increase(u64::MAX), Err(IntErrorKind::PosOverflow)); + } + } + + mod fn_decrease { + use std::num::IntErrorKind; + + use super::*; + + #[test] + fn it_should_not_decrease_for_zero() { + assert_eq!(ZERO.decrease(0).unwrap(), ZERO); + } + + #[test] + fn it_should_decrease() { + assert_eq!( + TIME_EXTENT_VAL.decrease(50).unwrap(), + TimeExtent { + increment: TIME_EXTENT_VAL.increment, + amount: TIME_EXTENT_VAL.amount - 50, + } + ); + } + + #[test] + fn it_should_fail_when_attempting_to_decrease_beyond_bounds() { + assert_eq!(TIME_EXTENT_VAL.decrease(u64::MAX), Err(IntErrorKind::NegOverflow)); + } + } + + mod fn_total { + use super::*; + + #[test] + fn it_should_be_zero_for_zero() { + assert_eq!(ZERO.total().unwrap().unwrap(), Product::ZERO); + } + + #[test] + fn it_should_give_a_total() { + assert_eq!( + TIME_EXTENT_VAL.total().unwrap().unwrap(), + Product::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) + ); + + assert_eq!( + TimeExtent::new(&Base::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) + .total() + .unwrap() + .unwrap(), + Product::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) + ); + + assert_eq!( + TimeExtent::new(&Base::from_secs(1), &(u64::MAX)).total().unwrap().unwrap(), + Product::from_secs(u64::MAX) + ); + } + + #[test] + fn it_should_fail_when_too_large() { + assert_eq!(MAX.total(), None); + } + + #[test] + fn it_should_fail_when_product_is_too_large() { + let time_extent = TimeExtent { + increment: MAX.increment, + amount: 2, + }; + assert_eq!( + time_extent.total().unwrap().unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } + } + + 
mod fn_total_next { + use super::*; + + #[test] + fn it_should_be_zero_for_zero() { + assert_eq!(ZERO.total_next().unwrap().unwrap(), Product::ZERO); + } + + #[test] + fn it_should_give_a_total() { + assert_eq!( + TIME_EXTENT_VAL.total_next().unwrap().unwrap(), + Product::from_secs(TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount + 1)) + ); + + assert_eq!( + TimeExtent::new(&Base::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) + .total_next() + .unwrap() + .unwrap(), + Product::new( + TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount), + Base::from_millis(2).as_nanos().try_into().unwrap() + ) + ); + + assert_eq!( + TimeExtent::new(&Base::from_secs(1), &(u64::MAX - 1)) + .total_next() + .unwrap() + .unwrap(), + Product::from_secs(u64::MAX) + ); + } + + #[test] + fn it_should_fail_when_too_large() { + assert_eq!(MAX.total_next(), None); + } + + #[test] + fn it_should_fail_when_product_is_too_large() { + let time_extent = TimeExtent { + increment: MAX.increment, + amount: 2, + }; + assert_eq!( + time_extent.total_next().unwrap().unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } + } + } + + mod make_time_extent { + use super::*; + + mod fn_now { + use super::*; + + #[test] + fn it_should_give_a_time_extent() { + assert_eq!( + DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), + TimeExtent { + increment: TIME_EXTENT_VAL.increment, + amount: 0 + } + ); + + Current::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); + + assert_eq!( + DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), + TIME_EXTENT_VAL + ); + } + + #[test] + fn it_should_fail_for_zero() { + assert_eq!(DefaultTimeExtentMaker::now(&Base::ZERO), None); + } + + #[test] + fn it_should_fail_if_amount_exceeds_bounds() { + Current::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!( + DefaultTimeExtentMaker::now(&Base::from_millis(1)).unwrap().unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + 
); + } + } + + mod fn_now_after { + use std::time::Duration; + + use super::*; + + #[test] + fn it_should_give_a_time_extent() { + assert_eq!( + DefaultTimeExtentMaker::now_after( + &TIME_EXTENT_VAL.increment, + &Duration::from_secs(TIME_EXTENT_VAL.amount * 2) + ) + .unwrap() + .unwrap(), + TIME_EXTENT_VAL + ); + } + + #[test] + fn it_should_fail_for_zero() { + assert_eq!(DefaultTimeExtentMaker::now_after(&Base::ZERO, &Duration::ZERO), None); + + Current::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!(DefaultTimeExtentMaker::now_after(&Base::ZERO, &Duration::MAX), None); + } + + #[test] + fn it_should_fail_if_amount_exceeds_bounds() { + Current::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!( + DefaultTimeExtentMaker::now_after(&Base::from_millis(1), &Duration::ZERO) + .unwrap() + .unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } + } + mod fn_now_before { + use std::time::Duration; + + use super::*; + + #[test] + fn it_should_give_a_time_extent() { + Current::local_set(&DurationSinceUnixEpoch::MAX); + + assert_eq!( + DefaultTimeExtentMaker::now_before( + &Base::from_secs(u64::from(u32::MAX)), + &Duration::from_secs(u64::from(u32::MAX)) + ) + .unwrap() + .unwrap(), + TimeExtent { + increment: Base::from_secs(u64::from(u32::MAX)), + amount: 4_294_967_296 + } + ); + } + + #[test] + fn it_should_fail_for_zero() { + assert_eq!(DefaultTimeExtentMaker::now_before(&Base::ZERO, &Duration::ZERO), None); + + assert_eq!(DefaultTimeExtentMaker::now_before(&Base::ZERO, &Duration::MAX), None); + } + + #[test] + fn it_should_fail_if_amount_exceeds_bounds() { + Current::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!( + DefaultTimeExtentMaker::now_before(&Base::from_millis(1), &Duration::ZERO) + .unwrap() + .unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } + } + } +} diff --git a/src/shared/clock/utils.rs b/src/shared/clock/utils.rs new file mode 100644 index 000000000..9127f97b1 --- /dev/null +++ b/src/shared/clock/utils.rs @@ -0,0 
+1,9 @@ +use super::DurationSinceUnixEpoch; + +/// # Errors +/// +/// Will return `serde::Serializer::Error` if unable to serialize the `unix_time_value`. +pub fn ser_unix_time_value(unix_time_value: &DurationSinceUnixEpoch, ser: S) -> Result { + #[allow(clippy::cast_possible_truncation)] + ser.serialize_u64(unix_time_value.as_millis() as u64) +} diff --git a/src/shared/crypto/ephemeral_instance_keys.rs b/src/shared/crypto/ephemeral_instance_keys.rs new file mode 100644 index 000000000..635d10fbd --- /dev/null +++ b/src/shared/crypto/ephemeral_instance_keys.rs @@ -0,0 +1,8 @@ +use rand::rngs::ThreadRng; +use rand::Rng; + +pub type Seed = [u8; 32]; + +lazy_static! { + pub static ref RANDOM_SEED: Seed = Rng::gen(&mut ThreadRng::default()); +} diff --git a/src/shared/crypto/keys.rs b/src/shared/crypto/keys.rs new file mode 100644 index 000000000..5e04eb551 --- /dev/null +++ b/src/shared/crypto/keys.rs @@ -0,0 +1,95 @@ +pub mod seeds { + use self::detail::CURRENT_SEED; + use crate::shared::crypto::ephemeral_instance_keys::{Seed, RANDOM_SEED}; + + pub trait Keeper { + type Seed: Sized + Default + AsMut<[u8]>; + fn get_seed() -> &'static Self::Seed; + } + + pub struct Instance; + pub struct Current; + + impl Keeper for Instance { + type Seed = Seed; + + fn get_seed() -> &'static Self::Seed { + &RANDOM_SEED + } + } + + impl Keeper for Current { + type Seed = Seed; + + #[allow(clippy::needless_borrow)] + fn get_seed() -> &'static Self::Seed { + &CURRENT_SEED + } + } + + #[cfg(test)] + mod tests { + use super::detail::ZEROED_TEST_SEED; + use super::{Current, Instance, Keeper}; + use crate::shared::crypto::ephemeral_instance_keys::Seed; + + pub struct ZeroedTestSeed; + + impl Keeper for ZeroedTestSeed { + type Seed = Seed; + + #[allow(clippy::needless_borrow)] + fn get_seed() -> &'static Self::Seed { + &ZEROED_TEST_SEED + } + } + + #[test] + fn the_default_seed_and_the_zeroed_seed_should_be_the_same_when_testing() { + assert_eq!(Current::get_seed(), 
ZeroedTestSeed::get_seed()); + } + + #[test] + fn the_default_seed_and_the_instance_seed_should_be_different_when_testing() { + assert_ne!(Current::get_seed(), Instance::get_seed()); + } + } + + mod detail { + use crate::shared::crypto::ephemeral_instance_keys::Seed; + + #[allow(dead_code)] + pub const ZEROED_TEST_SEED: &Seed = &[0u8; 32]; + + #[cfg(test)] + pub use ZEROED_TEST_SEED as CURRENT_SEED; + + #[cfg(not(test))] + pub use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as CURRENT_SEED; + + #[cfg(test)] + mod tests { + use std::convert::TryInto; + + use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED; + use crate::shared::crypto::keys::seeds::detail::ZEROED_TEST_SEED; + use crate::shared::crypto::keys::seeds::CURRENT_SEED; + + #[test] + fn it_should_have_a_zero_test_seed() { + assert_eq!(*ZEROED_TEST_SEED, [0u8; 32]); + } + + #[test] + fn it_should_default_to_zeroed_seed_when_testing() { + assert_eq!(*CURRENT_SEED, *ZEROED_TEST_SEED); + } + + #[test] + fn it_should_have_a_large_random_seed() { + assert!(u128::from_ne_bytes((*RANDOM_SEED)[..16].try_into().unwrap()) > u128::from(u64::MAX)); + assert!(u128::from_ne_bytes((*RANDOM_SEED)[16..].try_into().unwrap()) > u128::from(u64::MAX)); + } + } + } +} diff --git a/src/shared/crypto/mod.rs b/src/shared/crypto/mod.rs new file mode 100644 index 000000000..066eb0f46 --- /dev/null +++ b/src/shared/crypto/mod.rs @@ -0,0 +1,2 @@ +pub mod ephemeral_instance_keys; +pub mod keys; diff --git a/src/shared/mod.rs b/src/shared/mod.rs new file mode 100644 index 000000000..4b0d9138e --- /dev/null +++ b/src/shared/mod.rs @@ -0,0 +1,3 @@ +pub mod bit_torrent; +pub mod clock; +pub mod crypto; diff --git a/src/torrust_http_tracker/errors.rs b/src/torrust_http_tracker/errors.rs deleted file mode 100644 index d8d6c7623..000000000 --- a/src/torrust_http_tracker/errors.rs +++ /dev/null @@ -1,34 +0,0 @@ -use warp::reject::Reject; -use thiserror::Error; - -#[derive(Error, Debug)] -pub enum ServerError { - 
#[error("internal server error")] - InternalServerError, - - #[error("info_hash is either missing or invalid")] - InvalidInfoHash, - - #[error("peer_id is either missing or invalid")] - InvalidPeerId, - - #[error("could not find remote address")] - AddressNotFound, - - #[error("torrent has no peers")] - NoPeersFound, - - #[error("torrent not on whitelist")] - TorrentNotWhitelisted, - - #[error("peer not authenticated")] - PeerNotAuthenticated, - - #[error("invalid authentication key")] - PeerKeyNotValid, - - #[error("exceeded info_hash limit")] - ExceededInfoHashLimit, -} - -impl Reject for ServerError {} diff --git a/src/torrust_http_tracker/filters.rs b/src/torrust_http_tracker/filters.rs deleted file mode 100644 index 61fa20a45..000000000 --- a/src/torrust_http_tracker/filters.rs +++ /dev/null @@ -1,179 +0,0 @@ -use std::convert::Infallible; -use std::net::{IpAddr, SocketAddr}; -use std::str::FromStr; -use std::sync::Arc; -use log::debug; -use warp::{Filter, reject, Rejection}; -use crate::{InfoHash, MAX_SCRAPE_TORRENTS, PeerId, TorrentTracker}; -use crate::key_manager::AuthKey; -use crate::torrust_http_tracker::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError, WebResult}; - -/// Pass Arc along -pub fn with_tracker(tracker: Arc) -> impl Filter,), Error = Infallible> + Clone { - warp::any() - .map(move || tracker.clone()) -} - -/// Check for infoHash -pub fn with_info_hash() -> impl Filter,), Error = Rejection> + Clone { - warp::filters::query::raw() - .and_then(info_hashes) -} - -/// Check for PeerId -pub fn with_peer_id() -> impl Filter + Clone { - warp::filters::query::raw() - .and_then(peer_id) -} - -/// Pass Arc along -pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { - warp::path::param::() - .map(|key: String| { - AuthKey::from_string(&key) - }) - .or_else(|_| async { - Ok::<(Option,), Infallible>((None,)) - }) -} - -/// Check for PeerAddress -pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter + Clone { - 
warp::addr::remote() - .and(warp::header::optional::("X-Forwarded-For")) - .map(move |remote_addr: Option, x_forwarded_for: Option| { - (on_reverse_proxy, remote_addr, x_forwarded_for) - }) - .and_then(peer_addr) -} - -/// Check for AnnounceRequest -pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter + Clone { - warp::filters::query::query::() - .and(with_info_hash()) - .and(with_peer_id()) - .and(with_peer_addr(on_reverse_proxy)) - .and_then(announce_request) -} - -/// Check for ScrapeRequest -pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter + Clone { - warp::any() - .and(with_info_hash()) - .and(with_peer_addr(on_reverse_proxy)) - .and_then(scrape_request) -} - -/// Parse InfoHash from raw query string -async fn info_hashes(raw_query: String) -> WebResult> { - let split_raw_query: Vec<&str> = raw_query.split("&").collect(); - let mut info_hashes: Vec = Vec::new(); - - for v in split_raw_query { - if v.contains("info_hash") { - let raw_info_hash = v.split("=").collect::>()[1]; - let info_hash_bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); - let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes)); - if let Ok(ih) = info_hash { - info_hashes.push(ih); - } - } - } - - if info_hashes.len() > MAX_SCRAPE_TORRENTS as usize { - Err(reject::custom(ServerError::ExceededInfoHashLimit)) - } else if info_hashes.len() < 1 { - Err(reject::custom(ServerError::InvalidInfoHash)) - } else { - Ok(info_hashes) - } -} - -/// Parse PeerId from raw query string -async fn peer_id(raw_query: String) -> WebResult { - // put all query params in a vec - let split_raw_query: Vec<&str> = raw_query.split("&").collect(); - - let mut peer_id: Option = None; - - for v in split_raw_query { - // look for the peer_id param - if v.contains("peer_id") { - // get raw percent_encoded peer_id - let raw_peer_id = v.split("=").collect::>()[1]; - - // decode peer_id - let peer_id_bytes = 
percent_encoding::percent_decode_str(raw_peer_id).collect::>(); - - // peer_id must be 20 bytes - if peer_id_bytes.len() != 20 { - return Err(reject::custom(ServerError::InvalidPeerId)); - } - - // clone peer_id_bytes into fixed length array - let mut byte_arr: [u8; 20] = Default::default(); - byte_arr.clone_from_slice(peer_id_bytes.as_slice()); - - peer_id = Some(PeerId(byte_arr)); - break; - } - } - - if peer_id.is_none() { - Err(reject::custom(ServerError::InvalidPeerId)) - } else { - Ok(peer_id.unwrap()) - } -} - -/// Get PeerAddress from RemoteAddress or Forwarded -async fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option)) -> WebResult { - if !on_reverse_proxy && remote_addr.is_none() { - return Err(reject::custom(ServerError::AddressNotFound)) - } - - if on_reverse_proxy && x_forwarded_for.is_none() { - return Err(reject::custom(ServerError::AddressNotFound)) - } - - match on_reverse_proxy { - true => { - let mut x_forwarded_for_raw = x_forwarded_for.unwrap(); - // remove whitespace chars - x_forwarded_for_raw.retain(|c| !c.is_whitespace()); - // get all forwarded ip's in a vec - let x_forwarded_ips: Vec<&str> = x_forwarded_for_raw.split(',').collect(); - // set client ip to last forwarded ip - let x_forwarded_ip = *x_forwarded_ips.last().unwrap(); - - IpAddr::from_str(x_forwarded_ip).or_else(|e| { - debug!("{}", e); - Err(reject::custom(ServerError::AddressNotFound)) - }) - }, - false => Ok(remote_addr.unwrap().ip()) - } -} - -/// Parse AnnounceRequest from raw AnnounceRequestQuery, InfoHash and Option -async fn announce_request(announce_request_query: AnnounceRequestQuery, info_hashes: Vec, peer_id: PeerId, peer_addr: IpAddr) -> WebResult { - Ok(AnnounceRequest { - info_hash: info_hashes[0], - peer_addr, - downloaded: announce_request_query.downloaded.unwrap_or(0), - uploaded: announce_request_query.uploaded.unwrap_or(0), - peer_id, - port: announce_request_query.port, - left: announce_request_query.left.unwrap_or(0), - 
event: announce_request_query.event, - compact: announce_request_query.compact - }) -} - -/// Parse ScrapeRequest from InfoHash -async fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { - Ok(ScrapeRequest { - info_hashes, - peer_addr, - }) -} diff --git a/src/torrust_http_tracker/handlers.rs b/src/torrust_http_tracker/handlers.rs deleted file mode 100644 index f15b7143a..000000000 --- a/src/torrust_http_tracker/handlers.rs +++ /dev/null @@ -1,147 +0,0 @@ -use std::collections::HashMap; -use std::convert::Infallible; -use std::sync::Arc; -use log::debug; -use warp::{reject, Rejection, Reply}; -use warp::http::{Response}; -use crate::{InfoHash, TorrentError, TorrentPeer, TorrentStats, TorrentTracker}; -use crate::key_manager::AuthKey; -use crate::torrust_http_tracker::{AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, WebResult}; -use crate::utils::url_encode_bytes; - -/// Authenticate InfoHash using optional AuthKey -pub async fn authenticate(info_hash: &InfoHash, auth_key: &Option, tracker: Arc) -> Result<(), ServerError> { - match tracker.authenticate_request(info_hash, auth_key).await { - Ok(_) => Ok(()), - Err(e) => { - let err = match e { - TorrentError::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, - TorrentError::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, - TorrentError::PeerKeyNotValid => ServerError::PeerKeyNotValid, - TorrentError::NoPeersFound => ServerError::NoPeersFound, - TorrentError::CouldNotSendResponse => ServerError::InternalServerError, - TorrentError::InvalidInfoHash => ServerError::InvalidInfoHash, - }; - - Err(err) - } - } -} - -/// Handle announce request -pub async fn handle_announce(announce_request: AnnounceRequest, auth_key: Option, tracker: Arc) -> WebResult { - if let Err(e) = authenticate(&announce_request.info_hash, &auth_key, tracker.clone()).await { - return Err(reject::custom(e)) - } - - debug!("{:?}", 
announce_request); - - let peer = TorrentPeer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); - let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer).await; - - // get all torrent peers excluding the peer_addr - let peers = tracker.get_torrent_peers(&announce_request.info_hash, &peer.peer_addr).await; - - // success response - let tracker_copy = tracker.clone(); - let is_ipv4 = announce_request.peer_addr.is_ipv4(); - - tokio::spawn(async move { - let mut status_writer = tracker_copy.set_stats().await; - if is_ipv4 { - status_writer.tcp4_connections_handled += 1; - status_writer.tcp4_announces_handled += 1; - } else { - status_writer.tcp6_connections_handled += 1; - status_writer.tcp6_announces_handled += 1; - } - }); - - let announce_interval = tracker.config.announce_interval; - - send_announce_response(&announce_request, torrent_stats, peers, announce_interval, tracker.config.announce_interval_min) -} - -/// Handle scrape request -pub async fn handle_scrape(scrape_request: ScrapeRequest, auth_key: Option, tracker: Arc) -> WebResult { - let mut files: HashMap = HashMap::new(); - let db = tracker.get_torrents().await; - - for info_hash in scrape_request.info_hashes.iter() { - // authenticate every info_hash - if authenticate(info_hash, &auth_key, tracker.clone()).await.is_err() { continue } - - let scrape_entry = match db.get(&info_hash) { - Some(torrent_info) => { - let (seeders, completed, leechers) = torrent_info.get_stats(); - ScrapeResponseEntry { complete: seeders, downloaded: completed, incomplete: leechers } - } - None => { - ScrapeResponseEntry { complete: 0, downloaded: 0, incomplete: 0 } - } - }; - - if let Ok(encoded_info_hash) = url_encode_bytes(&info_hash.0) { - files.insert(encoded_info_hash, scrape_entry); - } - } - - let tracker_copy = tracker.clone(); - - tokio::spawn(async move { - let mut status_writer = tracker_copy.set_stats().await; - if 
scrape_request.peer_addr.is_ipv4() { - status_writer.tcp4_connections_handled += 1; - status_writer.tcp4_scrapes_handled += 1; - } else { - status_writer.tcp6_connections_handled += 1; - status_writer.tcp6_scrapes_handled += 1; - } - }); - - send_scrape_response(files) -} - -/// Send announce response -fn send_announce_response(announce_request: &AnnounceRequest, torrent_stats: TorrentStats, peers: Vec, interval: u32, interval_min: u32) -> WebResult { - let http_peers: Vec = peers.iter().map(|peer| Peer { - peer_id: peer.peer_id.to_string(), - ip: peer.peer_addr.ip(), - port: peer.peer_addr.port() - }).collect(); - - let res = AnnounceResponse { - interval, - interval_min, - complete: torrent_stats.seeders, - incomplete: torrent_stats.leechers, - peers: http_peers - }; - - // check for compact response request - if let Some(1) = announce_request.compact { - match res.write_compact() { - Ok(body) => Ok(Response::new(body)), - Err(_) => Err(reject::custom(ServerError::InternalServerError)) - } - } else { - Ok(Response::new(res.write().into())) - } -} - -/// Send scrape response -fn send_scrape_response(files: HashMap) -> WebResult { - Ok(Response::new(ScrapeResponse { files }.write())) -} - -/// Handle all server errors and send error reply -pub async fn send_error(r: Rejection) -> std::result::Result { - let body = if let Some(server_error) = r.find::() { - debug!("{:?}", server_error); - ErrorResponse { failure_reason: server_error.to_string() }.write() - } else { - ErrorResponse { failure_reason: ServerError::InternalServerError.to_string() }.write() - }; - - Ok(Response::new(body)) -} diff --git a/src/torrust_http_tracker/mod.rs b/src/torrust_http_tracker/mod.rs deleted file mode 100644 index ea6675dce..000000000 --- a/src/torrust_http_tracker/mod.rs +++ /dev/null @@ -1,18 +0,0 @@ -pub mod server; -pub mod request; -pub mod response; -pub mod errors; -pub mod routes; -pub mod handlers; -pub mod filters; - -pub use self::server::*; -pub use self::request::*; -pub 
use self::response::*; -pub use self::errors::*; -pub use self::routes::*; -pub use self::handlers::*; -pub use self::filters::*; - -pub type Bytes = u64; -pub type WebResult = std::result::Result; diff --git a/src/torrust_http_tracker/request.rs b/src/torrust_http_tracker/request.rs deleted file mode 100644 index 0fb316671..000000000 --- a/src/torrust_http_tracker/request.rs +++ /dev/null @@ -1,33 +0,0 @@ -use std::net::{IpAddr}; -use serde::{Deserialize}; -use crate::{InfoHash, PeerId}; -use crate::torrust_http_tracker::Bytes; - -#[derive(Deserialize)] -pub struct AnnounceRequestQuery { - pub downloaded: Option, - pub uploaded: Option, - pub key: Option, - pub port: u16, - pub left: Option, - pub event: Option, - pub compact: Option, -} - -#[derive(Debug)] -pub struct AnnounceRequest { - pub info_hash: InfoHash, - pub peer_addr: IpAddr, - pub downloaded: Bytes, - pub uploaded: Bytes, - pub peer_id: PeerId, - pub port: u16, - pub left: Bytes, - pub event: Option, - pub compact: Option, -} - -pub struct ScrapeRequest { - pub info_hashes: Vec, - pub peer_addr: IpAddr, -} diff --git a/src/torrust_http_tracker/response.rs b/src/torrust_http_tracker/response.rs deleted file mode 100644 index d459a6fdd..000000000 --- a/src/torrust_http_tracker/response.rs +++ /dev/null @@ -1,96 +0,0 @@ -use std::collections::HashMap; -use std::error::Error; -use std::io::Write; -use std::net::IpAddr; -use serde::{Serialize}; - -#[derive(Serialize)] -pub struct Peer { - pub peer_id: String, - pub ip: IpAddr, - pub port: u16, -} - -#[derive(Serialize)] -pub struct AnnounceResponse { - pub interval: u32, - pub interval_min: u32, - //pub tracker_id: String, - pub complete: u32, - pub incomplete: u32, - pub peers: Vec -} - -impl AnnounceResponse { - pub fn write(&self) -> String { - serde_bencode::to_string(&self).unwrap() - } - - pub fn write_compact(&self) -> Result, Box> { - let mut peers_v4: Vec = Vec::new(); - let mut peers_v6: Vec = Vec::new(); - - for peer in &self.peers { - match 
peer.ip { - IpAddr::V4(ip) => { - peers_v4.write(&u32::from(ip).to_be_bytes())?; - peers_v4.write(&peer.port.to_be_bytes())?; - } - IpAddr::V6(ip) => { - peers_v6.write(&u128::from(ip).to_be_bytes())?; - peers_v6.write(&peer.port.to_be_bytes())?; - } - } - } - - let mut bytes: Vec = Vec::new(); - bytes.write(b"d8:intervali")?; - bytes.write(&self.interval.to_string().as_bytes())?; - bytes.write(b"e12:min intervali")?; - bytes.write(&self.interval_min.to_string().as_bytes())?; - bytes.write(b"e8:completei")?; - bytes.write(&self.complete.to_string().as_bytes())?; - bytes.write(b"e10:incompletei")?; - bytes.write(&self.incomplete.to_string().as_bytes())?; - bytes.write(b"e5:peers")?; - bytes.write(&peers_v4.len().to_string().as_bytes())?; - bytes.write(b":")?; - bytes.write(peers_v4.as_slice())?; - bytes.write(b"e6:peers6")?; - bytes.write(&peers_v6.len().to_string().as_bytes())?; - bytes.write(b":")?; - bytes.write(peers_v6.as_slice())?; - bytes.write(b"e")?; - - Ok(bytes) - } -} - -#[derive(Serialize)] -pub struct ScrapeResponseEntry { - pub complete: u32, - pub downloaded: u32, - pub incomplete: u32, -} - -#[derive(Serialize)] -pub struct ScrapeResponse { - pub files: HashMap -} - -impl ScrapeResponse { - pub fn write(&self) -> String { - serde_bencode::to_string(&self).unwrap() - } -} - -#[derive(Serialize)] -pub struct ErrorResponse { - pub failure_reason: String -} - -impl ErrorResponse { - pub fn write(&self) -> String { - serde_bencode::to_string(&self).unwrap() - } -} diff --git a/src/torrust_http_tracker/routes.rs b/src/torrust_http_tracker/routes.rs deleted file mode 100644 index 4b4de722f..000000000 --- a/src/torrust_http_tracker/routes.rs +++ /dev/null @@ -1,43 +0,0 @@ -use std::convert::Infallible; -use std::sync::Arc; -use warp::{Filter, Rejection}; -use crate::TorrentTracker; -use crate::torrust_http_tracker::{handle_announce, send_error, handle_scrape, with_announce_request, with_auth_key, with_scrape_request, with_tracker}; - -/// All routes -pub fn 
routes(tracker: Arc,) -> impl Filter + Clone { - root(tracker.clone()) - .or(announce(tracker.clone())) - .or(scrape(tracker.clone())) - .recover(send_error) -} - -/// GET / or / -fn root(tracker: Arc,) -> impl Filter + Clone { - warp::any() - .and(warp::filters::method::get()) - .and(with_announce_request(tracker.config.on_reverse_proxy)) - .and(with_auth_key()) - .and(with_tracker(tracker)) - .and_then(handle_announce) -} - -/// GET /announce or /announce/ -fn announce(tracker: Arc,) -> impl Filter + Clone { - warp::path::path("announce") - .and(warp::filters::method::get()) - .and(with_announce_request(tracker.config.on_reverse_proxy)) - .and(with_auth_key()) - .and(with_tracker(tracker)) - .and_then(handle_announce) -} - -/// GET /scrape/ -fn scrape(tracker: Arc,) -> impl Filter + Clone { - warp::path::path("scrape") - .and(warp::filters::method::get()) - .and(with_scrape_request(tracker.config.on_reverse_proxy)) - .and(with_auth_key()) - .and(with_tracker(tracker)) - .and_then(handle_scrape) -} diff --git a/src/torrust_http_tracker/server.rs b/src/torrust_http_tracker/server.rs deleted file mode 100644 index 69811b3d9..000000000 --- a/src/torrust_http_tracker/server.rs +++ /dev/null @@ -1,43 +0,0 @@ -use std::net::SocketAddr; -use std::sync::Arc; -use crate::TorrentTracker; -use crate::torrust_http_tracker::routes; - -/// Server that listens on HTTP, needs a TorrentTracker -#[derive(Clone)] -pub struct HttpServer { - tracker: Arc, -} - -impl HttpServer { - pub fn new(tracker: Arc) -> HttpServer { - HttpServer { - tracker - } - } - - /// Start the HttpServer - pub async fn start(&self, socket_addr: SocketAddr) { - let (_addr, server) = warp::serve(routes(self.tracker.clone())) - .bind_with_graceful_shutdown(socket_addr, async move { - tokio::signal::ctrl_c() - .await - .expect("failed to listen to shutdown signal"); - }); - tokio::task::spawn(server); - } - - /// Start the HttpServer in TLS mode - pub async fn start_tls(&self, socket_addr: SocketAddr, 
ssl_cert_path: &str, ssl_key_path: &str) { - let (_addr, server) = warp::serve(routes(self.tracker.clone())) - .tls() - .cert_path(ssl_cert_path) - .key_path(ssl_key_path) - .bind_with_graceful_shutdown(socket_addr, async move { - tokio::signal::ctrl_c() - .await - .expect("failed to listen to shutdown signal"); - }); - tokio::task::spawn(server); - } -} diff --git a/src/torrust_udp_tracker/errors.rs b/src/torrust_udp_tracker/errors.rs deleted file mode 100644 index fb29e969e..000000000 --- a/src/torrust_udp_tracker/errors.rs +++ /dev/null @@ -1,31 +0,0 @@ -use thiserror::Error; - -#[derive(Error, Debug)] -pub enum ServerError { - #[error("internal server error")] - InternalServerError, - - #[error("info_hash is either missing or invalid")] - InvalidInfoHash, - - #[error("could not find remote address")] - AddressNotFound, - - #[error("torrent has no peers")] - NoPeersFound, - - #[error("torrent not on whitelist")] - TorrentNotWhitelisted, - - #[error("peer not authenticated")] - PeerNotAuthenticated, - - #[error("invalid authentication key")] - PeerKeyNotValid, - - #[error("exceeded info_hash limit")] - ExceededInfoHashLimit, - - #[error("bad request")] - BadRequest, -} diff --git a/src/torrust_udp_tracker/handlers.rs b/src/torrust_udp_tracker/handlers.rs deleted file mode 100644 index bf25a8861..000000000 --- a/src/torrust_udp_tracker/handlers.rs +++ /dev/null @@ -1,202 +0,0 @@ -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; -use std::sync::Arc; -use aquatic_udp_protocol::{AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId}; -use crate::{InfoHash, MAX_SCRAPE_TORRENTS, TorrentError, TorrentPeer, TorrentTracker}; -use crate::torrust_udp_tracker::errors::ServerError; -use crate::torrust_udp_tracker::request::AnnounceRequestWrapper; -use 
crate::utils::get_connection_id; - -pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), ServerError> { - match tracker.authenticate_request(info_hash, &None).await { - Ok(_) => Ok(()), - Err(e) => { - let err = match e { - TorrentError::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, - TorrentError::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, - TorrentError::PeerKeyNotValid => ServerError::PeerKeyNotValid, - TorrentError::NoPeersFound => ServerError::NoPeersFound, - TorrentError::CouldNotSendResponse => ServerError::InternalServerError, - TorrentError::InvalidInfoHash => ServerError::InvalidInfoHash, - }; - - Err(err) - } - } -} - -pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: Arc) -> Response { - match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|_| ServerError::InternalServerError) { - Ok(request) => { - let transaction_id = match &request { - Request::Connect(connect_request) => { - connect_request.transaction_id - } - Request::Announce(announce_request) => { - announce_request.transaction_id - } - Request::Scrape(scrape_request) => { - scrape_request.transaction_id - } - }; - - match handle_request(request, remote_addr, tracker).await { - Ok(response) => response, - Err(e) => handle_error(e, transaction_id) - } - } - // bad request - Err(_) => handle_error(ServerError::BadRequest, TransactionId(0)) - } -} - -pub async fn handle_request(request: Request, remote_addr: SocketAddr, tracker: Arc) -> Result { - match request { - Request::Connect(connect_request) => { - handle_connect(remote_addr, &connect_request, tracker).await - } - Request::Announce(announce_request) => { - handle_announce(remote_addr, &announce_request, tracker).await - } - Request::Scrape(scrape_request) => { - handle_scrape(remote_addr, &scrape_request, tracker).await - } - } -} - -pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, tracker: Arc) -> Result 
{ - let connection_id = get_connection_id(&remote_addr); - - let response = Response::from(ConnectResponse { - transaction_id: request.transaction_id, - connection_id, - }); - - let tracker_copy = tracker.clone(); - tokio::spawn(async move { - let mut status_writer = tracker_copy.set_stats().await; - if remote_addr.is_ipv4() { - status_writer.udp4_connections_handled += 1; - } else { - status_writer.udp6_connections_handled += 1; - } - }); - - Ok(response) -} - -pub async fn handle_announce(remote_addr: SocketAddr, announce_request: &AnnounceRequest, tracker: Arc) -> Result { - let wrapped_announce_request = AnnounceRequestWrapper::new(announce_request.clone()); - - authenticate(&wrapped_announce_request.info_hash, tracker.clone()).await?; - - let peer = TorrentPeer::from_udp_announce_request(&wrapped_announce_request.announce_request, remote_addr.ip(), tracker.config.get_ext_ip()); - - //let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer).await; - - let torrent_stats = tracker.update_torrent_with_peer_and_get_stats(&wrapped_announce_request.info_hash, &peer).await; - - // get all peers excluding the client_addr - let peers = tracker.get_torrent_peers(&wrapped_announce_request.info_hash, &peer.peer_addr).await; - - let tracker_copy = tracker.clone(); - tokio::spawn(async move { - let mut status_writer = tracker_copy.set_stats().await; - if remote_addr.is_ipv4() { - status_writer.udp4_announces_handled += 1; - } else { - status_writer.udp6_announces_handled += 1; - } - }); - - let announce_response = if remote_addr.is_ipv4() { - Response::from(AnnounceResponse { - transaction_id: wrapped_announce_request.announce_request.transaction_id, - announce_interval: AnnounceInterval(tracker.config.announce_interval as i32), - leechers: NumberOfPeers(torrent_stats.leechers as i32), - seeders: NumberOfPeers(torrent_stats.seeders as i32), - peers: peers.iter() - .filter_map(|peer| if let IpAddr::V4(ip) = 
peer.peer_addr.ip() { - Some(ResponsePeer:: { - ip_address: ip, - port: Port(peer.peer_addr.port()) - }) - } else { - None - } - ).collect() - }) - } else { - Response::from(AnnounceResponse { - transaction_id: wrapped_announce_request.announce_request.transaction_id, - announce_interval: AnnounceInterval(tracker.config.announce_interval as i32), - leechers: NumberOfPeers(torrent_stats.leechers as i32), - seeders: NumberOfPeers(torrent_stats.seeders as i32), - peers: peers.iter() - .filter_map(|peer| if let IpAddr::V6(ip) = peer.peer_addr.ip() { - Some(ResponsePeer:: { - ip_address: ip, - port: Port(peer.peer_addr.port()) - }) - } else { - None - } - ).collect() - }) - }; - - Ok(announce_response) -} - -// todo: refactor this, db lock can be a lot shorter -pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tracker: Arc) -> Result { - let db = tracker.get_torrents().await; - - let mut torrent_stats: Vec = Vec::new(); - - for info_hash in request.info_hashes.iter() { - let info_hash = InfoHash(info_hash.0); - - if authenticate(&info_hash, tracker.clone()).await.is_err() { continue } - - let scrape_entry = match db.get(&info_hash) { - Some(torrent_info) => { - let (seeders, completed, leechers) = torrent_info.get_stats(); - - TorrentScrapeStatistics { - seeders: NumberOfPeers(seeders as i32), - completed: NumberOfDownloads(completed as i32), - leechers: NumberOfPeers(leechers as i32), - } - } - None => { - TorrentScrapeStatistics { - seeders: NumberOfPeers(0), - completed: NumberOfDownloads(0), - leechers: NumberOfPeers(0), - } - } - }; - - torrent_stats.push(scrape_entry); - } - - let tracker_copy = tracker.clone(); - tokio::spawn(async move { - let mut status_writer = tracker_copy.set_stats().await; - if remote_addr.is_ipv4() { - status_writer.udp4_scrapes_handled += 1; - } else { - status_writer.udp6_scrapes_handled += 1; - } - }); - - Ok(Response::from(ScrapeResponse { - transaction_id: request.transaction_id, - torrent_stats - })) -} - 
-fn handle_error(e: ServerError, transaction_id: TransactionId) -> Response { - let message = e.to_string(); - Response::from(ErrorResponse { transaction_id, message: message.into() }) -} diff --git a/src/torrust_udp_tracker/request.rs b/src/torrust_udp_tracker/request.rs deleted file mode 100644 index f3f67fdc1..000000000 --- a/src/torrust_udp_tracker/request.rs +++ /dev/null @@ -1,31 +0,0 @@ -use aquatic_udp_protocol::{AnnounceRequest}; -use crate::{InfoHash}; - -// struct AnnounceRequest { -// pub connection_id: i64, -// pub transaction_id: i32, -// pub info_hash: InfoHash, -// pub peer_id: PeerId, -// pub bytes_downloaded: Bytes, -// pub bytes_uploaded: Bytes, -// pub bytes_left: Bytes, -// pub event: AnnounceEvent, -// pub ip_address: Option, -// pub key: u32, -// pub peers_wanted: u32, -// pub port: Port -// } - -pub struct AnnounceRequestWrapper { - pub announce_request: AnnounceRequest, - pub info_hash: InfoHash, -} - -impl AnnounceRequestWrapper { - pub fn new(announce_request: AnnounceRequest) -> Self { - AnnounceRequestWrapper { - announce_request: announce_request.clone(), - info_hash: InfoHash(announce_request.info_hash.0) - } - } -} diff --git a/src/torrust_udp_tracker/server.rs b/src/torrust_udp_tracker/server.rs deleted file mode 100644 index cae1e5b94..000000000 --- a/src/torrust_udp_tracker/server.rs +++ /dev/null @@ -1,72 +0,0 @@ -use std::io::Cursor; -use std::net::{SocketAddr}; -use std::sync::Arc; -use aquatic_udp_protocol::{Response}; -use log::{debug, info}; -use tokio::net::UdpSocket; -use crate::{TorrentTracker}; -use crate::torrust_udp_tracker::{handle_packet, MAX_PACKET_SIZE}; - -pub struct UdpServer { - socket: Arc, - tracker: Arc, -} - -impl UdpServer { - pub async fn new(tracker: Arc, bind_address: &str) -> tokio::io::Result { - let socket = UdpSocket::bind(bind_address).await?; - - Ok(UdpServer { - socket: Arc::new(socket), - tracker, - }) - } - - pub async fn start(&self, rx: tokio::sync::watch::Receiver) { - loop { - let mut rx = 
rx.clone(); - let mut data = [0; MAX_PACKET_SIZE]; - let socket = self.socket.clone(); - let tracker = self.tracker.clone(); - - tokio::select! { - _ = rx.changed() => { - info!("Stopping UDP server: {}...", socket.local_addr().unwrap()); - break; - } - Ok((valid_bytes, remote_addr)) = socket.recv_from(&mut data) => { - let payload = data[..valid_bytes].to_vec(); - - debug!("Received {} bytes from {}", payload.len(), remote_addr); - debug!("{:?}", payload); - - let response = handle_packet(remote_addr, payload, tracker).await; - UdpServer::send_response(socket, remote_addr, response).await; - } - } - } - } - - async fn send_response(socket: Arc, remote_addr: SocketAddr, response: Response) { - debug!("sending response to: {:?}", &remote_addr); - - let buffer = vec![0u8; MAX_PACKET_SIZE]; - let mut cursor = Cursor::new(buffer); - - match response.write(&mut cursor) { - Ok(_) => { - let position = cursor.position() as usize; - let inner = cursor.get_ref(); - - debug!("{:?}", &inner[..position]); - UdpServer::send_packet(socket, &remote_addr, &inner[..position]).await; - } - Err(_) => { debug!("could not write response to bytes."); } - } - } - - async fn send_packet(socket: Arc, remote_addr: &SocketAddr, payload: &[u8]) { - // doesn't matter if it reaches or not - let _ = socket.send_to(payload, remote_addr).await; - } -} diff --git a/src/tracker.rs b/src/tracker.rs deleted file mode 100644 index 98c5be730..000000000 --- a/src/tracker.rs +++ /dev/null @@ -1,501 +0,0 @@ -use serde::{Deserialize, Serialize}; -use serde; -use std::borrow::Cow; -use std::collections::BTreeMap; -use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; -use crate::common::{AnnounceEventDef, InfoHash, NumberOfBytesDef, PeerId}; -use std::net::{IpAddr, SocketAddr}; -use crate::{Configuration, key_manager, MAX_SCRAPE_TORRENTS}; -use std::collections::btree_map::Entry; -use crate::database::SqliteDatabase; -use std::sync::Arc; -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use 
log::{debug}; -use crate::key_manager::AuthKey; -use r2d2_sqlite::rusqlite; -use crate::torrust_http_tracker::AnnounceRequest; - -#[derive(Serialize, Deserialize, Clone, PartialEq)] -pub enum TrackerMode { - // Will track every new info hash and serve every peer. - #[serde(rename = "public")] - PublicMode, - - // Will only track whitelisted info hashes. - #[serde(rename = "listed")] - ListedMode, - - // Will only serve authenticated peers - #[serde(rename = "private")] - PrivateMode, - - // Will only track whitelisted info hashes and serve authenticated peers - #[serde(rename = "private_listed")] - PrivateListedMode, -} - -#[derive(PartialEq, Eq, Debug, Clone, Serialize)] -pub struct TorrentPeer { - pub peer_id: PeerId, - pub peer_addr: SocketAddr, - #[serde(serialize_with = "ser_instant")] - pub updated: std::time::Instant, - #[serde(with = "NumberOfBytesDef")] - pub uploaded: NumberOfBytes, - #[serde(with = "NumberOfBytesDef")] - pub downloaded: NumberOfBytes, - #[serde(with = "NumberOfBytesDef")] - pub left: NumberOfBytes, - #[serde(with = "AnnounceEventDef")] - pub event: AnnounceEvent, -} - -impl TorrentPeer { - pub fn from_udp_announce_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { - let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); - - TorrentPeer { - peer_id: PeerId(announce_request.peer_id.0), - peer_addr, - updated: std::time::Instant::now(), - uploaded: announce_request.bytes_uploaded, - downloaded: announce_request.bytes_downloaded, - left: announce_request.bytes_left, - event: announce_request.event - } - } - - pub fn from_http_announce_request(announce_request: &AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option) -> Self { - let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); - - let event: AnnounceEvent = if let Some(event) = 
&announce_request.event { - match event.as_ref() { - "started" => AnnounceEvent::Started, - "stopped" => AnnounceEvent::Stopped, - "completed" => AnnounceEvent::Completed, - _ => AnnounceEvent::None - } - } else { - AnnounceEvent::None - }; - - TorrentPeer { - peer_id: announce_request.peer_id.clone(), - peer_addr, - updated: std::time::Instant::now(), - uploaded: NumberOfBytes(announce_request.uploaded as i64), - downloaded: NumberOfBytes(announce_request.downloaded as i64), - left: NumberOfBytes(announce_request.left as i64), - event - } - } - - // potentially substitute localhost ip with external ip - pub fn peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip: IpAddr, host_opt_ip: Option, port: u16) -> SocketAddr { - if remote_ip.is_loopback() && host_opt_ip.is_some() { - SocketAddr::new(host_opt_ip.unwrap(), port) - } else { - SocketAddr::new(remote_ip, port) - } - } - - fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } - - fn is_completed(&self) -> bool { - self.event == AnnounceEvent::Completed - } -} - -fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { - ser.serialize_u64(inst.elapsed().as_millis() as u64) -} - -#[derive(Serialize, Deserialize, Clone)] -pub struct TorrentEntry { - #[serde(skip)] - peers: std::collections::BTreeMap, - completed: u32, - #[serde(skip)] - seeders: u32, -} - -impl TorrentEntry { - pub fn new() -> TorrentEntry { - TorrentEntry { - peers: std::collections::BTreeMap::new(), - completed: 0, - seeders: 0, - } - } - - pub fn update_peer(&mut self, peer: &TorrentPeer) { - match peer.event { - AnnounceEvent::Stopped => { - let peer_old = self.peers.remove(&peer.peer_id); - self.update_torrent_stats_with_peer(peer, peer_old); - } - _ => { - let peer_old = self.peers.insert(peer.peer_id.clone(), peer.clone()); - self.update_torrent_stats_with_peer(peer, peer_old); - } - } - } - - pub fn get_peers(&self, remote_addr: Option<&std::net::SocketAddr>) -> Vec { - let mut list = Vec::new(); - 
for (_, peer) in self - .peers - .iter() - .filter(|e| match remote_addr { - // don't filter on ip_version - None => true, - // filter out different ip_version from remote_addr - Some(remote_address) => { - match e.1.peer_addr.ip() { - IpAddr::V4(_) => { remote_address.is_ipv4() } - IpAddr::V6(_) => { remote_address.is_ipv6() } - } - } - }) - .take(MAX_SCRAPE_TORRENTS as usize) - { - - // skip ip address of client - if let Some(remote_addr) = remote_addr { - if peer.peer_addr == *remote_addr { - continue; - } - } - - list.push(peer.clone()); - } - list - } - - pub fn update_torrent_stats_with_peer(&mut self, peer: &TorrentPeer, peer_old: Option) { - match peer_old { - None => { - if peer.is_seeder() { - self.seeders += 1; - } - - if peer.is_completed() { - self.completed += 1; - } - } - Some(peer_old) => { - match peer.event { - AnnounceEvent::None => { - if peer.is_seeder() && !peer_old.is_seeder() { - self.seeders += 1; - } - } - AnnounceEvent::Completed => { - if peer.is_seeder() && !peer_old.is_seeder() { - self.seeders += 1; - } - - // don't double count completed - if !peer_old.is_completed() { - self.completed += 1; - } - } - AnnounceEvent::Stopped => { - if peer_old.is_seeder() { - self.seeders -= 1; - } - } - // impossible, started should be the first time a peer announces itself - AnnounceEvent::Started => {} - } - } - } - } - - pub fn get_stats(&self) -> (u32, u32, u32) { - let leechers: u32 = if self.seeders < (self.peers.len() as u32) { - (self.peers.len() as u32) - self.seeders - } else { - 0 - }; - - (self.seeders, self.completed, leechers) - } -} - -#[derive(Serialize, Deserialize)] -struct DatabaseRow<'a> { - info_hash: InfoHash, - entry: Cow<'a, TorrentEntry>, -} - -#[derive(Debug)] -pub struct TorrentStats { - pub completed: u32, - pub seeders: u32, - pub leechers: u32, -} - -#[derive(Debug)] -pub enum TorrentError { - TorrentNotWhitelisted, - PeerNotAuthenticated, - PeerKeyNotValid, - NoPeersFound, - CouldNotSendResponse, - InvalidInfoHash, -} - 
-#[derive(Debug)] -pub struct TrackerStats { - pub tcp4_connections_handled: u64, - pub tcp4_announces_handled: u64, - pub tcp4_scrapes_handled: u64, - pub tcp6_connections_handled: u64, - pub tcp6_announces_handled: u64, - pub tcp6_scrapes_handled: u64, - pub udp4_connections_handled: u64, - pub udp4_announces_handled: u64, - pub udp4_scrapes_handled: u64, - pub udp6_connections_handled: u64, - pub udp6_announces_handled: u64, - pub udp6_scrapes_handled: u64, -} - -pub struct TorrentTracker { - pub config: Arc, - torrents: tokio::sync::RwLock>, - database: SqliteDatabase, - stats: tokio::sync::RwLock, -} - -impl TorrentTracker { - pub fn new(config: Arc) -> Result { - let database = SqliteDatabase::new(&config.db_path)?; - - Ok(TorrentTracker { - config, - torrents: RwLock::new(std::collections::BTreeMap::new()), - database, - stats: RwLock::new(TrackerStats { - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 0, - udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0, - }), - }) - } - - fn is_public(&self) -> bool { - self.config.mode == TrackerMode::PublicMode - } - - fn is_private(&self) -> bool { - self.config.mode == TrackerMode::PrivateMode || self.config.mode == TrackerMode::PrivateListedMode - } - - fn is_whitelisted(&self) -> bool { - self.config.mode == TrackerMode::ListedMode || self.config.mode == TrackerMode::PrivateListedMode - } - - pub async fn generate_auth_key(&self, seconds_valid: u64) -> Result { - let auth_key = key_manager::generate_auth_key(seconds_valid); - - // add key to database - if let Err(error) = self.database.add_key_to_keys(&auth_key).await { return Err(error) } - - Ok(auth_key) - } - - pub async fn remove_auth_key(&self, key: String) -> Result { - self.database.remove_key_from_keys(key).await - } - 
- pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key_manager::Error> { - let db_key = self.database.get_key_from_keys(&auth_key.key).await?; - key_manager::verify_auth_key(&db_key) - } - - pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { - // no authentication needed in public mode - if self.is_public() { return Ok(()) } - - // check if auth_key is set and valid - if self.is_private() { - match key { - Some(key) => { - if self.verify_auth_key(key).await.is_err() { - return Err(TorrentError::PeerKeyNotValid) - } - } - None => { - return Err(TorrentError::PeerNotAuthenticated) - } - } - } - - // check if info_hash is whitelisted - if self.is_whitelisted() { - if self.is_info_hash_whitelisted(info_hash).await == false { - return Err(TorrentError::TorrentNotWhitelisted) - } - } - - Ok(()) - } - - // Loading the torrents into memory - pub async fn load_torrents(&self) -> Result<(), rusqlite::Error> { - let torrents = self.database.load_persistent_torrent_data().await?; - - for torrent in torrents { - self.add_torrent(torrent.0, 0, torrent.1, 0).await; - } - - Ok(()) - } - - // Saving the torrents from memory - pub async fn save_torrents(&self) -> Result<(), rusqlite::Error> { - let torrents = self.torrents.read().await; - self.database.save_persistent_torrent_data(&*torrents).await - } - - // Adding torrents is not relevant to public trackers. - pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result { - self.database.add_info_hash_to_whitelist(info_hash.clone()).await - } - - // Removing torrents is not relevant to public trackers. 
- pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result { - self.database.remove_info_hash_from_whitelist(info_hash.clone()).await - } - - pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { - match self.database.get_info_hash_from_whitelist(&info_hash.to_string()).await { - Ok(_) => true, - Err(_) => false - } - } - - - pub async fn get_torrent_peers( - &self, - info_hash: &InfoHash, - peer_addr: &SocketAddr - ) -> Vec { - let read_lock = self.torrents.read().await; - match read_lock.get(info_hash) { - None => vec![], - Some(entry) => { - entry.get_peers(Some(peer_addr)) - } - } - } - - pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &TorrentPeer) -> TorrentStats { - let mut torrents = self.torrents.write().await; - - let torrent_entry = match torrents.entry(info_hash.clone()) { - Entry::Vacant(vacant) => { - vacant.insert(TorrentEntry::new()) - } - Entry::Occupied(entry) => { - entry.into_mut() - } - }; - - torrent_entry.update_peer(peer); - - let (seeders, completed, leechers) = torrent_entry.get_stats(); - - TorrentStats { - seeders, - leechers, - completed, - } - } - - pub async fn add_torrent(&self, info_hash: InfoHash, seeders: u32, completed: u32, leechers: u32) -> TorrentStats { - let mut torrents = self.torrents.write().await; - - if !torrents.contains_key(&info_hash) { - let torrent_entry = TorrentEntry { - peers: Default::default(), - completed, - seeders - }; - torrents.insert(info_hash.clone(), torrent_entry); - } - - TorrentStats { - seeders, - completed, - leechers, - } - } - - pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap> { - self.torrents.read().await - } - - pub async fn set_stats(&self) -> RwLockWriteGuard<'_, TrackerStats> { - self.stats.write().await - } - - pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStats> { - self.stats.read().await - } - - // remove torrents without peers - pub async fn cleanup_torrents(&self) { - 
debug!("Cleaning torrents.."); - let mut lock = self.torrents.write().await; - let db: &mut BTreeMap = &mut *lock; - let mut torrents_to_remove = Vec::new(); - - for (k, torrent_entry) in db.iter_mut() { - // timed-out peers.. - { - let mut peers_to_remove = Vec::new(); - let torrent_peers = &mut torrent_entry.peers; - - for (peer_id, peer) in torrent_peers.iter() { - if peer.is_seeder() { - if peer.updated.elapsed() > std::time::Duration::from_secs(self.config.peer_timeout as u64) { - // remove seeders after 5 minutes since last update... - peers_to_remove.push(peer_id.clone()); - torrent_entry.seeders -= 1; - } - } else if peer.updated.elapsed() > std::time::Duration::from_secs(self.config.peer_timeout as u64) { - // remove peers after 2 hours since last update... - peers_to_remove.push(peer_id.clone()); - } - } - - for peer_id in peers_to_remove.iter() { - torrent_peers.remove(peer_id); - } - } - - if self.config.mode.clone() == TrackerMode::PublicMode && self.config.cleanup_peerless && !self.config.persistence { - // peer-less torrents.. 
- if torrent_entry.peers.len() == 0 { - torrents_to_remove.push(k.clone()); - } - } - } - - for info_hash in torrents_to_remove { - db.remove(&info_hash); - } - } -} diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs new file mode 100644 index 000000000..31e1f50e4 --- /dev/null +++ b/src/tracker/auth.rs @@ -0,0 +1,200 @@ +use std::panic::Location; +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; + +use derive_more::Display; +use log::debug; +use rand::distributions::Alphanumeric; +use rand::{thread_rng, Rng}; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use torrust_tracker_located_error::LocatedError; + +use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; +use crate::shared::clock::{convert_from_timestamp_to_datetime_utc, Current, DurationSinceUnixEpoch, Time, TimeNow}; + +#[must_use] +/// # Panics +/// +/// It would panic if the `lifetime: Duration` + Duration is more than `Duration::MAX`. +pub fn generate(lifetime: Duration) -> ExpiringKey { + let random_id: String = thread_rng() + .sample_iter(&Alphanumeric) + .take(AUTH_KEY_LENGTH) + .map(char::from) + .collect(); + + debug!("Generated key: {}, valid for: {:?} seconds", random_id, lifetime); + + ExpiringKey { + key: random_id.parse::().unwrap(), + valid_until: Current::add(&lifetime).unwrap(), + } +} + +/// # Errors +/// +/// Will return `Error::KeyExpired` if `auth_key.valid_until` is past the `current_time`. +/// +/// Will return `Error::KeyInvalid` if `auth_key.valid_until` is past the `None`. 
+pub fn verify(auth_key: &ExpiringKey) -> Result<(), Error> { + let current_time: DurationSinceUnixEpoch = Current::now(); + + if auth_key.valid_until < current_time { + Err(Error::KeyExpired { + location: Location::caller(), + }) + } else { + Ok(()) + } +} + +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] +pub struct ExpiringKey { + pub key: Key, + pub valid_until: DurationSinceUnixEpoch, +} + +impl std::fmt::Display for ExpiringKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "key: `{}`, valid until `{}`", self.key, self.expiry_time()) + } +} + +impl ExpiringKey { + #[must_use] + pub fn key(&self) -> Key { + self.key.clone() + } + + /// It returns the expiry time. For example, for the starting time for Unix Epoch + /// (timestamp 0) it will return a `DateTime` whose string representation is + /// `1970-01-01 00:00:00 UTC`. + /// + /// # Panics + /// + /// Will panic when the key timestamp overflows the ui64 type. + /// + #[must_use] + pub fn expiry_time(&self) -> chrono::DateTime { + convert_from_timestamp_to_datetime_utc(self.valid_until) + } +} + +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, Display, Hash)] +pub struct Key(String); + +#[derive(Debug, PartialEq, Eq)] +pub struct ParseKeyError; + +impl FromStr for Key { + type Err = ParseKeyError; + + fn from_str(s: &str) -> Result { + if s.len() != AUTH_KEY_LENGTH { + return Err(ParseKeyError); + } + + Ok(Self(s.to_string())) + } +} + +#[derive(Debug, Error)] +#[allow(dead_code)] +pub enum Error { + #[error("Key could not be verified: {source}")] + KeyVerificationError { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + #[error("Failed to read key: {key}, {location}")] + UnableToReadKey { + location: &'static Location<'static>, + key: Box, + }, + #[error("Key has expired, {location}")] + KeyExpired { location: &'static Location<'static> }, +} + +impl From for Error { + fn from(e: r2d2_sqlite::rusqlite::Error) -> 
Self { + Error::KeyVerificationError { + source: (Arc::new(e) as Arc).into(), + } + } +} + +#[cfg(test)] +mod tests { + + mod key { + use std::str::FromStr; + + use crate::tracker::auth::Key; + + #[test] + fn should_be_parsed_from_an_string() { + let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; + let key = Key::from_str(key_string); + + assert!(key.is_ok()); + assert_eq!(key.unwrap().to_string(), key_string); + } + } + + mod expiring_auth_key { + use std::str::FromStr; + use std::time::Duration; + + use crate::shared::clock::{Current, StoppedTime}; + use crate::tracker::auth; + + #[test] + fn should_be_parsed_from_an_string() { + let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; + let auth_key = auth::Key::from_str(key_string); + + assert!(auth_key.is_ok()); + assert_eq!(auth_key.unwrap().to_string(), key_string); + } + + #[test] + fn should_be_displayed() { + // Set the time to the current time. + Current::local_set_to_unix_epoch(); + + let expiring_key = auth::generate(Duration::from_secs(0)); + + assert_eq!( + expiring_key.to_string(), + format!("key: `{}`, valid until `1970-01-01 00:00:00 UTC`", expiring_key.key) // cspell:disable-line + ); + } + + #[test] + fn should_be_generated_with_a_expiration_time() { + let expiring_key = auth::generate(Duration::new(9999, 0)); + + assert!(auth::verify(&expiring_key).is_ok()); + } + + #[test] + fn should_be_generate_and_verified() { + // Set the time to the current time. + Current::local_set_to_system_time_now(); + + // Make key that is valid for 19 seconds. + let expiring_key = auth::generate(Duration::from_secs(19)); + + // Mock the time has passed 10 sec. + Current::local_add(&Duration::from_secs(10)).unwrap(); + + assert!(auth::verify(&expiring_key).is_ok()); + + // Mock the time has passed another 10 sec. 
+ Current::local_add(&Duration::from_secs(10)).unwrap(); + + assert!(auth::verify(&expiring_key).is_err()); + } + } +} diff --git a/src/tracker/databases/driver.rs b/src/tracker/databases/driver.rs new file mode 100644 index 000000000..4ce6ea515 --- /dev/null +++ b/src/tracker/databases/driver.rs @@ -0,0 +1,22 @@ +use torrust_tracker_primitives::DatabaseDriver; + +use super::error::Error; +use super::mysql::Mysql; +use super::sqlite::Sqlite; +use super::{Builder, Database}; + +/// . +/// +/// # Errors +/// +/// This function will return an error if unable to connect to the database. +pub fn build(driver: &DatabaseDriver, db_path: &str) -> Result, Error> { + let database = match driver { + DatabaseDriver::Sqlite3 => Builder::::build(db_path), + DatabaseDriver::MySQL => Builder::::build(db_path), + }?; + + database.create_database_tables().expect("Could not create database tables."); + + Ok(database) +} diff --git a/src/tracker/databases/error.rs b/src/tracker/databases/error.rs new file mode 100644 index 000000000..68b732190 --- /dev/null +++ b/src/tracker/databases/error.rs @@ -0,0 +1,94 @@ +use std::panic::Location; +use std::sync::Arc; + +use r2d2_mysql::mysql::UrlError; +use torrust_tracker_located_error::{Located, LocatedError}; +use torrust_tracker_primitives::DatabaseDriver; + +#[derive(thiserror::Error, Debug, Clone)] +pub enum Error { + #[error("The {driver} query unexpectedly returned nothing: {source}")] + QueryReturnedNoRows { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + driver: DatabaseDriver, + }, + + #[error("The {driver} query was malformed: {source}")] + InvalidQuery { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + driver: DatabaseDriver, + }, + + #[error("Unable to insert record into {driver} database, {location}")] + InsertFailed { + location: &'static Location<'static>, + driver: DatabaseDriver, + }, + + #[error("Failed to remove record from {driver} database, error-code: {error_code}, 
{location}")] + DeleteFailed { + location: &'static Location<'static>, + error_code: usize, + driver: DatabaseDriver, + }, + + #[error("Failed to connect to {driver} database: {source}")] + ConnectionError { + source: LocatedError<'static, UrlError>, + driver: DatabaseDriver, + }, + + #[error("Failed to create r2d2 {driver} connection pool: {source}")] + ConnectionPool { + source: LocatedError<'static, r2d2::Error>, + driver: DatabaseDriver, + }, +} + +impl From for Error { + #[track_caller] + fn from(err: r2d2_sqlite::rusqlite::Error) -> Self { + match err { + r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => Error::QueryReturnedNoRows { + source: (Arc::new(err) as Arc).into(), + driver: DatabaseDriver::Sqlite3, + }, + _ => Error::InvalidQuery { + source: (Arc::new(err) as Arc).into(), + driver: DatabaseDriver::Sqlite3, + }, + } + } +} + +impl From for Error { + #[track_caller] + fn from(err: r2d2_mysql::mysql::Error) -> Self { + let e: Arc = Arc::new(err); + Error::InvalidQuery { + source: e.into(), + driver: DatabaseDriver::MySQL, + } + } +} + +impl From for Error { + #[track_caller] + fn from(err: UrlError) -> Self { + Self::ConnectionError { + source: Located(err).into(), + driver: DatabaseDriver::MySQL, + } + } +} + +impl From<(r2d2::Error, DatabaseDriver)> for Error { + #[track_caller] + fn from(e: (r2d2::Error, DatabaseDriver)) -> Self { + let (err, driver) = e; + Self::ConnectionPool { + source: Located(err).into(), + driver, + } + } +} diff --git a/src/tracker/databases/mod.rs b/src/tracker/databases/mod.rs new file mode 100644 index 000000000..f68288bbe --- /dev/null +++ b/src/tracker/databases/mod.rs @@ -0,0 +1,81 @@ +pub mod driver; +pub mod error; +pub mod mysql; +pub mod sqlite; + +use std::marker::PhantomData; + +use async_trait::async_trait; + +use self::error::Error; +use crate::shared::bit_torrent::info_hash::InfoHash; +use crate::tracker::auth::{self, Key}; + +pub(self) struct Builder +where + T: Database, +{ + phantom: PhantomData, +} + 
+impl Builder +where + T: Database + 'static, +{ + /// . + /// + /// # Errors + /// + /// Will return `r2d2::Error` if `db_path` is not able to create a database. + pub(self) fn build(db_path: &str) -> Result, Error> { + Ok(Box::new(T::new(db_path)?)) + } +} + +#[async_trait] +pub trait Database: Sync + Send { + /// . + /// + /// # Errors + /// + /// Will return `r2d2::Error` if `db_path` is not able to create a database. + fn new(db_path: &str) -> Result + where + Self: std::marker::Sized; + + /// . + /// + /// # Errors + /// + /// Will return `Error` if unable to create own tables. + fn create_database_tables(&self) -> Result<(), Error>; + + /// # Errors + /// + /// Will return `Err` if unable to drop tables. + fn drop_database_tables(&self) -> Result<(), Error>; + + async fn load_persistent_torrents(&self) -> Result, Error>; + + async fn load_keys(&self) -> Result, Error>; + + async fn load_whitelist(&self) -> Result, Error>; + + async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error>; + + async fn get_info_hash_from_whitelist(&self, info_hash: &InfoHash) -> Result, Error>; + + async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; + + async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; + + async fn get_key_from_keys(&self, key: &Key) -> Result, Error>; + + async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result; + + async fn remove_key_from_keys(&self, key: &Key) -> Result; + + async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { + Ok(self.get_info_hash_from_whitelist(info_hash).await?.is_some()) + } +} diff --git a/src/tracker/databases/mysql.rs b/src/tracker/databases/mysql.rs new file mode 100644 index 000000000..7e4aab99e --- /dev/null +++ b/src/tracker/databases/mysql.rs @@ -0,0 +1,223 @@ +use std::str::FromStr; +use std::time::Duration; + +use async_trait::async_trait; +use log::debug; +use r2d2::Pool; +use 
r2d2_mysql::mysql::prelude::Queryable; +use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; +use r2d2_mysql::MySqlConnectionManager; +use torrust_tracker_primitives::DatabaseDriver; + +use super::{Database, Error}; +use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; +use crate::shared::bit_torrent::info_hash::InfoHash; +use crate::tracker::auth::{self, Key}; + +const DRIVER: DatabaseDriver = DatabaseDriver::MySQL; + +pub struct Mysql { + pool: Pool, +} + +#[async_trait] +impl Database for Mysql { + /// # Errors + /// + /// Will return `r2d2::Error` if `db_path` is not able to create `MySQL` database. + fn new(db_path: &str) -> Result { + let opts = Opts::from_url(db_path)?; + let builder = OptsBuilder::from_opts(opts); + let manager = MySqlConnectionManager::new(builder); + let pool = r2d2::Pool::builder().build(manager).map_err(|e| (e, DRIVER))?; + + Ok(Self { pool }) + } + + fn create_database_tables(&self) -> Result<(), Error> { + let create_whitelist_table = " + CREATE TABLE IF NOT EXISTS whitelist ( + id integer PRIMARY KEY AUTO_INCREMENT, + info_hash VARCHAR(40) NOT NULL UNIQUE + );" + .to_string(); + + let create_torrents_table = " + CREATE TABLE IF NOT EXISTS torrents ( + id integer PRIMARY KEY AUTO_INCREMENT, + info_hash VARCHAR(40) NOT NULL UNIQUE, + completed INTEGER DEFAULT 0 NOT NULL + );" + .to_string(); + + let create_keys_table = format!( + " + CREATE TABLE IF NOT EXISTS `keys` ( + `id` INT NOT NULL AUTO_INCREMENT, + `key` VARCHAR({}) NOT NULL, + `valid_until` INT(10) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE (`key`) + );", + i8::try_from(AUTH_KEY_LENGTH).expect("auth::Auth Key Length Should fit within a i8!") + ); + + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + conn.query_drop(&create_torrents_table) + .expect("Could not create torrents table."); + conn.query_drop(&create_keys_table).expect("Could not create keys table."); + conn.query_drop(&create_whitelist_table) + .expect("Could not create whitelist table."); + + Ok(()) + 
} + + fn drop_database_tables(&self) -> Result<(), Error> { + let drop_whitelist_table = " + DROP TABLE `whitelist`;" + .to_string(); + + let drop_torrents_table = " + DROP TABLE `torrents`;" + .to_string(); + + let drop_keys_table = " + DROP TABLE `keys`;" + .to_string(); + + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + conn.query_drop(&drop_whitelist_table) + .expect("Could not drop `whitelist` table."); + conn.query_drop(&drop_torrents_table) + .expect("Could not drop `torrents` table."); + conn.query_drop(&drop_keys_table).expect("Could not drop `keys` table."); + + Ok(()) + } + + async fn load_persistent_torrents(&self) -> Result, Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let torrents = conn.query_map( + "SELECT info_hash, completed FROM torrents", + |(info_hash_string, completed): (String, u32)| { + let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); + (info_hash, completed) + }, + )?; + + Ok(torrents) + } + + async fn load_keys(&self) -> Result, Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let keys = conn.query_map( + "SELECT `key`, valid_until FROM `keys`", + |(key, valid_until): (String, i64)| auth::ExpiringKey { + key: key.parse::().unwrap(), + valid_until: Duration::from_secs(valid_until.unsigned_abs()), + }, + )?; + + Ok(keys) + } + + async fn load_whitelist(&self) -> Result, Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let info_hashes = conn.query_map("SELECT info_hash FROM whitelist", |info_hash: String| { + InfoHash::from_str(&info_hash).unwrap() + })?; + + Ok(info_hashes) + } + + async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + const COMMAND : &str = "INSERT INTO torrents (info_hash, completed) VALUES (:info_hash_str, :completed) ON DUPLICATE KEY UPDATE completed = VALUES(completed)"; + + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let info_hash_str = 
info_hash.to_string(); + + debug!("{}", info_hash_str); + + Ok(conn.exec_drop(COMMAND, params! { info_hash_str, completed })?) + } + + async fn get_info_hash_from_whitelist(&self, info_hash: &InfoHash) -> Result, Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let select = conn.exec_first::( + "SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", + params! { "info_hash" => info_hash.to_hex_string() }, + )?; + + let info_hash = select.map(|f| InfoHash::from_str(&f).expect("Failed to decode InfoHash String from DB!")); + + Ok(info_hash) + } + + async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let info_hash_str = info_hash.to_string(); + + conn.exec_drop( + "INSERT INTO whitelist (info_hash) VALUES (:info_hash_str)", + params! { info_hash_str }, + )?; + + Ok(1) + } + + async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let info_hash = info_hash.to_string(); + + conn.exec_drop("DELETE FROM whitelist WHERE info_hash = :info_hash", params! { info_hash })?; + + Ok(1) + } + + async fn get_key_from_keys(&self, key: &Key) -> Result, Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let query = conn.exec_first::<(String, i64), _, _>( + "SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", + params! 
{ "key" => key.to_string() }, + ); + + let key = query?; + + Ok(key.map(|(key, expiry)| auth::ExpiringKey { + key: key.parse::().unwrap(), + valid_until: Duration::from_secs(expiry.unsigned_abs()), + })) + } + + async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let key = auth_key.key.to_string(); + let valid_until = auth_key.valid_until.as_secs().to_string(); + + conn.exec_drop( + "INSERT INTO `keys` (`key`, valid_until) VALUES (:key, :valid_until)", + params! { key, valid_until }, + )?; + + Ok(1) + } + + async fn remove_key_from_keys(&self, key: &Key) -> Result { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! { "key" => key.to_string() })?; + + Ok(1) + } +} diff --git a/src/tracker/databases/sqlite.rs b/src/tracker/databases/sqlite.rs new file mode 100644 index 000000000..931289183 --- /dev/null +++ b/src/tracker/databases/sqlite.rs @@ -0,0 +1,256 @@ +use std::panic::Location; +use std::str::FromStr; + +use async_trait::async_trait; +use r2d2::Pool; +use r2d2_sqlite::SqliteConnectionManager; +use torrust_tracker_primitives::DatabaseDriver; + +use super::{Database, Error}; +use crate::shared::bit_torrent::info_hash::InfoHash; +use crate::shared::clock::DurationSinceUnixEpoch; +use crate::tracker::auth::{self, Key}; + +const DRIVER: DatabaseDriver = DatabaseDriver::Sqlite3; + +pub struct Sqlite { + pool: Pool, +} + +#[async_trait] +impl Database for Sqlite { + /// # Errors + /// + /// Will return `r2d2::Error` if `db_path` is not able to create `SqLite` database. 
+ fn new(db_path: &str) -> Result { + let cm = SqliteConnectionManager::file(db_path); + Pool::new(cm).map_or_else(|err| Err((err, DatabaseDriver::Sqlite3).into()), |pool| Ok(Sqlite { pool })) + } + + fn create_database_tables(&self) -> Result<(), Error> { + let create_whitelist_table = " + CREATE TABLE IF NOT EXISTS whitelist ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + info_hash TEXT NOT NULL UNIQUE + );" + .to_string(); + + let create_torrents_table = " + CREATE TABLE IF NOT EXISTS torrents ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + info_hash TEXT NOT NULL UNIQUE, + completed INTEGER DEFAULT 0 NOT NULL + );" + .to_string(); + + let create_keys_table = " + CREATE TABLE IF NOT EXISTS keys ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + key TEXT NOT NULL UNIQUE, + valid_until INTEGER NOT NULL + );" + .to_string(); + + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + conn.execute(&create_whitelist_table, [])?; + conn.execute(&create_keys_table, [])?; + conn.execute(&create_torrents_table, [])?; + + Ok(()) + } + + fn drop_database_tables(&self) -> Result<(), Error> { + let drop_whitelist_table = " + DROP TABLE whitelist;" + .to_string(); + + let drop_torrents_table = " + DROP TABLE torrents;" + .to_string(); + + let drop_keys_table = " + DROP TABLE keys;" + .to_string(); + + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + conn.execute(&drop_whitelist_table, []) + .and_then(|_| conn.execute(&drop_torrents_table, [])) + .and_then(|_| conn.execute(&drop_keys_table, []))?; + + Ok(()) + } + + async fn load_persistent_torrents(&self) -> Result, Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; + + let torrent_iter = stmt.query_map([], |row| { + let info_hash_string: String = row.get(0)?; + let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); + let completed: u32 = row.get(1)?; + Ok((info_hash, completed)) + })?; + + //torrent_iter?; + //let torrent_iter = 
torrent_iter.unwrap(); + + let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(std::result::Result::ok).collect(); + + Ok(torrents) + } + + async fn load_keys(&self) -> Result, Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?; + + let keys_iter = stmt.query_map([], |row| { + let key: String = row.get(0)?; + let valid_until: i64 = row.get(1)?; + + Ok(auth::ExpiringKey { + key: key.parse::().unwrap(), + valid_until: DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs()), + }) + })?; + + let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); + + Ok(keys) + } + + async fn load_whitelist(&self) -> Result, Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let mut stmt = conn.prepare("SELECT info_hash FROM whitelist")?; + + let info_hash_iter = stmt.query_map([], |row| { + let info_hash: String = row.get(0)?; + + Ok(InfoHash::from_str(&info_hash).unwrap()) + })?; + + let info_hashes: Vec = info_hash_iter.filter_map(std::result::Result::ok).collect(); + + Ok(info_hashes) + } + + async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let insert = conn.execute( + "INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2", + [info_hash.to_string(), completed.to_string()], + )?; + + if insert == 0 { + Err(Error::InsertFailed { + location: Location::caller(), + driver: DRIVER, + }) + } else { + Ok(()) + } + } + + async fn get_info_hash_from_whitelist(&self, info_hash: &InfoHash) -> Result, Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; + + let mut rows = stmt.query([info_hash.to_hex_string()])?; + + let query = rows.next()?; + + Ok(query.map(|f| 
InfoHash::from_str(&f.get_unwrap::<_, String>(0)).unwrap())) + } + + async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let insert = conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", [info_hash.to_string()])?; + + if insert == 0 { + Err(Error::InsertFailed { + location: Location::caller(), + driver: DRIVER, + }) + } else { + Ok(insert) + } + } + + async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let deleted = conn.execute("DELETE FROM whitelist WHERE info_hash = ?", [info_hash.to_string()])?; + + if deleted == 1 { + // should only remove a single record. + Ok(deleted) + } else { + Err(Error::DeleteFailed { + location: Location::caller(), + error_code: deleted, + driver: DRIVER, + }) + } + } + + async fn get_key_from_keys(&self, key: &Key) -> Result, Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; + + let mut rows = stmt.query([key.to_string()])?; + + let key = rows.next()?; + + Ok(key.map(|f| { + let expiry: i64 = f.get(1).unwrap(); + let key: String = f.get(0).unwrap(); + auth::ExpiringKey { + key: key.parse::().unwrap(), + valid_until: DurationSinceUnixEpoch::from_secs(expiry.unsigned_abs()), + } + })) + } + + async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let insert = conn.execute( + "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", + [auth_key.key.to_string(), auth_key.valid_until.as_secs().to_string()], + )?; + + if insert == 0 { + Err(Error::InsertFailed { + location: Location::caller(), + driver: DRIVER, + }) + } else { + Ok(insert) + } + } + + async fn remove_key_from_keys(&self, key: &Key) -> Result { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let deleted = 
conn.execute("DELETE FROM keys WHERE key = ?", [key.to_string()])?; + + if deleted == 1 { + // should only remove a single record. + Ok(deleted) + } else { + Err(Error::DeleteFailed { + location: Location::caller(), + error_code: deleted, + driver: DRIVER, + }) + } + } +} diff --git a/src/tracker/error.rs b/src/tracker/error.rs new file mode 100644 index 000000000..aaf755e0d --- /dev/null +++ b/src/tracker/error.rs @@ -0,0 +1,22 @@ +use std::panic::Location; + +use torrust_tracker_located_error::LocatedError; + +#[derive(thiserror::Error, Debug, Clone)] +pub enum Error { + // Authentication errors + #[error("The supplied key: {key:?}, is not valid: {source}")] + PeerKeyNotValid { + key: super::auth::Key, + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + #[error("The peer is not authenticated, {location}")] + PeerNotAuthenticated { location: &'static Location<'static> }, + + // Authorization errors + #[error("The torrent: {info_hash}, is not whitelisted, {location}")] + TorrentNotWhitelisted { + info_hash: crate::shared::bit_torrent::info_hash::InfoHash, + location: &'static Location<'static>, + }, +} diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs new file mode 100644 index 000000000..a89d6df2c --- /dev/null +++ b/src/tracker/mod.rs @@ -0,0 +1,1253 @@ +pub mod auth; +pub mod databases; +pub mod error; +pub mod peer; +pub mod services; +pub mod statistics; +pub mod torrent; + +use std::collections::btree_map::Entry; +use std::collections::{BTreeMap, HashMap}; +use std::net::IpAddr; +use std::panic::Location; +use std::sync::Arc; +use std::time::Duration; + +use tokio::sync::mpsc::error::SendError; +use tokio::sync::{RwLock, RwLockReadGuard}; +use torrust_tracker_configuration::Configuration; +use torrust_tracker_primitives::TrackerMode; + +use self::auth::Key; +use self::error::Error; +use self::peer::Peer; +use self::torrent::{SwarmMetadata, SwarmStats}; +use crate::shared::bit_torrent::info_hash::InfoHash; +use 
crate::tracker::databases::Database; + +pub struct Tracker { + pub config: Arc, + mode: TrackerMode, + keys: RwLock>, + whitelist: RwLock>, + torrents: RwLock>, + stats_event_sender: Option>, + stats_repository: statistics::Repo, + pub database: Box, +} + +#[derive(Debug, PartialEq, Default)] +pub struct TorrentsMetrics { + // code-review: consider using `SwarmStats` for + // `seeders`, `completed`, and `leechers` attributes. + // pub swarm_stats: SwarmStats; + pub seeders: u64, + pub completed: u64, + pub leechers: u64, + pub torrents: u64, +} + +#[derive(Debug, PartialEq, Default)] +pub struct AnnounceData { + pub peers: Vec, + pub swarm_stats: SwarmStats, + pub interval: u32, + pub interval_min: u32, +} + +#[derive(Debug, PartialEq, Default)] +pub struct ScrapeData { + pub files: HashMap, +} + +impl ScrapeData { + #[must_use] + pub fn empty() -> Self { + let files: HashMap = HashMap::new(); + Self { files } + } + + #[must_use] + pub fn zeroed(info_hashes: &Vec) -> Self { + let mut scrape_data = Self::empty(); + + for info_hash in info_hashes { + scrape_data.add_file(info_hash, SwarmMetadata::zeroed()); + } + + scrape_data + } + + pub fn add_file(&mut self, info_hash: &InfoHash, swarm_metadata: SwarmMetadata) { + self.files.insert(*info_hash, swarm_metadata); + } + + pub fn add_file_with_zeroed_metadata(&mut self, info_hash: &InfoHash) { + self.files.insert(*info_hash, SwarmMetadata::zeroed()); + } +} + +impl Tracker { + /// # Errors + /// + /// Will return a `databases::error::Error` if unable to connect to database. 
+ pub fn new( + config: Arc, + stats_event_sender: Option>, + stats_repository: statistics::Repo, + ) -> Result { + let database = databases::driver::build(&config.db_driver, &config.db_path)?; + + let mode = config.mode; + + Ok(Tracker { + config, + mode, + keys: RwLock::new(std::collections::HashMap::new()), + whitelist: RwLock::new(std::collections::HashSet::new()), + torrents: RwLock::new(std::collections::BTreeMap::new()), + stats_event_sender, + stats_repository, + database, + }) + } + + pub fn is_public(&self) -> bool { + self.mode == TrackerMode::Public + } + + pub fn is_private(&self) -> bool { + self.mode == TrackerMode::Private || self.mode == TrackerMode::PrivateListed + } + + pub fn is_whitelisted(&self) -> bool { + self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed + } + + pub fn requires_authentication(&self) -> bool { + self.is_private() + } + + /// It handles an announce request. + /// + /// BEP 03: [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html). + pub async fn announce(&self, info_hash: &InfoHash, peer: &mut Peer, remote_client_ip: &IpAddr) -> AnnounceData { + // code-review: maybe instead of mutating the peer we could just return + // a tuple with the new peer and the announce data: (Peer, AnnounceData). + // It could even be a different struct: `StoredPeer` or `PublicPeer`. + + // code-review: in the `scrape` function we perform an authorization check. + // We check if the torrent is whitelisted. Should we also check authorization here? + // I think so because the `Tracker` has the responsibility for checking authentication and authorization. + // The `Tracker` has delegated that responsibility to the handlers + // (because we want to return a friendly error response) but that does not mean we should + // double-check authorization at this domain level too. + // I would propose to return a `Result` here. 
+ // Besides, regarding authentication the `Tracker` is also responsible for authentication but + // we are actually handling authentication at the handlers level. So I would extract that + // responsibility into another authentication service. + + peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.get_ext_ip())); + + let swarm_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + + let peers = self.get_peers_for_peer(info_hash, peer).await; + + AnnounceData { + peers, + swarm_stats, + interval: self.config.announce_interval, + interval_min: self.config.min_announce_interval, + } + } + + /// It handles a scrape request. + /// + /// BEP 48: [Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). + pub async fn scrape(&self, info_hashes: &Vec) -> ScrapeData { + let mut scrape_data = ScrapeData::empty(); + + for info_hash in info_hashes { + let swarm_metadata = match self.authorize(info_hash).await { + Ok(_) => self.get_swarm_metadata(info_hash).await, + Err(_) => SwarmMetadata::zeroed(), + }; + scrape_data.add_file(info_hash, swarm_metadata); + } + + scrape_data + } + + async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { + let torrents = self.get_torrents().await; + match torrents.get(info_hash) { + Some(torrent_entry) => torrent_entry.get_swarm_metadata(), + None => SwarmMetadata::default(), + } + } + + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the database. + pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { + let auth_key = auth::generate(lifetime); + self.database.add_key_to_keys(&auth_key).await?; + self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); + Ok(auth_key) + } + + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `key` to the database. + /// + /// # Panics + /// + /// Will panic if key cannot be converted into a valid `Key`. 
+ pub async fn remove_auth_key(&self, key: &Key) -> Result<(), databases::error::Error> { + self.database.remove_key_from_keys(key).await?; + self.keys.write().await.remove(key); + Ok(()) + } + + /// # Errors + /// + /// Will return a `key::Error` if unable to get any `auth_key`. + pub async fn verify_auth_key(&self, key: &Key) -> Result<(), auth::Error> { + // code-review: this function is public only because it's used in a test. + // We should change the test and make it private. + match self.keys.read().await.get(key) { + None => Err(auth::Error::UnableToReadKey { + location: Location::caller(), + key: Box::new(key.clone()), + }), + Some(key) => auth::verify(key), + } + } + + /// # Errors + /// + /// Will return a `database::Error` if unable to `load_keys` from the database. + pub async fn load_keys_from_database(&self) -> Result<(), databases::error::Error> { + let keys_from_database = self.database.load_keys().await?; + let mut keys = self.keys.write().await; + + keys.clear(); + + for key in keys_from_database { + keys.insert(key.key.clone(), key); + } + + Ok(()) + } + + /// Adding torrents is not relevant to public trackers. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. 
+ pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + self.add_torrent_to_database_whitelist(info_hash).await?; + self.add_torrent_to_memory_whitelist(info_hash).await; + Ok(()) + } + + /// It adds a torrent to the whitelist if it has not been whitelisted previously + async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + let is_whitelisted = self.database.is_info_hash_whitelisted(info_hash).await?; + + if is_whitelisted { + return Ok(()); + } + + self.database.add_info_hash_to_whitelist(*info_hash).await?; + + Ok(()) + } + + pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { + self.whitelist.write().await.insert(*info_hash) + } + + /// Removing torrents is not relevant to public trackers. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. + pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + self.remove_torrent_from_database_whitelist(info_hash).await?; + self.remove_torrent_from_memory_whitelist(info_hash).await; + Ok(()) + } + + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. 
    pub async fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> {
        let is_whitelisted = self.database.is_info_hash_whitelisted(info_hash).await?;

        // Idempotent: removing a torrent that is not whitelisted is a no-op.
        if !is_whitelisted {
            return Ok(());
        }

        self.database.remove_info_hash_from_whitelist(*info_hash).await?;

        Ok(())
    }

    /// It removes a torrent from the in-memory whitelist only.
    /// Returns `true` if the torrent was present.
    pub async fn remove_torrent_from_memory_whitelist(&self, info_hash: &InfoHash) -> bool {
        self.whitelist.write().await.remove(info_hash)
    }

    /// It checks the in-memory whitelist for the given torrent.
    pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool {
        self.whitelist.read().await.contains(info_hash)
    }

    /// # Errors
    ///
    /// Will return a `database::Error` if unable to load the list of whitelisted `info_hash`s from the database.
    pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> {
        let whitelisted_torrents_from_database = self.database.load_whitelist().await?;
        let mut whitelist = self.whitelist.write().await;

        // Full refresh: drop the in-memory whitelist before re-inserting.
        whitelist.clear();

        for info_hash in whitelisted_torrents_from_database {
            let _ = whitelist.insert(info_hash);
        }

        Ok(())
    }

    /// # Errors
    ///
    /// Will return a `torrent::Error::PeerKeyNotValid` if the `key` is not valid.
    ///
    /// Will return a `torrent::Error::PeerNotAuthenticated` if the `key` is `None`.
    ///
    /// Will return a `torrent::Error::TorrentNotWhitelisted` if the Tracker is in listed mode and the `info_hash` is not whitelisted.
    pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option<Key>) -> Result<(), Error> {
        // todo: this is a deprecated method.
        // We're splitting authentication and authorization responsibilities.
        // Use `authenticate` and `authorize` instead.

        // Authentication

        // no authentication needed in public mode
        if self.is_public() {
            return Ok(());
        }

        // check if auth_key is set and valid
        if self.is_private() {
            match key {
                Some(key) => {
                    if let Err(e) = self.verify_auth_key(key).await {
                        return Err(Error::PeerKeyNotValid {
                            key: key.clone(),
                            source: (Arc::new(e) as Arc<dyn std::error::Error + Send + Sync>).into(),
                        });
                    }
                }
                None => {
                    return Err(Error::PeerNotAuthenticated {
                        location: Location::caller(),
                    });
                }
            }
        }

        // Authorization

        // check if info_hash is whitelisted
        if self.is_whitelisted() && !self.is_info_hash_whitelisted(info_hash).await {
            return Err(Error::TorrentNotWhitelisted {
                info_hash: *info_hash,
                location: Location::caller(),
            });
        }

        Ok(())
    }

    /// # Errors
    ///
    /// Will return an error if the authentication key cannot be verified.
    pub async fn authenticate(&self, key: &Key) -> Result<(), auth::Error> {
        // Authentication only applies to private trackers.
        if self.is_private() {
            self.verify_auth_key(key).await
        } else {
            Ok(())
        }
    }

    /// The only authorization process is the whitelist.
    ///
    /// # Errors
    ///
    /// Will return an error if the tracker is running in `listed` mode
    /// and the infohash is not whitelisted.
    pub async fn authorize(&self, info_hash: &InfoHash) -> Result<(), Error> {
        // Authorization only applies to whitelisted (listed) trackers.
        if !self.is_whitelisted() {
            return Ok(());
        }

        if self.is_info_hash_whitelisted(info_hash).await {
            return Ok(());
        }

        return Err(Error::TorrentNotWhitelisted {
            info_hash: *info_hash,
            location: Location::caller(),
        });
    }

    /// Loading the torrents from database into memory
    ///
    /// # Errors
    ///
    /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database.
    pub async fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> {
        let persistent_torrents = self.database.load_persistent_torrents().await?;

        let mut torrents = self.torrents.write().await;

        for (info_hash, completed) in persistent_torrents {
            // Skip if torrent entry already exists
            if torrents.contains_key(&info_hash) {
                continue;
            }

            // Only the `completed` counter is persisted; the peer list starts empty.
            let torrent_entry = torrent::Entry {
                peers: BTreeMap::default(),
                completed,
            };

            torrents.insert(info_hash, torrent_entry);
        }

        Ok(())
    }

    /// It returns the peers for a torrent, excluding the given peer itself.
    /// Returns an empty list if the torrent is unknown.
    async fn get_peers_for_peer(&self, info_hash: &InfoHash, peer: &Peer) -> Vec<Peer> {
        let read_lock = self.torrents.read().await;

        match read_lock.get(info_hash) {
            None => vec![],
            Some(entry) => entry.get_peers_for_peer(peer).into_iter().copied().collect(),
        }
    }

    /// Get all torrent peers for a given torrent
    pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec<Peer> {
        let read_lock = self.torrents.read().await;

        match read_lock.get(info_hash) {
            None => vec![],
            Some(entry) => entry.get_all_peers().into_iter().copied().collect(),
        }
    }

    /// It updates (or creates) the torrent entry with the announcing peer and
    /// returns the resulting swarm stats. When configured, the `completed`
    /// counter is also persisted to the database.
    pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::SwarmStats {
        // code-review: consider splitting the function in two (command and query segregation).
        // `update_torrent_with_peer` and `get_stats`

        let mut torrents = self.torrents.write().await;

        let torrent_entry = match torrents.entry(*info_hash) {
            Entry::Vacant(vacant) => vacant.insert(torrent::Entry::new()),
            Entry::Occupied(entry) => entry.into_mut(),
        };

        let stats_updated = torrent_entry.update_peer(peer);

        // todo: move this action to a separate worker
        if self.config.persistent_torrent_completed_stat && stats_updated {
            // Best-effort write: a database error here is deliberately ignored.
            let _ = self
                .database
                .save_persistent_torrent(info_hash, torrent_entry.completed)
                .await;
        }

        let (seeders, completed, leechers) = torrent_entry.get_stats();

        torrent::SwarmStats {
            completed,
            seeders,
            leechers,
        }
    }

    /// It returns a read lock guard over the whole in-memory torrent list.
    pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap<InfoHash, torrent::Entry>> {
        self.torrents.read().await
    }

    /// It aggregates the stats of every torrent into a single `TorrentsMetrics` value.
    pub async fn get_torrents_metrics(&self) -> TorrentsMetrics {
        let mut torrents_metrics = TorrentsMetrics {
            seeders: 0,
            completed: 0,
            leechers: 0,
            torrents: 0,
        };

        let db = self.get_torrents().await;

        db.values().for_each(|torrent_entry| {
            let (seeders, completed, leechers) = torrent_entry.get_stats();
            torrents_metrics.seeders += u64::from(seeders);
            torrents_metrics.completed += u64::from(completed);
            torrents_metrics.leechers += u64::from(leechers);
            torrents_metrics.torrents += 1;
        });

        torrents_metrics
    }

    /// It returns a read lock guard over the tracker statistics.
    pub async fn get_stats(&self) -> RwLockReadGuard<'_, statistics::Metrics> {
        self.stats_repository.get_stats().await
    }

    /// It sends a statistics event through the configured sender.
    /// Returns `None` when the tracker has no stats event sender.
    pub async fn send_stats_event(&self, event: statistics::Event) -> Option<Result<(), SendError<statistics::Event>>> {
        match &self.stats_event_sender {
            None => None,
            Some(stats_event_sender) => stats_event_sender.send_event(event).await,
        }
    }

    // Remove inactive peers and (optionally) peerless torrents
    pub async fn cleanup_torrents(&self) {
        let mut torrents_lock = self.torrents.write().await;

        // If we don't need to remove torrents we will use the faster iter
        if self.config.remove_peerless_torrents {
            torrents_lock.retain(|_, torrent_entry| {
                torrent_entry.remove_inactive_peers(self.config.max_peer_timeout);

                if self.config.persistent_torrent_completed_stat {
                    // Keep torrents with a persisted `completed` count even if peerless.
                    torrent_entry.completed > 0 || !torrent_entry.peers.is_empty()
                } else {
                    !torrent_entry.peers.is_empty()
                }
            });
        } else {
            for (_, torrent_entry) in torrents_lock.iter_mut() {
                torrent_entry.remove_inactive_peers(self.config.max_peer_timeout);
            }
        }
    }
}

/// It returns the IP to assign to a peer: the tracker's external IP when the
/// client connected from a loopback address (and an external IP is configured),
/// otherwise the remote client IP itself.
#[must_use]
fn assign_ip_address_to_peer(remote_client_ip: &IpAddr, tracker_external_ip: Option<IpAddr>) -> IpAddr {
    if let Some(host_ip) = tracker_external_ip.filter(|_| remote_client_ip.is_loopback()) {
        host_ip
    } else {
        *remote_client_ip
    }
}

#[cfg(test)]
mod tests {

    mod the_tracker {

        use std::net::{IpAddr, Ipv4Addr, SocketAddr};
        use std::str::FromStr;
        use std::sync::Arc;

        use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes};
        use torrust_tracker_test_helpers::configuration;

        use crate::shared::bit_torrent::info_hash::InfoHash;
        use crate::shared::clock::DurationSinceUnixEpoch;
        use crate::tracker::peer::{self, Peer};
        use crate::tracker::services::tracker_factory;
        use crate::tracker::{TorrentsMetrics, Tracker};

        fn public_tracker() -> Tracker {
            tracker_factory(configuration::ephemeral_mode_public().into())
        }

        fn private_tracker() -> Tracker {
            tracker_factory(configuration::ephemeral_mode_private().into())
        }

        fn whitelisted_tracker() -> Tracker {
            tracker_factory(configuration::ephemeral_mode_whitelisted().into())
        }

        pub fn tracker_persisting_torrents_in_database() -> Tracker {
            let mut configuration = configuration::ephemeral();
            configuration.persistent_torrent_completed_stat = true;
            tracker_factory(Arc::new(configuration))
        }

        fn sample_info_hash() -> InfoHash {
            "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::<InfoHash>().unwrap()
        }

        // The client peer IP
        fn peer_ip() -> IpAddr {
            IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap())
        }

        /// Sample peer whose state is not relevant for the tests
        fn sample_peer() -> Peer {
            complete_peer()
        }

        /// Sample peer for tests that need more than one peer
        fn sample_peer_1() -> Peer {
            Peer {
                peer_id: peer::Id(*b"-qB00000000000000001"),
                peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081),
                updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0),
                uploaded: NumberOfBytes(0),
                downloaded: NumberOfBytes(0),
                left: NumberOfBytes(0),
                event: AnnounceEvent::Completed,
            }
        }

        /// Sample peer for tests that need more than one peer
        fn sample_peer_2() -> Peer {
            Peer {
                peer_id: peer::Id(*b"-qB00000000000000002"),
                peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8082),
                updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0),
                uploaded: NumberOfBytes(0),
                downloaded: NumberOfBytes(0),
                left: NumberOfBytes(0),
                event: AnnounceEvent::Completed,
            }
        }

        fn seeder() -> Peer {
            complete_peer()
        }

        fn leecher() -> Peer {
            incomplete_peer()
        }

        fn started_peer() -> Peer {
            incomplete_peer()
        }

        fn completed_peer() -> Peer {
            complete_peer()
        }

        /// A peer that counts as `complete` in swarm metadata
        /// IMPORTANT!: it only counts if it has been announced at least once before
        /// announcing the `AnnounceEvent::Completed` event.
+ fn complete_peer() -> Peer { + Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } + + /// A peer that counts as `incomplete` is swarm metadata + fn incomplete_peer() -> Peer { + Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(1000), // Still bytes to download + event: AnnounceEvent::Started, + } + } + + #[tokio::test] + async fn should_collect_torrent_metrics() { + let tracker = public_tracker(); + + let torrents_metrics = tracker.get_torrents_metrics().await; + + assert_eq!( + torrents_metrics, + TorrentsMetrics { + seeders: 0, + completed: 0, + leechers: 0, + torrents: 0 + } + ); + } + + #[tokio::test] + async fn it_should_return_all_the_peers_for_a_given_torrent() { + let tracker = public_tracker(); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + + let peers = tracker.get_all_torrent_peers(&info_hash).await; + + assert_eq!(peers, vec![peer]); + } + + #[tokio::test] + async fn it_should_return_all_the_peers_for_a_given_torrent_excluding_a_given_peer() { + let tracker = public_tracker(); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + + let peers = tracker.get_peers_for_peer(&info_hash, &peer).await; + + assert_eq!(peers, vec![]); + } + + #[tokio::test] + async fn it_should_return_the_torrent_metrics() { + let tracker = public_tracker(); + + 
tracker + .update_torrent_with_peer_and_get_stats(&sample_info_hash(), &leecher()) + .await; + + let torrent_metrics = tracker.get_torrents_metrics().await; + + assert_eq!( + torrent_metrics, + TorrentsMetrics { + seeders: 0, + completed: 0, + leechers: 1, + torrents: 1, + } + ); + } + + mod for_all_config_modes { + + mod handling_an_announce_request { + + use crate::tracker::tests::the_tracker::{ + peer_ip, public_tracker, sample_info_hash, sample_peer, sample_peer_1, sample_peer_2, + }; + + mod should_assign_the_ip_to_the_peer { + + use std::net::{IpAddr, Ipv4Addr}; + + use crate::tracker::assign_ip_address_to_peer; + + #[test] + fn using_the_source_ip_instead_of_the_ip_in_the_announce_request() { + let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + assert_eq!(peer_ip, remote_ip); + } + + mod and_when_the_client_ip_is_a_ipv4_loopback_ip { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::str::FromStr; + + use crate::tracker::assign_ip_address_to_peer; + + #[test] + fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + assert_eq!(peer_ip, remote_ip); + } + + #[test] + fn it_should_use_the_external_tracker_ip_in_tracker_configuration_if_it_is_defined() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv6_ip( + ) { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let tracker_external_ip = + IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + + let 
peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + } + + mod and_when_client_ip_is_a_ipv6_loopback_ip { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::str::FromStr; + + use crate::tracker::assign_ip_address_to_peer; + + #[test] + fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + assert_eq!(peer_ip, remote_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_tracker_configuration_if_it_is_defined() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let tracker_external_ip = + IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv4_ip( + ) { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + } + } + + #[tokio::test] + async fn it_should_return_the_announce_data_with_an_empty_peer_list_when_it_is_the_first_announced_peer() { + let tracker = public_tracker(); + + let mut peer = sample_peer(); + + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; + + assert_eq!(announce_data.peers, vec![]); + } + + #[tokio::test] + async fn it_should_return_the_announce_data_with_the_previously_announced_peers() { + let tracker = public_tracker(); + + let mut previously_announced_peer = sample_peer_1(); + tracker + .announce(&sample_info_hash(), &mut previously_announced_peer, &peer_ip()) 
+ .await; + + let mut peer = sample_peer_2(); + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; + + assert_eq!(announce_data.peers, vec![previously_announced_peer]); + } + + mod it_should_update_the_swarm_stats_for_the_torrent { + + use crate::tracker::tests::the_tracker::{ + completed_peer, leecher, peer_ip, public_tracker, sample_info_hash, seeder, started_peer, + }; + + #[tokio::test] + async fn when_the_peer_is_a_seeder() { + let tracker = public_tracker(); + + let mut peer = seeder(); + + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; + + assert_eq!(announce_data.swarm_stats.seeders, 1); + } + + #[tokio::test] + async fn when_the_peer_is_a_leecher() { + let tracker = public_tracker(); + + let mut peer = leecher(); + + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; + + assert_eq!(announce_data.swarm_stats.leechers, 1); + } + + #[tokio::test] + async fn when_a_previously_announced_started_peer_has_completed_downloading() { + let tracker = public_tracker(); + + // We have to announce with "started" event because peer does not count if peer was not previously known + let mut started_peer = started_peer(); + tracker.announce(&sample_info_hash(), &mut started_peer, &peer_ip()).await; + + let mut completed_peer = completed_peer(); + let announce_data = tracker.announce(&sample_info_hash(), &mut completed_peer, &peer_ip()).await; + + assert_eq!(announce_data.swarm_stats.completed, 1); + } + } + } + + mod handling_a_scrape_request { + + use std::net::{IpAddr, Ipv4Addr}; + + use crate::shared::bit_torrent::info_hash::InfoHash; + use crate::tracker::tests::the_tracker::{complete_peer, incomplete_peer, public_tracker}; + use crate::tracker::{ScrapeData, SwarmMetadata}; + + #[tokio::test] + async fn it_should_return_a_zeroed_swarm_metadata_for_the_requested_file_if_the_tracker_does_not_have_that_torrent( + ) { + let tracker = public_tracker(); + + let 
info_hashes = vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()]; + + let scrape_data = tracker.scrape(&info_hashes).await; + + let mut expected_scrape_data = ScrapeData::empty(); + + expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[0]); + + assert_eq!(scrape_data, expected_scrape_data); + } + + #[tokio::test] + async fn it_should_return_the_swarm_metadata_for_the_requested_file_if_the_tracker_has_that_torrent() { + let tracker = public_tracker(); + + let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); + + // Announce a "complete" peer for the torrent + let mut complete_peer = complete_peer(); + tracker + .announce(&info_hash, &mut complete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10))) + .await; + + // Announce an "incomplete" peer for the torrent + let mut incomplete_peer = incomplete_peer(); + tracker + .announce(&info_hash, &mut incomplete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11))) + .await; + + // Scrape + let scrape_data = tracker.scrape(&vec![info_hash]).await; + + // The expected swarm metadata for the file + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file( + &info_hash, + SwarmMetadata { + complete: 0, // the "complete" peer does not count because it was not previously known + downloaded: 0, + incomplete: 1, // the "incomplete" peer we have just announced + }, + ); + + assert_eq!(scrape_data, expected_scrape_data); + } + + #[tokio::test] + async fn it_should_allow_scraping_for_multiple_torrents() { + let tracker = public_tracker(); + + let info_hashes = vec![ + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1".parse::().unwrap(), + ]; + + let scrape_data = tracker.scrape(&info_hashes).await; + + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[0]); + expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[1]); + + 
assert_eq!(scrape_data, expected_scrape_data); + } + } + } + + mod configured_as_whitelisted { + + mod handling_authorization { + use crate::tracker::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; + + #[tokio::test] + async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { + let tracker = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + let result = tracker.add_torrent_to_whitelist(&info_hash).await; + assert!(result.is_ok()); + + let result = tracker.authorize(&info_hash).await; + assert!(result.is_ok()); + } + + #[tokio::test] + async fn it_should_not_authorize_the_announce_and_scrape_actions_on_not_whitelisted_torrents() { + let tracker = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + let result = tracker.authorize(&info_hash).await; + assert!(result.is_err()); + } + } + + mod handling_the_torrent_whitelist { + use crate::tracker::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; + + #[tokio::test] + async fn it_should_add_a_torrent_to_the_whitelist() { + let tracker = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + assert!(tracker.is_info_hash_whitelisted(&info_hash).await); + } + + #[tokio::test] + async fn it_should_remove_a_torrent_from_the_whitelist() { + let tracker = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + tracker.remove_torrent_from_whitelist(&info_hash).await.unwrap(); + + assert!(!tracker.is_info_hash_whitelisted(&info_hash).await); + } + + mod persistence { + use crate::tracker::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; + + #[tokio::test] + async fn it_should_load_the_whitelist_from_the_database() { + let tracker = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + // Remove torrent 
from the in-memory whitelist + tracker.whitelist.write().await.remove(&info_hash); + assert!(!tracker.is_info_hash_whitelisted(&info_hash).await); + + tracker.load_whitelist_from_database().await.unwrap(); + + assert!(tracker.is_info_hash_whitelisted(&info_hash).await); + } + } + } + + mod handling_an_announce_request {} + + mod handling_an_scrape_request { + + use crate::shared::bit_torrent::info_hash::InfoHash; + use crate::tracker::tests::the_tracker::{ + complete_peer, incomplete_peer, peer_ip, sample_info_hash, whitelisted_tracker, + }; + use crate::tracker::torrent::SwarmMetadata; + use crate::tracker::ScrapeData; + + #[test] + fn it_should_be_able_to_build_a_zeroed_scrape_data_for_a_list_of_info_hashes() { + // Zeroed scrape data is used when the authentication for the scrape request fails. + + let sample_info_hash = sample_info_hash(); + + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file_with_zeroed_metadata(&sample_info_hash); + + assert_eq!(ScrapeData::zeroed(&vec![sample_info_hash]), expected_scrape_data); + } + + #[tokio::test] + async fn it_should_return_the_zeroed_swarm_metadata_for_the_requested_file_if_it_is_not_whitelisted() { + let tracker = whitelisted_tracker(); + + let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); + + let mut peer = incomplete_peer(); + tracker.announce(&info_hash, &mut peer, &peer_ip()).await; + + // Announce twice to force non zeroed swarm metadata + let mut peer = complete_peer(); + tracker.announce(&info_hash, &mut peer, &peer_ip()).await; + + let scrape_data = tracker.scrape(&vec![info_hash]).await; + + // The expected zeroed swarm metadata for the file + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file(&info_hash, SwarmMetadata::zeroed()); + + assert_eq!(scrape_data, expected_scrape_data); + } + } + } + + mod configured_as_private { + + mod handling_authentication { + use std::str::FromStr; + use std::time::Duration; + + use 
crate::tracker::auth; + use crate::tracker::tests::the_tracker::private_tracker; + + #[tokio::test] + async fn it_should_generate_the_expiring_authentication_keys() { + let tracker = private_tracker(); + + let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + + assert_eq!(key.valid_until, Duration::from_secs(100)); + } + + #[tokio::test] + async fn it_should_authenticate_a_peer_by_using_a_key() { + let tracker = private_tracker(); + + let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + + let result = tracker.authenticate(&expiring_key.key()).await; + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn it_should_fail_authenticating_a_peer_when_it_uses_an_unregistered_key() { + let tracker = private_tracker(); + + let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let result = tracker.authenticate(&unregistered_key).await; + + assert!(result.is_err()); + } + + #[tokio::test] + async fn it_should_verify_a_valid_authentication_key() { + // todo: this should not be tested directly because + // `verify_auth_key` should be a private method. 
+ let tracker = private_tracker(); + + let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + + assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_ok()); + } + + #[tokio::test] + async fn it_should_fail_verifying_an_unregistered_authentication_key() { + let tracker = private_tracker(); + + let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + assert!(tracker.verify_auth_key(&unregistered_key).await.is_err()); + } + + #[tokio::test] + async fn it_should_remove_an_authentication_key() { + let tracker = private_tracker(); + + let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + + let result = tracker.remove_auth_key(&expiring_key.key()).await; + + assert!(result.is_ok()); + assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_err()); + } + + #[tokio::test] + async fn it_should_load_authentication_keys_from_the_database() { + let tracker = private_tracker(); + + let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + + // Remove the newly generated key in memory + tracker.keys.write().await.remove(&expiring_key.key()); + + let result = tracker.load_keys_from_database().await; + + assert!(result.is_ok()); + assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_ok()); + } + } + + mod handling_an_announce_request {} + + mod handling_an_scrape_request {} + } + + mod configured_as_private_and_whitelisted { + + mod handling_an_announce_request {} + + mod handling_an_scrape_request {} + } + + mod handling_torrent_persistence { + use aquatic_udp_protocol::AnnounceEvent; + + use crate::tracker::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database}; + + #[tokio::test] + async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { + let tracker = tracker_persisting_torrents_in_database(); + + let info_hash = sample_info_hash(); + + let mut 
peer = sample_peer(); + + peer.event = AnnounceEvent::Started; + let swarm_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + assert_eq!(swarm_stats.completed, 0); + + peer.event = AnnounceEvent::Completed; + let swarm_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + assert_eq!(swarm_stats.completed, 1); + + // Remove the newly updated torrent from memory + tracker.torrents.write().await.remove(&info_hash); + + tracker.load_torrents_from_database().await.unwrap(); + + let torrents = tracker.get_torrents().await; + assert!(torrents.contains_key(&info_hash)); + + let torrent_entry = torrents.get(&info_hash).unwrap(); + + // It persists the number of completed peers. + assert_eq!(torrent_entry.completed, 1); + + // It does not persist the peers + assert!(torrent_entry.peers.is_empty()); + } + } + } +} diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs new file mode 100644 index 000000000..6a298c9df --- /dev/null +++ b/src/tracker/peer.rs @@ -0,0 +1,436 @@ +use std::net::{IpAddr, SocketAddr}; +use std::panic::Location; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use serde; +use serde::Serialize; +use thiserror::Error; + +use crate::shared::bit_torrent::common::{AnnounceEventDef, NumberOfBytesDef}; +use crate::shared::clock::utils::ser_unix_time_value; +use crate::shared::clock::DurationSinceUnixEpoch; + +#[derive(PartialEq, Eq, Debug)] +pub enum IPVersion { + IPv4, + IPv6, +} + +#[derive(PartialEq, Eq, Debug, Clone, Serialize, Copy)] +pub struct Peer { + pub peer_id: Id, + pub peer_addr: SocketAddr, + #[serde(serialize_with = "ser_unix_time_value")] + pub updated: DurationSinceUnixEpoch, + #[serde(with = "NumberOfBytesDef")] + pub uploaded: NumberOfBytes, + #[serde(with = "NumberOfBytesDef")] + pub downloaded: NumberOfBytes, + #[serde(with = "NumberOfBytesDef")] + pub left: NumberOfBytes, // The number of bytes this peer still has to download + #[serde(with = "AnnounceEventDef")] + 
pub event: AnnounceEvent, +} + +impl Peer { + #[must_use] + pub fn is_seeder(&self) -> bool { + self.left.0 <= 0 && self.event != AnnounceEvent::Stopped + } + + pub fn ip(&mut self) -> IpAddr { + self.peer_addr.ip() + } + + pub fn change_ip(&mut self, new_ip: &IpAddr) { + self.peer_addr = SocketAddr::new(*new_ip, self.peer_addr.port()); + } + + /// The IP version used by the peer: IPV4 or IPV6 + #[must_use] + pub fn ip_version(&self) -> IPVersion { + if self.peer_addr.is_ipv4() { + return IPVersion::IPv4; + } + IPVersion::IPv6 + } +} + +#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] +pub struct Id(pub [u8; 20]); + +const PEER_ID_BYTES_LEN: usize = 20; + +#[derive(Error, Debug)] +pub enum IdConversionError { + #[error("not enough bytes for peer id: {message} {location}")] + NotEnoughBytes { + location: &'static Location<'static>, + message: String, + }, + #[error("too many bytes for peer id: {message} {location}")] + TooManyBytes { + location: &'static Location<'static>, + message: String, + }, +} + +impl Id { + /// # Panics + /// + /// Will panic if byte slice does not contains the exact amount of bytes need for the `Id`. + #[must_use] + pub fn from_bytes(bytes: &[u8]) -> Self { + assert_eq!(bytes.len(), PEER_ID_BYTES_LEN); + let mut ret = Self([0u8; PEER_ID_BYTES_LEN]); + ret.0.clone_from_slice(bytes); + ret + } + + #[must_use] + pub fn to_bytes(&self) -> [u8; 20] { + self.0 + } +} + +impl From<[u8; 20]> for Id { + fn from(bytes: [u8; 20]) -> Self { + Id(bytes) + } +} + +impl TryFrom> for Id { + type Error = IdConversionError; + + fn try_from(bytes: Vec) -> Result { + if bytes.len() < PEER_ID_BYTES_LEN { + return Err(IdConversionError::NotEnoughBytes { + location: Location::caller(), + message: format! {"got {} bytes, expected {}", bytes.len(), PEER_ID_BYTES_LEN}, + }); + } + if bytes.len() > PEER_ID_BYTES_LEN { + return Err(IdConversionError::TooManyBytes { + location: Location::caller(), + message: format! 
{"got {} bytes, expected {}", bytes.len(), PEER_ID_BYTES_LEN}, + }); + } + Ok(Self::from_bytes(&bytes)) + } +} + +impl std::str::FromStr for Id { + type Err = IdConversionError; + + fn from_str(s: &str) -> Result { + Self::try_from(s.as_bytes().to_vec()) + } +} + +impl std::fmt::Display for Id { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self.to_hex_string() { + Some(hex) => write!(f, "{hex}"), + None => write!(f, ""), + } + } +} + +impl Id { + #[must_use] + /// Converts to hex string. + /// + /// For the Id `-qB00000000000000000` it returns `2d71423030303030303030303030303030303030` + /// + /// For example: + /// + ///```text + /// Bytes = Hex + /// -qB00000000000000000 = 2d71423030303030303030303030303030303030 + /// -qB00000000000000000 = 2d 71 42 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 + /// + /// ------------- + /// |Char | Hex | + /// ------------- + /// | - | 2D | + /// | q | 71 | + /// | B | 42 | + /// | 0 | 30 | + /// ------------- + /// ``` + /// + /// Return `None` is some of the bytes are invalid UTF8 values. + /// + /// # Panics + /// + /// It will panic if the `binascii::bin2hex` from a too-small output buffer. 
+ pub fn to_hex_string(&self) -> Option { + let buff_size = self.0.len() * 2; + let mut tmp: Vec = vec![0; buff_size]; + + binascii::bin2hex(&self.0, &mut tmp).unwrap(); + + match std::str::from_utf8(&tmp) { + Ok(hex) => Some(format!("0x{hex}")), + Err(_) => None, + } + } + + #[must_use] + pub fn get_client_name(&self) -> Option<&'static str> { + if self.0[0] == b'M' { + return Some("BitTorrent"); + } + if self.0[0] == b'-' { + let name = match &self.0[1..3] { + b"AG" | b"A~" => "Ares", + b"AR" => "Arctic", + b"AV" => "Avicora", + b"AX" => "BitPump", + b"AZ" => "Azureus", + b"BB" => "BitBuddy", + b"BC" => "BitComet", + b"BF" => "Bitflu", + b"BG" => "BTG (uses Rasterbar libtorrent)", + b"BR" => "BitRocket", + b"BS" => "BTSlave", + b"BX" => "~Bittorrent X", + b"CD" => "Enhanced CTorrent", + b"CT" => "CTorrent", + b"DE" => "DelugeTorrent", + b"DP" => "Propagate Data Client", + b"EB" => "EBit", + b"ES" => "electric sheep", + b"FT" => "FoxTorrent", + b"FW" => "FrostWire", + b"FX" => "Freebox BitTorrent", + b"GS" => "GSTorrent", + b"HL" => "Halite", + b"HN" => "Hydranode", + b"KG" => "KGet", + b"KT" => "KTorrent", + b"LH" => "LH-ABC", + b"LP" => "Lphant", + b"LT" => "libtorrent", + b"lt" => "libTorrent", + b"LW" => "LimeWire", + b"MO" => "MonoTorrent", + b"MP" => "MooPolice", + b"MR" => "Miro", + b"MT" => "MoonlightTorrent", + b"NX" => "Net Transport", + b"PD" => "Pando", + b"qB" => "qBittorrent", + b"QD" => "QQDownload", + b"QT" => "Qt 4 Torrent example", + b"RT" => "Retriever", + b"S~" => "Shareaza alpha/beta", + b"SB" => "~Swiftbit", + b"SS" => "SwarmScope", + b"ST" => "SymTorrent", + b"st" => "sharktorrent", + b"SZ" => "Shareaza", + b"TN" => "TorrentDotNET", + b"TR" => "Transmission", + b"TS" => "Torrentstorm", + b"TT" => "TuoTu", + b"UL" => "uLeecher!", + b"UT" => "µTorrent", + b"UW" => "µTorrent Web", + b"VG" => "Vagaa", + b"WD" => "WebTorrent Desktop", + b"WT" => "BitLet", + b"WW" => "WebTorrent", + b"WY" => "FireTorrent", + b"XL" => "Xunlei", + b"XT" => 
"XanTorrent", + b"XX" => "Xtorrent", + b"ZT" => "ZipTorrent", + _ => return None, + }; + Some(name) + } else { + None + } + } +} + +impl Serialize for Id { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + #[derive(Serialize)] + struct PeerIdInfo<'a> { + id: Option, + client: Option<&'a str>, + } + + let obj = PeerIdInfo { + id: self.to_hex_string(), + client: self.get_client_name(), + }; + obj.serialize(serializer) + } +} + +#[cfg(test)] +mod test { + + mod torrent_peer_id { + use crate::tracker::peer; + + #[test] + fn should_be_instantiated_from_a_byte_slice() { + let id = peer::Id::from_bytes(&[ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + + let expected_id = peer::Id([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + + assert_eq!(id, expected_id); + } + + #[test] + #[should_panic] + fn should_fail_trying_to_instantiate_from_a_byte_slice_with_less_than_20_bytes() { + let less_than_20_bytes = [0; 19]; + let _ = peer::Id::from_bytes(&less_than_20_bytes); + } + + #[test] + #[should_panic] + fn should_fail_trying_to_instantiate_from_a_byte_slice_with_more_than_20_bytes() { + let more_than_20_bytes = [0; 21]; + let _ = peer::Id::from_bytes(&more_than_20_bytes); + } + + #[test] + fn should_be_instantiated_from_a_string() { + let id = "-qB00000000000000001".parse::().unwrap(); + + let expected_id = peer::Id([ + 45, 113, 66, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 49, + ]); + + assert_eq!(id, expected_id); + } + + #[test] + fn should_be_converted_from_a_20_byte_array() { + let id = peer::Id::from([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + + let expected_id = peer::Id([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + + assert_eq!(id, expected_id); + } + + #[test] + fn 
should_be_converted_from_a_byte_vector() { + let id = peer::Id::try_from( + [ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ] + .to_vec(), + ) + .unwrap(); + + let expected_id = peer::Id([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + + assert_eq!(id, expected_id); + } + + #[test] + #[should_panic] + fn should_fail_trying_to_convert_from_a_byte_vector_with_less_than_20_bytes() { + let _ = peer::Id::try_from([0; 19].to_vec()).unwrap(); + } + + #[test] + #[should_panic] + fn should_fail_trying_to_convert_from_a_byte_vector_with_more_than_20_bytes() { + let _ = peer::Id::try_from([0; 21].to_vec()).unwrap(); + } + + #[test] + fn should_be_converted_to_hex_string() { + let id = peer::Id(*b"-qB00000000000000000"); + assert_eq!(id.to_hex_string().unwrap(), "0x2d71423030303030303030303030303030303030"); + + let id = peer::Id([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + assert_eq!(id.to_hex_string().unwrap(), "0x009f9296009f9296009f9296009f9296009f9296"); + } + + #[test] + fn should_be_converted_into_string_type_using_the_hex_string_format() { + let id = peer::Id(*b"-qB00000000000000000"); + assert_eq!(id.to_string(), "0x2d71423030303030303030303030303030303030"); + + let id = peer::Id([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]); + assert_eq!(id.to_string(), "0x009f9296009f9296009f9296009f9296009f9296"); + } + + #[test] + fn should_return_the_inner_bytes() { + assert_eq!(peer::Id(*b"-qB00000000000000000").to_bytes(), *b"-qB00000000000000000"); + } + } + + mod torrent_peer { + + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use serde_json::Value; + + use crate::shared::clock::{Current, Time}; + use crate::tracker::peer::{self, Peer}; + + #[test] + fn it_should_be_serializable() { + let torrent_peer = 
Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: Current::now(), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + + let raw_json = serde_json::to_string(&torrent_peer).unwrap(); + + let expected_raw_json = r#" + { + "peer_id": { + "id": "0x2d71423030303030303030303030303030303030", + "client": "qBittorrent" + }, + "peer_addr":"126.0.0.1:8080", + "updated":0, + "uploaded":0, + "downloaded":0, + "left":0, + "event":"Started" + } + "#; + + assert_eq!( + serde_json::from_str::(&raw_json).unwrap(), + serde_json::from_str::(expected_raw_json).unwrap() + ); + } + } +} diff --git a/src/tracker/services/mod.rs b/src/tracker/services/mod.rs new file mode 100644 index 000000000..8667f79a9 --- /dev/null +++ b/src/tracker/services/mod.rs @@ -0,0 +1,25 @@ +pub mod statistics; +pub mod torrent; + +use std::sync::Arc; + +use torrust_tracker_configuration::Configuration; + +use crate::tracker::Tracker; + +/// # Panics +/// +/// Will panic if tracker cannot be instantiated. 
+#[must_use] +pub fn tracker_factory(config: Arc) -> Tracker { + // Initialize statistics + let (stats_event_sender, stats_repository) = statistics::setup::factory(config.tracker_usage_statistics); + + // Initialize Torrust tracker + match Tracker::new(config, stats_event_sender, stats_repository) { + Ok(tracker) => tracker, + Err(error) => { + panic!("{}", error) + } + } +} diff --git a/src/tracker/services/statistics/mod.rs b/src/tracker/services/statistics/mod.rs new file mode 100644 index 000000000..cae4d1d69 --- /dev/null +++ b/src/tracker/services/statistics/mod.rs @@ -0,0 +1,66 @@ +pub mod setup; + +use std::sync::Arc; + +use crate::tracker::statistics::Metrics; +use crate::tracker::{TorrentsMetrics, Tracker}; + +#[derive(Debug, PartialEq)] +pub struct TrackerMetrics { + pub torrents_metrics: TorrentsMetrics, + pub protocol_metrics: Metrics, +} + +pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { + let torrents_metrics = tracker.get_torrents_metrics().await; + let stats = tracker.get_stats().await; + + TrackerMetrics { + torrents_metrics, + protocol_metrics: Metrics { + tcp4_connections_handled: stats.tcp4_connections_handled, + tcp4_announces_handled: stats.tcp4_announces_handled, + tcp4_scrapes_handled: stats.tcp4_scrapes_handled, + tcp6_connections_handled: stats.tcp6_connections_handled, + tcp6_announces_handled: stats.tcp6_announces_handled, + tcp6_scrapes_handled: stats.tcp6_scrapes_handled, + udp4_connections_handled: stats.udp4_connections_handled, + udp4_announces_handled: stats.udp4_announces_handled, + udp4_scrapes_handled: stats.udp4_scrapes_handled, + udp6_connections_handled: stats.udp6_connections_handled, + udp6_announces_handled: stats.udp6_announces_handled, + udp6_scrapes_handled: stats.udp6_scrapes_handled, + }, + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use torrust_tracker_configuration::Configuration; + use torrust_tracker_test_helpers::configuration; + + use crate::tracker; + use 
crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; + use crate::tracker::services::tracker_factory; + + pub fn tracker_configuration() -> Arc { + Arc::new(configuration::ephemeral()) + } + + #[tokio::test] + async fn the_statistics_service_should_return_the_tracker_metrics() { + let tracker = Arc::new(tracker_factory(tracker_configuration())); + + let tracker_metrics = get_metrics(tracker.clone()).await; + + assert_eq!( + tracker_metrics, + TrackerMetrics { + torrents_metrics: tracker::TorrentsMetrics::default(), + protocol_metrics: tracker::statistics::Metrics::default(), + } + ); + } +} diff --git a/src/tracker/services/statistics/setup.rs b/src/tracker/services/statistics/setup.rs new file mode 100644 index 000000000..b7cb831cb --- /dev/null +++ b/src/tracker/services/statistics/setup.rs @@ -0,0 +1,37 @@ +use crate::tracker::statistics; + +#[must_use] +pub fn factory(tracker_usage_statistics: bool) -> (Option>, statistics::Repo) { + let mut stats_event_sender = None; + + let mut stats_tracker = statistics::Keeper::new(); + + if tracker_usage_statistics { + stats_event_sender = Some(stats_tracker.run_event_listener()); + } + + (stats_event_sender, stats_tracker.repository) +} + +#[cfg(test)] +mod test { + use super::factory; + + #[tokio::test] + async fn should_not_send_any_event_when_statistics_are_disabled() { + let tracker_usage_statistics = false; + + let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); + + assert!(stats_event_sender.is_none()); + } + + #[tokio::test] + async fn should_send_events_when_statistics_are_enabled() { + let tracker_usage_statistics = true; + + let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); + + assert!(stats_event_sender.is_some()); + } +} diff --git a/src/tracker/services/torrent.rs b/src/tracker/services/torrent.rs new file mode 100644 index 000000000..30d24eb00 --- /dev/null +++ b/src/tracker/services/torrent.rs @@ -0,0 +1,334 @@ +use std::sync::Arc; 

use serde::Deserialize;

use crate::shared::bit_torrent::info_hash::InfoHash;
use crate::tracker::peer::Peer;
use crate::tracker::Tracker;

/// Full torrent info, including the (optional) peer list.
#[derive(Debug, PartialEq)]
pub struct Info {
    pub info_hash: InfoHash,
    pub seeders: u64,
    pub completed: u64,
    pub leechers: u64,
    pub peers: Option<Vec<Peer>>,
}

/// Summarized torrent info, without the peer list.
#[derive(Debug, PartialEq, Clone)]
pub struct BasicInfo {
    pub info_hash: InfoHash,
    pub seeders: u64,
    pub completed: u64,
    pub leechers: u64,
}

/// Zero-based pagination over the torrent list.
#[derive(Deserialize)]
pub struct Pagination {
    pub offset: u32,
    pub limit: u32,
}

impl Pagination {
    #[must_use]
    pub fn new(offset: u32, limit: u32) -> Self {
        Self { offset, limit }
    }

    /// Builds a pagination from optional values, falling back to the defaults.
    #[must_use]
    pub fn new_with_options(offset_option: Option<u32>, limit_option: Option<u32>) -> Self {
        let offset = offset_option.unwrap_or_else(Pagination::default_offset);
        let limit = limit_option.unwrap_or_else(Pagination::default_limit);

        Self { offset, limit }
    }

    #[must_use]
    pub fn default_offset() -> u32 {
        0
    }

    #[must_use]
    pub fn default_limit() -> u32 {
        4000
    }
}

impl Default for Pagination {
    fn default() -> Self {
        Self {
            offset: Self::default_offset(),
            limit: Self::default_limit(),
        }
    }
}

/// Returns the full info for one torrent, or `None` when the tracker does not
/// know the torrent.
pub async fn get_torrent_info(tracker: Arc<Tracker>, info_hash: &InfoHash) -> Option<Info> {
    let db = tracker.get_torrents().await;

    let torrent_entry_option = db.get(info_hash);

    let Some(torrent_entry) = torrent_entry_option else {
        return None;
    };

    let (seeders, completed, leechers) = torrent_entry.get_stats();

    let peers = torrent_entry.get_all_peers();

    // Peers are `Copy`; dereference the borrowed entries into owned values.
    let peers = Some(peers.iter().map(|peer| (**peer)).collect());

    Some(Info {
        info_hash: *info_hash,
        seeders: u64::from(seeders),
        completed: u64::from(completed),
        leechers: u64::from(leechers),
        peers,
    })
}

/// Returns a paginated list of summarized torrent info, ordered by info hash
/// (the underlying map is ordered by key).
pub async fn get_torrents(tracker: Arc<Tracker>, pagination: &Pagination) -> Vec<BasicInfo> {
    let db = tracker.get_torrents().await;

    db.iter()
        .map(|(info_hash, torrent_entry)| {
            let (seeders, completed, leechers) = torrent_entry.get_stats();
            BasicInfo {
                info_hash: *info_hash,
                seeders: u64::from(seeders),
                completed: u64::from(completed),
                leechers: u64::from(leechers),
            }
        })
        .skip(pagination.offset as usize)
        .take(pagination.limit as usize)
        .collect()
}

#[cfg(test)]
mod tests {
    use std::net::{IpAddr, Ipv4Addr, SocketAddr};

    use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes};

    use crate::shared::clock::DurationSinceUnixEpoch;
    use crate::tracker::peer;

    fn sample_peer() -> peer::Peer {
        peer::Peer {
            peer_id: peer::Id(*b"-qB00000000000000000"),
            peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080),
            updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0),
            uploaded: NumberOfBytes(0),
            downloaded: NumberOfBytes(0),
            left: NumberOfBytes(0),
            event: AnnounceEvent::Started,
        }
    }

    mod getting_a_torrent_info {

        use std::str::FromStr;
        use std::sync::Arc;

        use torrust_tracker_configuration::Configuration;
        use torrust_tracker_test_helpers::configuration;

        use crate::shared::bit_torrent::info_hash::InfoHash;
        use crate::tracker::services::torrent::tests::sample_peer;
        use crate::tracker::services::torrent::{get_torrent_info, Info};
        use crate::tracker::services::tracker_factory;

        pub fn tracker_configuration() -> Arc<Configuration> {
            Arc::new(configuration::ephemeral())
        }

        #[tokio::test]
        async fn should_return_none_if_the_tracker_does_not_have_the_torrent() {
            let tracker = Arc::new(tracker_factory(tracker_configuration()));

            let torrent_info = get_torrent_info(
                tracker.clone(),
                &InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(),
            )
            .await;

            assert!(torrent_info.is_none());
        }

        #[tokio::test]
        async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() {
            let tracker = Arc::new(tracker_factory(tracker_configuration()));

            let hash =
"9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned();
            let info_hash = InfoHash::from_str(&hash).unwrap();
            tracker
                .update_torrent_with_peer_and_get_stats(&info_hash, &sample_peer())
                .await;

            let torrent_info = get_torrent_info(tracker.clone(), &info_hash).await.unwrap();

            assert_eq!(
                torrent_info,
                Info {
                    info_hash: InfoHash::from_str(&hash).unwrap(),
                    seeders: 1,
                    completed: 0,
                    leechers: 0,
                    peers: Some(vec![sample_peer()]),
                }
            );
        }
    }

    mod searching_for_torrents {

        use std::str::FromStr;
        use std::sync::Arc;

        use torrust_tracker_configuration::Configuration;
        use torrust_tracker_test_helpers::configuration;

        use crate::shared::bit_torrent::info_hash::InfoHash;
        use crate::tracker::services::torrent::tests::sample_peer;
        use crate::tracker::services::torrent::{get_torrents, BasicInfo, Pagination};
        use crate::tracker::services::tracker_factory;

        pub fn tracker_configuration() -> Arc<Configuration> {
            Arc::new(configuration::ephemeral())
        }

        #[tokio::test]
        async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() {
            let tracker = Arc::new(tracker_factory(tracker_configuration()));

            let torrents = get_torrents(tracker.clone(), &Pagination::default()).await;

            assert_eq!(torrents, vec![]);
        }

        #[tokio::test]
        async fn should_return_a_summarized_info_for_all_torrents() {
            let tracker = Arc::new(tracker_factory(tracker_configuration()));

            let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned();
            let info_hash = InfoHash::from_str(&hash).unwrap();

            tracker
                .update_torrent_with_peer_and_get_stats(&info_hash, &sample_peer())
                .await;

            let torrents = get_torrents(tracker.clone(), &Pagination::default()).await;

            assert_eq!(
                torrents,
                vec![BasicInfo {
                    info_hash: InfoHash::from_str(&hash).unwrap(),
                    seeders: 1,
                    completed: 0,
                    leechers: 0,
                }]
            );
        }

        #[tokio::test]
        async fn should_allow_limiting_the_number_of_torrents_in_the_result() {
            let tracker = Arc::new(tracker_factory(tracker_configuration()));

            let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned();
            let info_hash1 = InfoHash::from_str(&hash1).unwrap();
            let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned();
            let info_hash2 = InfoHash::from_str(&hash2).unwrap();

            tracker
                .update_torrent_with_peer_and_get_stats(&info_hash1, &sample_peer())
                .await;
            tracker
                .update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer())
                .await;

            let offset = 0;
            let limit = 1;

            let torrents = get_torrents(tracker.clone(), &Pagination::new(offset, limit)).await;

            assert_eq!(torrents.len(), 1);
        }

        #[tokio::test]
        async fn should_allow_using_pagination_in_the_result() {
            let tracker = Arc::new(tracker_factory(tracker_configuration()));

            let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned();
            let info_hash1 = InfoHash::from_str(&hash1).unwrap();
            let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned();
            let info_hash2 = InfoHash::from_str(&hash2).unwrap();

            tracker
                .update_torrent_with_peer_and_get_stats(&info_hash1, &sample_peer())
                .await;
            tracker
                .update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer())
                .await;

            let offset = 1;
            let limit = 4000;

            let torrents = get_torrents(tracker.clone(), &Pagination::new(offset, limit)).await;

            assert_eq!(torrents.len(), 1);
            assert_eq!(
                torrents,
                vec![BasicInfo {
                    info_hash: InfoHash::from_str(&hash1).unwrap(),
                    seeders: 1,
                    completed: 0,
                    leechers: 0,
                }]
            );
        }

        #[tokio::test]
        async fn should_return_torrents_ordered_by_info_hash() {
            let tracker = Arc::new(tracker_factory(tracker_configuration()));

            let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned();
            let info_hash1 = InfoHash::from_str(&hash1).unwrap();
            tracker
                .update_torrent_with_peer_and_get_stats(&info_hash1, &sample_peer())
                .await;

            let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned();
            let info_hash2 = InfoHash::from_str(&hash2).unwrap();
            tracker
                .update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer())
                .await;

            let torrents = get_torrents(tracker.clone(), &Pagination::default()).await;

            assert_eq!(
                torrents,
                vec![
                    BasicInfo {
                        info_hash: InfoHash::from_str(&hash2).unwrap(),
                        seeders: 1,
                        completed: 0,
                        leechers: 0,
                    },
                    BasicInfo {
                        info_hash: InfoHash::from_str(&hash1).unwrap(),
                        seeders: 1,
                        completed: 0,
                        leechers: 0,
                    }
                ]
            );
        }
    }
}
diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs
new file mode 100644
index 000000000..f9079962c
--- /dev/null
+++ b/src/tracker/statistics.rs
@@ -0,0 +1,431 @@
use std::sync::Arc;

use async_trait::async_trait;
use log::debug;
#[cfg(test)]
use mockall::{automock, predicate::str};
use tokio::sync::mpsc::error::SendError;
use tokio::sync::{mpsc, RwLock, RwLockReadGuard};

const CHANNEL_BUFFER_SIZE: usize = 65_535;

/// A statistical event produced by one of the tracker protocols.
#[derive(Debug, PartialEq, Eq)]
pub enum Event {
    // code-review: consider one single event for request type with data: Event::Announce { scheme: HTTPorUDP, ip_version: V4orV6 }
    // Attributes are enums too.
+ Tcp4Announce, + Tcp4Scrape, + Tcp6Announce, + Tcp6Scrape, + Udp4Connect, + Udp4Announce, + Udp4Scrape, + Udp6Connect, + Udp6Announce, + Udp6Scrape, +} + +#[derive(Debug, PartialEq, Default)] +pub struct Metrics { + pub tcp4_connections_handled: u64, + pub tcp4_announces_handled: u64, + pub tcp4_scrapes_handled: u64, + pub tcp6_connections_handled: u64, + pub tcp6_announces_handled: u64, + pub tcp6_scrapes_handled: u64, + pub udp4_connections_handled: u64, + pub udp4_announces_handled: u64, + pub udp4_scrapes_handled: u64, + pub udp6_connections_handled: u64, + pub udp6_announces_handled: u64, + pub udp6_scrapes_handled: u64, +} + +pub struct Keeper { + pub repository: Repo, +} + +impl Default for Keeper { + fn default() -> Self { + Self::new() + } +} + +impl Keeper { + #[must_use] + pub fn new() -> Self { + Self { repository: Repo::new() } + } + + #[must_use] + pub fn new_active_instance() -> (Box, Repo) { + let mut stats_tracker = Self::new(); + + let stats_event_sender = stats_tracker.run_event_listener(); + + (stats_event_sender, stats_tracker.repository) + } + + pub fn run_event_listener(&mut self) -> Box { + let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); + + let stats_repository = self.repository.clone(); + + tokio::spawn(async move { event_listener(receiver, stats_repository).await }); + + Box::new(Sender { sender }) + } +} + +async fn event_listener(mut receiver: mpsc::Receiver, stats_repository: Repo) { + while let Some(event) = receiver.recv().await { + event_handler(event, &stats_repository).await; + } +} + +async fn event_handler(event: Event, stats_repository: &Repo) { + match event { + // TCP4 + Event::Tcp4Announce => { + stats_repository.increase_tcp4_announces().await; + stats_repository.increase_tcp4_connections().await; + } + Event::Tcp4Scrape => { + stats_repository.increase_tcp4_scrapes().await; + stats_repository.increase_tcp4_connections().await; + } + + // TCP6 + Event::Tcp6Announce => { + 
stats_repository.increase_tcp6_announces().await; + stats_repository.increase_tcp6_connections().await; + } + Event::Tcp6Scrape => { + stats_repository.increase_tcp6_scrapes().await; + stats_repository.increase_tcp6_connections().await; + } + + // UDP4 + Event::Udp4Connect => { + stats_repository.increase_udp4_connections().await; + } + Event::Udp4Announce => { + stats_repository.increase_udp4_announces().await; + } + Event::Udp4Scrape => { + stats_repository.increase_udp4_scrapes().await; + } + + // UDP6 + Event::Udp6Connect => { + stats_repository.increase_udp6_connections().await; + } + Event::Udp6Announce => { + stats_repository.increase_udp6_announces().await; + } + Event::Udp6Scrape => { + stats_repository.increase_udp6_scrapes().await; + } + } + + debug!("stats: {:?}", stats_repository.get_stats().await); +} + +#[async_trait] +#[cfg_attr(test, automock)] +pub trait EventSender: Sync + Send { + async fn send_event(&self, event: Event) -> Option>>; +} + +pub struct Sender { + sender: mpsc::Sender, +} + +#[async_trait] +impl EventSender for Sender { + async fn send_event(&self, event: Event) -> Option>> { + Some(self.sender.send(event).await) + } +} + +#[derive(Clone)] +pub struct Repo { + pub stats: Arc>, +} + +impl Default for Repo { + fn default() -> Self { + Self::new() + } +} + +impl Repo { + #[must_use] + pub fn new() -> Self { + Self { + stats: Arc::new(RwLock::new(Metrics::default())), + } + } + + pub async fn get_stats(&self) -> RwLockReadGuard<'_, Metrics> { + self.stats.read().await + } + + pub async fn increase_tcp4_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp4_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp4_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp4_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp4_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp4_scrapes_handled += 1; + 
drop(stats_lock); + } + + pub async fn increase_tcp6_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp6_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp6_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp6_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp6_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp6_scrapes_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_scrapes_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_scrapes_handled += 1; + drop(stats_lock); + } +} + +#[cfg(test)] +mod tests { + + mod stats_tracker { + use crate::tracker::statistics::{Event, Keeper, Metrics}; + + #[tokio::test] + async fn should_contain_the_tracker_statistics() { + let stats_tracker = Keeper::new(); + + let stats = stats_tracker.repository.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, Metrics::default().tcp4_announces_handled); + } + + #[tokio::test] + async fn should_create_an_event_sender_to_send_statistical_events() { + let mut stats_tracker 
= Keeper::new(); + + let event_sender = stats_tracker.run_event_listener(); + + let result = event_sender.send_event(Event::Udp4Connect).await; + + assert!(result.is_some()); + } + } + + mod event_handler { + use crate::tracker::statistics::{event_handler, Event, Repo}; + + #[tokio::test] + async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Tcp4Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_announce_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Tcp4Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp4_scrapes_counter_when_it_receives_a_tcp4_scrape_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Tcp4Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_scrapes_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_scrape_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Tcp4Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_announces_counter_when_it_receives_a_tcp6_announce_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Tcp6Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 1); + } + + #[tokio::test] + async fn 
should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_announce_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Tcp6Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_scrapes_counter_when_it_receives_a_tcp6_scrape_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Tcp6Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_scrapes_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_scrape_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Tcp6Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_connections_counter_when_it_receives_a_udp4_connect_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Udp4Connect, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_announces_counter_when_it_receives_a_udp4_announce_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Udp4Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_scrapes_counter_when_it_receives_a_udp4_scrape_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Udp4Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_scrapes_handled, 1); + } + + #[tokio::test] + async fn 
should_increase_the_udp6_connections_counter_when_it_receives_a_udp6_connect_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Udp6Connect, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_announces_counter_when_it_receives_a_udp6_announce_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Udp6Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_scrapes_counter_when_it_receives_a_udp6_scrape_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Udp6Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_scrapes_handled, 1); + } + } +} diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs new file mode 100644 index 000000000..882e52ff1 --- /dev/null +++ b/src/tracker/torrent.rs @@ -0,0 +1,419 @@ +use std::time::Duration; + +use aquatic_udp_protocol::AnnounceEvent; +use serde::{Deserialize, Serialize}; + +use super::peer::{self, Peer}; +use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; +use crate::shared::clock::{Current, TimeNow}; + +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct Entry { + #[serde(skip)] + pub peers: std::collections::BTreeMap, + pub completed: u32, +} + +/// Swarm statistics for one torrent. +/// Swarm metadata dictionary in the scrape response. 
+/// BEP 48: +#[derive(Debug, PartialEq, Default)] +pub struct SwarmMetadata { + pub complete: u32, // The number of active peers that have completed downloading (seeders) + pub downloaded: u32, // The number of peers that have ever completed downloading + pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) +} + +impl SwarmMetadata { + #[must_use] + pub fn zeroed() -> Self { + Self::default() + } +} + +/// Swarm statistics for one torrent. +/// Alternative struct for swarm metadata in scrape response. +#[derive(Debug, PartialEq, Default)] +pub struct SwarmStats { + pub completed: u32, // The number of peers that have ever completed downloading + pub seeders: u32, // The number of active peers that have completed downloading (seeders) + pub leechers: u32, // The number of active peers that have not completed downloading (leechers) +} + +impl Entry { + #[must_use] + pub fn new() -> Entry { + Entry { + peers: std::collections::BTreeMap::new(), + completed: 0, + } + } + + // Update peer and return completed (times torrent has been downloaded) + pub fn update_peer(&mut self, peer: &peer::Peer) -> bool { + let mut did_torrent_stats_change: bool = false; + + match peer.event { + AnnounceEvent::Stopped => { + let _ = self.peers.remove(&peer.peer_id); + } + AnnounceEvent::Completed => { + let peer_old = self.peers.insert(peer.peer_id, *peer); + // Don't count if peer was not previously known + if peer_old.is_some() { + self.completed += 1; + did_torrent_stats_change = true; + } + } + _ => { + let _ = self.peers.insert(peer.peer_id, *peer); + } + } + + did_torrent_stats_change + } + + /// Get all peers, limiting the result to the maximum number of scrape torrents. + #[must_use] + pub fn get_all_peers(&self) -> Vec<&peer::Peer> { + self.peers.values().take(MAX_SCRAPE_TORRENTS as usize).collect() + } + + /// Returns the list of peers for a given client. + /// It filters out the input peer. 
+ #[must_use] + pub fn get_peers_for_peer(&self, client: &Peer) -> Vec<&peer::Peer> { + self.peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer.peer_addr != client.peer_addr) + // Limit the number of peers on the result + .take(MAX_SCRAPE_TORRENTS as usize) + .collect() + } + + #[allow(clippy::cast_possible_truncation)] + #[must_use] + pub fn get_stats(&self) -> (u32, u32, u32) { + let seeders: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; + let leechers: u32 = self.peers.len() as u32 - seeders; + (seeders, self.completed, leechers) + } + + #[must_use] + pub fn get_swarm_metadata(&self) -> SwarmMetadata { + // code-review: consider using always this function instead of `get_stats`. + let (seeders, completed, leechers) = self.get_stats(); + SwarmMetadata { + complete: seeders, + downloaded: completed, + incomplete: leechers, + } + } + + pub fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { + let current_cutoff = Current::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); + self.peers.retain(|_, peer| peer.updated > current_cutoff); + } +} + +impl Default for Entry { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod tests { + + mod torrent_entry { + + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::ops::Sub; + use std::time::Duration; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + + use crate::shared::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; + use crate::tracker::peer; + use crate::tracker::torrent::Entry; + + struct TorrentPeerBuilder { + peer: peer::Peer, + } + + impl TorrentPeerBuilder { + pub fn default() -> TorrentPeerBuilder { + let default_peer = peer::Peer { + peer_id: peer::Id([0u8; 20]), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + updated: Current::now(), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + 
event: AnnounceEvent::Started, + }; + TorrentPeerBuilder { peer: default_peer } + } + + pub fn with_event_completed(mut self) -> Self { + self.peer.event = AnnounceEvent::Completed; + self + } + + pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; + self + } + + pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { + self.peer.peer_id = peer_id; + self + } + + pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes(left); + self + } + + pub fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { + self.peer.updated = updated; + self + } + + pub fn into(self) -> peer::Peer { + self.peer + } + } + + /// A torrent seeder is a peer with 0 bytes left to download which + /// has not announced it has stopped + fn a_torrent_seeder() -> peer::Peer { + TorrentPeerBuilder::default() + .with_number_of_bytes_left(0) + .with_event_completed() + .into() + } + + /// A torrent leecher is a peer that is not a seeder. 
+ /// Leecher: left > 0 OR event = Stopped + fn a_torrent_leecher() -> peer::Peer { + TorrentPeerBuilder::default() + .with_number_of_bytes_left(1) + .with_event_completed() + .into() + } + + #[test] + fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { + let torrent_entry = Entry::new(); + + assert_eq!(torrent_entry.get_all_peers().len(), 0); + } + + #[test] + fn a_new_peer_can_be_added_to_a_torrent_entry() { + let mut torrent_entry = Entry::new(); + let torrent_peer = TorrentPeerBuilder::default().into(); + + torrent_entry.update_peer(&torrent_peer); // Add the peer + + assert_eq!(*torrent_entry.get_all_peers()[0], torrent_peer); + assert_eq!(torrent_entry.get_all_peers().len(), 1); + } + + #[test] + fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { + let mut torrent_entry = Entry::new(); + let torrent_peer = TorrentPeerBuilder::default().into(); + + torrent_entry.update_peer(&torrent_peer); // Add the peer + + assert_eq!(torrent_entry.get_all_peers(), vec![&torrent_peer]); + } + + #[test] + fn a_peer_can_be_updated_in_a_torrent_entry() { + let mut torrent_entry = Entry::new(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.update_peer(&torrent_peer); // Add the peer + + torrent_peer.event = AnnounceEvent::Completed; // Update the peer + torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry + + assert_eq!(torrent_entry.get_all_peers()[0].event, AnnounceEvent::Completed); + } + + #[test] + fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { + let mut torrent_entry = Entry::new(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + torrent_entry.update_peer(&torrent_peer); // Add the peer + + torrent_peer.event = AnnounceEvent::Stopped; // Update the peer + torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry + + assert_eq!(torrent_entry.get_all_peers().len(), 0); + } + 
+ #[test] + fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { + let mut torrent_entry = Entry::new(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + + torrent_entry.update_peer(&torrent_peer); // Add the peer + + torrent_peer.event = AnnounceEvent::Completed; // Update the peer + let stats_have_changed = torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry + + assert!(stats_have_changed); + } + + #[test] + fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( + ) { + let mut torrent_entry = Entry::new(); + let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); + + // Add a peer that did not exist before in the entry + let torrent_stats_have_not_changed = !torrent_entry.update_peer(&torrent_peer_announcing_complete_event); + + assert!(torrent_stats_have_not_changed); + } + + #[test] + fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_the_client_that_is_making_the_request() + { + let mut torrent_entry = Entry::new(); + let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); + torrent_entry.update_peer(&torrent_peer); // Add peer + + // Get peers excluding the one we have just added + let peers = torrent_entry.get_peers_for_peer(&torrent_peer); + + assert_eq!(peers.len(), 0); + } + + #[test] + fn two_peers_with_the_same_ip_but_different_port_should_be_considered_different_peers() { + let mut torrent_entry = Entry::new(); + + let peer_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + + // Add peer 1 + let torrent_peer_1 = TorrentPeerBuilder::default() + .with_peer_address(SocketAddr::new(peer_ip, 8080)) + .into(); + torrent_entry.update_peer(&torrent_peer_1); + + // Add peer 2 + let 
torrent_peer_2 = TorrentPeerBuilder::default() + .with_peer_address(SocketAddr::new(peer_ip, 8081)) + .into(); + torrent_entry.update_peer(&torrent_peer_2); + + // Get peers for peer 1 + let peers = torrent_entry.get_peers_for_peer(&torrent_peer_1); + + // The peer 2 using the same IP but different port should be included + assert_eq!(peers[0].peer_addr.ip(), Ipv4Addr::new(127, 0, 0, 1)); + assert_eq!(peers[0].peer_addr.port(), 8081); + } + + fn peer_id_from_i32(number: i32) -> peer::Id { + let peer_id = number.to_le_bytes(); + peer::Id([ + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], + peer_id[2], peer_id[3], + ]) + } + + #[test] + fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { + let mut torrent_entry = Entry::new(); + + // We add one more peer than the scrape limit + for peer_number in 1..=74 + 1 { + let torrent_peer = TorrentPeerBuilder::default() + .with_peer_id(peer_id_from_i32(peer_number)) + .into(); + torrent_entry.update_peer(&torrent_peer); + } + + let peers = torrent_entry.get_all_peers(); + + assert_eq!(peers.len(), 74); + } + + #[test] + fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { + let mut torrent_entry = Entry::new(); + let torrent_seeder = a_torrent_seeder(); + + torrent_entry.update_peer(&torrent_seeder); // Add seeder + + assert_eq!(torrent_entry.get_stats().0, 1); + } + + #[test] + fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { + let mut torrent_entry = Entry::new(); + let torrent_leecher = a_torrent_leecher(); + + torrent_entry.update_peer(&torrent_leecher); // Add leecher + + assert_eq!(torrent_entry.get_stats().2, 1); + } + + #[test] + fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( + ) { + let mut torrent_entry = Entry::new(); + let mut torrent_peer = TorrentPeerBuilder::default().into(); + 
torrent_entry.update_peer(&torrent_peer); // Add the peer + + // Announce "Completed" torrent download event. + torrent_peer.event = AnnounceEvent::Completed; + torrent_entry.update_peer(&torrent_peer); // Update the peer + + let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().1; + + assert_eq!(number_of_previously_known_peers_with_completed_torrent, 1); + } + + #[test] + fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { + let mut torrent_entry = Entry::new(); + let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); + + // Announce "Completed" torrent download event. + // It's the first event announced from this peer. + torrent_entry.update_peer(&torrent_peer_announcing_complete_event); // Add the peer + + let number_of_peers_with_completed_torrent = torrent_entry.get_stats().1; + + assert_eq!(number_of_peers_with_completed_torrent, 0); + } + + #[test] + fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { + let mut torrent_entry = Entry::new(); + + let timeout = 120u32; + + let now = Working::now(); + Stopped::local_set(&now); + + let timeout_seconds_before_now = now.sub(Duration::from_secs(u64::from(timeout))); + let inactive_peer = TorrentPeerBuilder::default() + .updated_at(timeout_seconds_before_now.sub(Duration::from_secs(1))) + .into(); + torrent_entry.update_peer(&inactive_peer); // Add the peer + + torrent_entry.remove_inactive_peers(timeout); + + assert_eq!(torrent_entry.peers.len(), 0); + } + } +} diff --git a/src/utils.rs b/src/utils.rs deleted file mode 100644 index e3a8302df..000000000 --- a/src/utils.rs +++ /dev/null @@ -1,58 +0,0 @@ -use std::net::SocketAddr; -use std::time::SystemTime; -use std::error::Error; -use std::fmt::Write; -use std::io::Cursor; -use aquatic_udp_protocol::ConnectionId; -use byteorder::{BigEndian, ReadBytesExt}; - -pub fn 
get_connection_id(remote_address: &SocketAddr) -> ConnectionId { - match std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH) { - Ok(duration) => ConnectionId(((duration.as_secs() / 3600) | ((remote_address.port() as u64) << 36)) as i64), - Err(_) => ConnectionId(0x7FFFFFFFFFFFFFFF), - } -} - -pub fn current_time() -> u64 { - SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH).unwrap() - .as_secs() -} - -pub fn url_encode_bytes(content: &[u8]) -> Result> { - let mut out: String = String::new(); - - for byte in content.iter() { - match *byte as char { - '0'..='9' | 'a'..='z' | 'A'..='Z' | '.' | '-' | '_' | '~' => out.push(*byte as char), - _ => write!(&mut out, "%{:02x}", byte)?, - }; - } - - Ok(out) -} - -// Function that will convert a small or big number into the smallest form of a byte array. -pub async fn convert_int_to_bytes(number: &u64) -> Vec { - let mut return_data: Vec = Vec::new(); - // return_data.extend(number.to_be_bytes().reverse()); - for i in 1..8 { - if number < &256u64.pow(i) { - let start: usize = 16usize - i as usize; - return_data.extend(number.to_be_bytes()[start..8].iter()); - return return_data; - } - } - return return_data; -} - -pub async fn convert_bytes_to_int(array: &Vec) -> u64 { - let mut array_fixed: Vec = Vec::new(); - let size = 8 - array.len(); - for _ in 0..size { - array_fixed.push(0); - } - array_fixed.extend(array); - let mut rdr = Cursor::new(array_fixed); - return rdr.read_u64::().unwrap(); -} diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 000000000..04860056c --- /dev/null +++ b/tests/README.md @@ -0,0 +1,9 @@ +### Running Benchmarks + +#### HTTP(S) Announce Peer + Torrent +For this benchmark we use the tool [wrk](https://github.com/wg/wrk). 
+ +To run the benchmark using wrk, execute the following example script (change the url to your own tracker url): + + wrk -c200 -t1 -d10s -s ./wrk_benchmark_announce.lua --latency http://tracker.dutchbits.nl + diff --git a/tests/common/app.rs b/tests/common/app.rs new file mode 100644 index 000000000..ee3fba064 --- /dev/null +++ b/tests/common/app.rs @@ -0,0 +1,8 @@ +use std::sync::Arc; + +use torrust_tracker::bootstrap; +use torrust_tracker::tracker::Tracker; + +pub fn setup_with_configuration(configuration: &Arc) -> Arc { + bootstrap::app::initialize_with_configuration(configuration) +} diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs new file mode 100644 index 000000000..7062c8376 --- /dev/null +++ b/tests/common/fixtures.rs @@ -0,0 +1,78 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use torrust_tracker::shared::clock::DurationSinceUnixEpoch; +use torrust_tracker::tracker::peer::{self, Id, Peer}; + +pub struct PeerBuilder { + peer: Peer, +} + +impl PeerBuilder { + #[allow(dead_code)] + pub fn default() -> PeerBuilder { + Self { + peer: default_peer_for_testing(), + } + } + + #[allow(dead_code)] + pub fn with_peer_id(mut self, peer_id: &Id) -> Self { + self.peer.peer_id = *peer_id; + self + } + + #[allow(dead_code)] + pub fn with_peer_addr(mut self, peer_addr: &SocketAddr) -> Self { + self.peer.peer_addr = *peer_addr; + self + } + + #[allow(dead_code)] + pub fn with_bytes_pending_to_download(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes(left); + self + } + + #[allow(dead_code)] + pub fn with_no_bytes_pending_to_download(mut self) -> Self { + self.peer.left = NumberOfBytes(0); + self + } + + #[allow(dead_code)] + pub fn build(self) -> Peer { + self.into() + } + + #[allow(dead_code)] + pub fn into(self) -> Peer { + self.peer + } +} + +#[allow(dead_code)] +fn default_peer_for_testing() -> Peer { + Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), + peer_addr: 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + } +} + +#[allow(dead_code)] +pub fn invalid_info_hashes() -> Vec { + [ + "0".to_string(), + "-1".to_string(), + "1.1".to_string(), + "INVALID INFOHASH".to_string(), + "9c38422213e30bff212b30c360d26f9a0213642".to_string(), // 39-char length instead of 40 + "9c38422213e30bff212b30c360d26f9a0213642&".to_string(), // Invalid char + ] + .to_vec() +} diff --git a/tests/common/http.rs b/tests/common/http.rs new file mode 100644 index 000000000..d682027fd --- /dev/null +++ b/tests/common/http.rs @@ -0,0 +1,54 @@ +pub type ReqwestQuery = Vec; +pub type ReqwestQueryParam = (String, String); + +/// URL Query component +#[derive(Default, Debug)] +pub struct Query { + params: Vec, +} + +impl Query { + pub fn empty() -> Self { + Self { params: vec![] } + } + + pub fn params(params: Vec) -> Self { + Self { params } + } + + pub fn add_param(&mut self, param: QueryParam) { + self.params.push(param); + } +} + +impl From for ReqwestQuery { + fn from(url_search_params: Query) -> Self { + url_search_params + .params + .iter() + .map(|param| ReqwestQueryParam::from((*param).clone())) + .collect() + } +} + +/// URL query param +#[derive(Clone, Debug)] +pub struct QueryParam { + name: String, + value: String, +} + +impl QueryParam { + pub fn new(name: &str, value: &str) -> Self { + Self { + name: name.to_string(), + value: value.to_string(), + } + } +} + +impl From for ReqwestQueryParam { + fn from(param: QueryParam) -> Self { + (param.name, param.value) + } +} diff --git a/tests/common/mod.rs b/tests/common/mod.rs new file mode 100644 index 000000000..51a8a5b03 --- /dev/null +++ b/tests/common/mod.rs @@ -0,0 +1,4 @@ +pub mod app; +pub mod fixtures; +pub mod http; +pub mod udp; diff --git a/tests/common/udp.rs b/tests/common/udp.rs new file 
mode 100644 index 000000000..3d84e2b97 --- /dev/null +++ b/tests/common/udp.rs @@ -0,0 +1,41 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use tokio::net::UdpSocket; + +/// A generic UDP client +pub struct Client { + pub socket: Arc, +} + +impl Client { + #[allow(dead_code)] + pub async fn connected(remote_socket_addr: &SocketAddr, local_socket_addr: &SocketAddr) -> Client { + let client = Client::bind(local_socket_addr).await; + client.connect(remote_socket_addr).await; + client + } + + pub async fn bind(local_socket_addr: &SocketAddr) -> Self { + let socket = UdpSocket::bind(local_socket_addr).await.unwrap(); + Self { + socket: Arc::new(socket), + } + } + + pub async fn connect(&self, remote_address: &SocketAddr) { + self.socket.connect(remote_address).await.unwrap(); + } + + #[allow(dead_code)] + pub async fn send(&self, bytes: &[u8]) -> usize { + self.socket.writable().await.unwrap(); + self.socket.send(bytes).await.unwrap() + } + + #[allow(dead_code)] + pub async fn receive(&self, bytes: &mut [u8]) -> usize { + self.socket.readable().await.unwrap(); + self.socket.recv(bytes).await.unwrap() + } +} diff --git a/tests/integration.rs b/tests/integration.rs new file mode 100644 index 000000000..5d66d9074 --- /dev/null +++ b/tests/integration.rs @@ -0,0 +1,7 @@ +//! Integration tests. +//! +//! ```text +//! cargo test --test integration +//! 
``` +mod common; +mod servers; diff --git a/tests/servers/api/connection_info.rs b/tests/servers/api/connection_info.rs new file mode 100644 index 000000000..35314a2fd --- /dev/null +++ b/tests/servers/api/connection_info.rs @@ -0,0 +1,29 @@ +pub fn connection_with_invalid_token(bind_address: &str) -> ConnectionInfo { + ConnectionInfo::authenticated(bind_address, "invalid token") +} + +pub fn connection_with_no_token(bind_address: &str) -> ConnectionInfo { + ConnectionInfo::anonymous(bind_address) +} + +#[derive(Clone)] +pub struct ConnectionInfo { + pub bind_address: String, + pub api_token: Option, +} + +impl ConnectionInfo { + pub fn authenticated(bind_address: &str, api_token: &str) -> Self { + Self { + bind_address: bind_address.to_string(), + api_token: Some(api_token.to_string()), + } + } + + pub fn anonymous(bind_address: &str) -> Self { + Self { + bind_address: bind_address.to_string(), + api_token: None, + } + } +} diff --git a/tests/servers/api/mod.rs b/tests/servers/api/mod.rs new file mode 100644 index 000000000..7022da9b4 --- /dev/null +++ b/tests/servers/api/mod.rs @@ -0,0 +1,14 @@ +use std::sync::Arc; + +use torrust_tracker::tracker::Tracker; + +pub mod connection_info; +pub mod test_environment; +pub mod v1; + +/// It forces a database error by dropping all tables. +/// That makes any query fail. +/// code-review: alternatively we could inject a database mock in the future. 
+pub fn force_database_error(tracker: &Arc) { + tracker.database.drop_database_tables().unwrap(); +} diff --git a/tests/servers/api/test_environment.rs b/tests/servers/api/test_environment.rs new file mode 100644 index 000000000..dbb23dcfa --- /dev/null +++ b/tests/servers/api/test_environment.rs @@ -0,0 +1,105 @@ +use std::sync::Arc; + +use torrust_tracker::servers::apis::server::{ApiServer, RunningApiServer, StoppedApiServer}; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker::tracker::peer::Peer; +use torrust_tracker::tracker::Tracker; + +use super::connection_info::ConnectionInfo; +use crate::common::app::setup_with_configuration; + +#[allow(clippy::module_name_repetitions, dead_code)] +pub type StoppedTestEnvironment = TestEnvironment; +#[allow(clippy::module_name_repetitions)] +pub type RunningTestEnvironment = TestEnvironment; + +pub struct TestEnvironment { + pub cfg: Arc, + pub tracker: Arc, + pub state: S, +} + +#[allow(dead_code)] +pub struct Stopped { + api_server: StoppedApiServer, +} + +pub struct Running { + api_server: RunningApiServer, +} + +impl TestEnvironment { + /// Add a torrent to the tracker + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { + self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + } +} + +impl TestEnvironment { + pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { + let cfg = Arc::new(cfg); + + let tracker = setup_with_configuration(&cfg); + + let api_server = api_server(cfg.http_api.clone()); + + Self { + cfg, + tracker, + state: Stopped { api_server }, + } + } + + pub async fn start(self) -> TestEnvironment { + TestEnvironment { + cfg: self.cfg, + tracker: self.tracker.clone(), + state: Running { + api_server: self.state.api_server.start(self.tracker).await.unwrap(), + }, + } + } + + pub fn config_mut(&mut self) -> &mut torrust_tracker_configuration::HttpApi { + &mut self.state.api_server.cfg + } +} + +impl 
TestEnvironment { + pub async fn new_running(cfg: torrust_tracker_configuration::Configuration) -> Self { + let test_env = StoppedTestEnvironment::new_stopped(cfg); + + test_env.start().await + } + + pub async fn stop(self) -> TestEnvironment { + TestEnvironment { + cfg: self.cfg, + tracker: self.tracker, + state: Stopped { + api_server: self.state.api_server.stop().await.unwrap(), + }, + } + } + + pub fn get_connection_info(&self) -> ConnectionInfo { + ConnectionInfo { + bind_address: self.state.api_server.state.bind_addr.to_string(), + api_token: self.state.api_server.cfg.access_tokens.get("admin").cloned(), + } + } +} + +#[allow(clippy::module_name_repetitions)] +pub fn stopped_test_environment(cfg: torrust_tracker_configuration::Configuration) -> StoppedTestEnvironment { + TestEnvironment::new_stopped(cfg) +} + +#[allow(clippy::module_name_repetitions)] +pub async fn running_test_environment(cfg: torrust_tracker_configuration::Configuration) -> RunningTestEnvironment { + TestEnvironment::new_running(cfg).await +} + +pub fn api_server(cfg: torrust_tracker_configuration::HttpApi) -> StoppedApiServer { + ApiServer::new(cfg) +} diff --git a/tests/servers/api/v1/asserts.rs b/tests/servers/api/v1/asserts.rs new file mode 100644 index 000000000..1b1f204a2 --- /dev/null +++ b/tests/servers/api/v1/asserts.rs @@ -0,0 +1,139 @@ +// code-review: should we use macros to return the exact line where the assert fails? 
+ +use reqwest::Response; +use torrust_tracker::servers::apis::v1::context::auth_key::resources::AuthKey; +use torrust_tracker::servers::apis::v1::context::stats::resources::Stats; +use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{ListItem, Torrent}; + +// Resource responses + +pub async fn assert_stats(response: Response, stats: Stats) { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.json::().await.unwrap(), stats); +} + +pub async fn assert_torrent_list(response: Response, torrents: Vec) { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.json::>().await.unwrap(), torrents); +} + +pub async fn assert_torrent_info(response: Response, torrent: Torrent) { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.json::().await.unwrap(), torrent); +} + +pub async fn assert_auth_key_utf8(response: Response) -> AuthKey { + assert_eq!(response.status(), 200); + assert_eq!( + response.headers().get("content-type").unwrap(), + "application/json; charset=utf-8" + ); + response.json::().await.unwrap() +} + +// OK response + +pub async fn assert_ok(response: Response) { + let response_status = response.status(); + let response_headers = response.headers().get("content-type").cloned().unwrap(); + let response_text = response.text().await.unwrap(); + + let details = format!( + r#" + status: ´{response_status}´ + headers: ´{response_headers:?}´ + text: ´"{response_text}"´"# + ); + + assert_eq!(response_status, 200, "details:{details}."); + assert_eq!(response_headers, "application/json", "\ndetails:{details}."); + assert_eq!(response_text, "{\"status\":\"ok\"}", "\ndetails:{details}."); +} + +// Error responses + +pub async fn assert_bad_request(response: Response, body: &str) { 
+ assert_eq!(response.status(), 400); + assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); + assert_eq!(response.text().await.unwrap(), body); +} + +pub async fn assert_not_found(response: Response) { + assert_eq!(response.status(), 404); + // todo: missing header in the response + //assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); + assert_eq!(response.text().await.unwrap(), ""); +} + +pub async fn assert_torrent_not_known(response: Response) { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.text().await.unwrap(), "\"torrent not known\""); +} + +pub async fn assert_invalid_infohash_param(response: Response, invalid_infohash: &str) { + assert_bad_request( + response, + &format!("Invalid URL: invalid infohash param: string \"{invalid_infohash}\", expected a 40 character long string"), + ) + .await; +} + +pub async fn assert_invalid_auth_key_param(response: Response, invalid_auth_key: &str) { + assert_bad_request(response, &format!("Invalid auth key id param \"{}\"", &invalid_auth_key)).await; +} + +pub async fn assert_invalid_key_duration_param(response: Response, invalid_key_duration: &str) { + assert_bad_request( + response, + &format!("Invalid URL: Cannot parse `\"{invalid_key_duration}\"` to a `u64`"), + ) + .await; +} + +pub async fn assert_token_not_valid(response: Response) { + assert_unhandled_rejection(response, "token not valid").await; +} + +pub async fn assert_unauthorized(response: Response) { + assert_unhandled_rejection(response, "unauthorized").await; +} + +pub async fn assert_failed_to_remove_torrent_from_whitelist(response: Response) { + assert_unhandled_rejection(response, "failed to remove torrent from whitelist").await; +} + +pub async fn assert_failed_to_whitelist_torrent(response: Response) { + assert_unhandled_rejection(response, "failed to whitelist torrent").await; +} 
+ +pub async fn assert_failed_to_reload_whitelist(response: Response) { + assert_unhandled_rejection(response, "failed to reload whitelist").await; +} + +pub async fn assert_failed_to_generate_key(response: Response) { + assert_unhandled_rejection(response, "failed to generate key").await; +} + +pub async fn assert_failed_to_delete_key(response: Response) { + assert_unhandled_rejection(response, "failed to delete key").await; +} + +pub async fn assert_failed_to_reload_keys(response: Response) { + assert_unhandled_rejection(response, "failed to reload keys").await; +} + +async fn assert_unhandled_rejection(response: Response, reason: &str) { + assert_eq!(response.status(), 500); + assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); + + let reason_text = format!("Unhandled rejection: Err {{ reason: \"{reason}"); + let response_text = response.text().await.unwrap(); + assert!( + response_text.contains(&reason_text), + ":\n response: `\"{response_text}\"`\n dose not contain: `\"{reason_text}\"`." 
+ ); +} diff --git a/tests/servers/api/v1/client.rs b/tests/servers/api/v1/client.rs new file mode 100644 index 000000000..2b6db2e77 --- /dev/null +++ b/tests/servers/api/v1/client.rs @@ -0,0 +1,116 @@ +use reqwest::Response; + +use crate::common::http::{Query, QueryParam, ReqwestQuery}; +use crate::servers::api::connection_info::ConnectionInfo; + +/// API Client +pub struct Client { + connection_info: ConnectionInfo, + base_path: String, +} + +impl Client { + pub fn new(connection_info: ConnectionInfo) -> Self { + Self { + connection_info, + base_path: "/api/v1/".to_string(), + } + } + + pub async fn generate_auth_key(&self, seconds_valid: i32) -> Response { + self.post(&format!("key/{}", &seconds_valid)).await + } + + pub async fn delete_auth_key(&self, key: &str) -> Response { + self.delete(&format!("key/{}", &key)).await + } + + pub async fn reload_keys(&self) -> Response { + self.get("keys/reload", Query::default()).await + } + + pub async fn whitelist_a_torrent(&self, info_hash: &str) -> Response { + self.post(&format!("whitelist/{}", &info_hash)).await + } + + pub async fn remove_torrent_from_whitelist(&self, info_hash: &str) -> Response { + self.delete(&format!("whitelist/{}", &info_hash)).await + } + + pub async fn reload_whitelist(&self) -> Response { + self.get("whitelist/reload", Query::default()).await + } + + pub async fn get_torrent(&self, info_hash: &str) -> Response { + self.get(&format!("torrent/{}", &info_hash), Query::default()).await + } + + pub async fn get_torrents(&self, params: Query) -> Response { + self.get("torrents", params).await + } + + pub async fn get_tracker_statistics(&self) -> Response { + self.get("stats", Query::default()).await + } + + pub async fn get(&self, path: &str, params: Query) -> Response { + let mut query: Query = params; + + if let Some(token) = &self.connection_info.api_token { + query.add_param(QueryParam::new("token", token)); + }; + + self.get_request_with_query(path, query).await + } + + pub async fn 
post(&self, path: &str) -> Response { + reqwest::Client::new() + .post(self.base_url(path).clone()) + .query(&ReqwestQuery::from(self.query_with_token())) + .send() + .await + .unwrap() + } + + async fn delete(&self, path: &str) -> Response { + reqwest::Client::new() + .delete(self.base_url(path).clone()) + .query(&ReqwestQuery::from(self.query_with_token())) + .send() + .await + .unwrap() + } + + pub async fn get_request_with_query(&self, path: &str, params: Query) -> Response { + get(&self.base_url(path), Some(params)).await + } + + pub async fn get_request(&self, path: &str) -> Response { + get(&self.base_url(path), None).await + } + + fn query_with_token(&self) -> Query { + match &self.connection_info.api_token { + Some(token) => Query::params([QueryParam::new("token", token)].to_vec()), + None => Query::default(), + } + } + + fn base_url(&self, path: &str) -> String { + format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) + } +} + +async fn get(path: &str, query: Option) -> Response { + match query { + Some(params) => reqwest::Client::builder() + .build() + .unwrap() + .get(path) + .query(&ReqwestQuery::from(params)) + .send() + .await + .unwrap(), + None => reqwest::Client::builder().build().unwrap().get(path).send().await.unwrap(), + } +} diff --git a/tests/servers/api/v1/contract/authentication.rs b/tests/servers/api/v1/contract/authentication.rs new file mode 100644 index 000000000..fb8de1810 --- /dev/null +++ b/tests/servers/api/v1/contract/authentication.rs @@ -0,0 +1,83 @@ +use torrust_tracker_test_helpers::configuration; + +use crate::common::http::{Query, QueryParam}; +use crate::servers::api::test_environment::running_test_environment; +use crate::servers::api::v1::asserts::{assert_token_not_valid, assert_unauthorized}; +use crate::servers::api::v1::client::Client; + +#[tokio::test] +async fn should_authenticate_requests_by_using_a_token_query_param() { + let test_env = 
running_test_environment(configuration::ephemeral()).await; + + let token = test_env.get_connection_info().api_token.unwrap(); + + let response = Client::new(test_env.get_connection_info()) + .get_request_with_query("stats", Query::params([QueryParam::new("token", &token)].to_vec())) + .await; + + assert_eq!(response.status(), 200); + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_authenticate_requests_when_the_token_is_missing() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let response = Client::new(test_env.get_connection_info()) + .get_request_with_query("stats", Query::default()) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_authenticate_requests_when_the_token_is_empty() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let response = Client::new(test_env.get_connection_info()) + .get_request_with_query("stats", Query::params([QueryParam::new("token", "")].to_vec())) + .await; + + assert_token_not_valid(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_authenticate_requests_when_the_token_is_invalid() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let response = Client::new(test_env.get_connection_info()) + .get_request_with_query("stats", Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec())) + .await; + + assert_token_not_valid(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_query() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let token = test_env.get_connection_info().api_token.unwrap(); + + // At the beginning of the query component + let response = Client::new(test_env.get_connection_info()) + .get_request(&format!("torrents?token={token}&limit=1")) + .await; + + 
assert_eq!(response.status(), 200); + + // At the end of the query component + let response = Client::new(test_env.get_connection_info()) + .get_request(&format!("torrents?limit=1&token={token}")) + .await; + + assert_eq!(response.status(), 200); + + test_env.stop().await; +} diff --git a/tests/servers/api/v1/contract/configuration.rs b/tests/servers/api/v1/contract/configuration.rs new file mode 100644 index 000000000..e4b608607 --- /dev/null +++ b/tests/servers/api/v1/contract/configuration.rs @@ -0,0 +1,17 @@ +use torrust_tracker_test_helpers::configuration; + +use crate::servers::api::test_environment::stopped_test_environment; + +#[tokio::test] +#[should_panic] +async fn should_fail_with_ssl_enabled_and_bad_ssl_config() { + let mut test_env = stopped_test_environment(configuration::ephemeral()); + + let cfg = test_env.config_mut(); + + cfg.ssl_enabled = true; + cfg.ssl_key_path = Some("bad key path".to_string()); + cfg.ssl_cert_path = Some("bad cert path".to_string()); + + test_env.start().await; +} diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs new file mode 100644 index 000000000..a99272e84 --- /dev/null +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -0,0 +1,265 @@ +use std::time::Duration; + +use torrust_tracker::tracker::auth::Key; +use torrust_tracker_test_helpers::configuration; + +use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::servers::api::force_database_error; +use crate::servers::api::test_environment::running_test_environment; +use crate::servers::api::v1::asserts::{ + assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, + assert_invalid_auth_key_param, assert_invalid_key_duration_param, assert_ok, assert_token_not_valid, assert_unauthorized, +}; +use crate::servers::api::v1::client::Client; + +#[tokio::test] +async fn 
should_allow_generating_a_new_auth_key() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + + let response = Client::new(test_env.get_connection_info()) + .generate_auth_key(seconds_valid) + .await; + + let auth_key_resource = assert_auth_key_utf8(response).await; + + // Verify the key with the tracker + assert!(test_env + .tracker + .verify_auth_key(&auth_key_resource.key.parse::().unwrap()) + .await + .is_ok()); + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .generate_auth_key(seconds_valid) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .generate_auth_key(seconds_valid) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let invalid_key_durations = [ + // "", it returns 404 + // " ", it returns 404 + "-1", "text", + ]; + + for invalid_key_duration in invalid_key_durations { + let response = Client::new(test_env.get_connection_info()) + .post(&format!("key/{invalid_key_duration}")) + .await; + + assert_invalid_key_duration_param(response, invalid_key_duration).await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_the_auth_key_cannot_be_generated() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + force_database_error(&test_env.tracker); + + let seconds_valid = 60; + let response = 
Client::new(test_env.get_connection_info()) + .generate_auth_key(seconds_valid) + .await; + + assert_failed_to_generate_key(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_deleting_an_auth_key() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + let auth_key = test_env + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(test_env.get_connection_info()) + .delete_auth_key(&auth_key.key.to_string()) + .await; + + assert_ok(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let invalid_auth_keys = [ + // "", it returns a 404 + // " ", it returns a 404 + "0", + "-1", + "INVALID AUTH KEY ID", + "IrweYtVuQPGbG9Jzx1DihcPmJGGpVy8", // 32 char key cspell:disable-line + "IrweYtVuQPGbG9Jzx1DihcPmJGGpVy8zs", // 34 char key cspell:disable-line + ]; + + for invalid_auth_key in &invalid_auth_keys { + let response = Client::new(test_env.get_connection_info()) + .delete_auth_key(invalid_auth_key) + .await; + + assert_invalid_auth_key_param(response, invalid_auth_key).await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_the_auth_key_cannot_be_deleted() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + let auth_key = test_env + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + force_database_error(&test_env.tracker); + + let response = Client::new(test_env.get_connection_info()) + .delete_auth_key(&auth_key.key.to_string()) + .await; + + assert_failed_to_delete_key(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { + let test_env = 
running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + + // Generate new auth key + let auth_key = test_env + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .delete_auth_key(&auth_key.key.to_string()) + .await; + + assert_token_not_valid(response).await; + + // Generate new auth key + let auth_key = test_env + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .delete_auth_key(&auth_key.key.to_string()) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_reloading_keys() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + test_env + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(test_env.get_connection_info()).reload_keys().await; + + assert_ok(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_keys_cannot_be_reloaded() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + test_env + .tracker + .generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + force_database_error(&test_env.tracker); + + let response = Client::new(test_env.get_connection_info()).reload_keys().await; + + assert_failed_to_reload_keys(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_reloading_keys_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let seconds_valid = 60; + test_env + .tracker + 
.generate_auth_key(Duration::from_secs(seconds_valid)) + .await + .unwrap(); + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .reload_keys() + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .reload_keys() + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} diff --git a/tests/servers/api/v1/contract/context/mod.rs b/tests/servers/api/v1/contract/context/mod.rs new file mode 100644 index 000000000..6d3fb7566 --- /dev/null +++ b/tests/servers/api/v1/contract/context/mod.rs @@ -0,0 +1,4 @@ +pub mod auth_key; +pub mod stats; +pub mod torrent; +pub mod whitelist; diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs new file mode 100644 index 000000000..45f7e604a --- /dev/null +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -0,0 +1,71 @@ +use std::str::FromStr; + +use torrust_tracker::servers::apis::v1::context::stats::resources::Stats; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_test_helpers::configuration; + +use crate::common::fixtures::PeerBuilder; +use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::servers::api::test_environment::running_test_environment; +use crate::servers::api::v1::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; +use crate::servers::api::v1::client::Client; + +#[tokio::test] +async fn should_allow_getting_tracker_statistics() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + test_env + .add_torrent_peer( + &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + &PeerBuilder::default().into(), + ) + .await; + + let response = 
Client::new(test_env.get_connection_info()).get_tracker_statistics().await; + + assert_stats( + response, + Stats { + torrents: 1, + seeders: 1, + completed: 0, + leechers: 0, + tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + udp6_announces_handled: 0, + udp6_scrapes_handled: 0, + }, + ) + .await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .get_tracker_statistics() + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .get_tracker_statistics() + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs new file mode 100644 index 000000000..ab497787f --- /dev/null +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -0,0 +1,251 @@ +use std::str::FromStr; + +use torrust_tracker::servers::apis::v1::context::torrent::resources::peer::Peer; +use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{self, Torrent}; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_test_helpers::configuration; + +use crate::common::fixtures::PeerBuilder; +use crate::common::http::{Query, QueryParam}; +use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use 
crate::servers::api::test_environment::running_test_environment; +use crate::servers::api::v1::asserts::{ + assert_bad_request, assert_invalid_infohash_param, assert_not_found, assert_token_not_valid, assert_torrent_info, + assert_torrent_list, assert_torrent_not_known, assert_unauthorized, +}; +use crate::servers::api::v1::client::Client; +use crate::servers::api::v1::contract::fixtures::{ + invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found, +}; + +#[tokio::test] +async fn should_allow_getting_torrents() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + test_env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; + + let response = Client::new(test_env.get_connection_info()).get_torrents(Query::empty()).await; + + assert_torrent_list( + response, + vec![torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_limiting_the_torrents_in_the_result() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + // torrents are ordered alphabetically by infohashes + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); + + test_env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + test_env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; + + let response = Client::new(test_env.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) + .await; + + assert_torrent_list( + response, + vec![torrent::ListItem { + info_hash: 
"0b3aea4adc213ce32295be85d3883a63bca25446".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_the_torrents_result_pagination() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + // torrents are ordered alphabetically by infohashes + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); + + test_env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + test_env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; + + let response = Client::new(test_env.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) + .await; + + assert_torrent_list( + response, + vec![torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: None, // Torrent list does not include the peer list for each torrent + }], + ) + .await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let invalid_offsets = [" ", "-1", "1.1", "INVALID OFFSET"]; + + for invalid_offset in &invalid_offsets { + let response = Client::new(test_env.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) + .await; + + assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_parsed() { + let test_env = 
running_test_environment(configuration::ephemeral()).await; + + let invalid_limits = [" ", "-1", "1.1", "INVALID LIMIT"]; + + for invalid_limit in &invalid_limits { + let response = Client::new(test_env.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) + .await; + + assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_getting_torrents_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .get_torrents(Query::empty()) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .get_torrents(Query::default()) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_getting_a_torrent_info() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + let peer = PeerBuilder::default().into(); + + test_env.add_torrent_peer(&info_hash, &peer).await; + + let response = Client::new(test_env.get_connection_info()) + .get_torrent(&info_hash.to_string()) + .await; + + assert_torrent_info( + response, + Torrent { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: Some(vec![Peer::from(peer)]), + }, + ) + .await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = 
InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + let response = Client::new(test_env.get_connection_info()) + .get_torrent(&info_hash.to_string()) + .await; + + assert_torrent_not_known(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invalid() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + for invalid_infohash in &invalid_infohashes_returning_bad_request() { + let response = Client::new(test_env.get_connection_info()) + .get_torrent(invalid_infohash) + .await; + + assert_invalid_infohash_param(response, invalid_infohash).await; + } + + for invalid_infohash in &invalid_infohashes_returning_not_found() { + let response = Client::new(test_env.get_connection_info()) + .get_torrent(invalid_infohash) + .await; + + assert_not_found(response).await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + test_env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .get_torrent(&info_hash.to_string()) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .get_torrent(&info_hash.to_string()) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs new file mode 100644 index 000000000..60ab4c901 --- /dev/null +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -0,0 +1,260 @@ +use 
std::str::FromStr; + +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_test_helpers::configuration; + +use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::servers::api::force_database_error; +use crate::servers::api::test_environment::running_test_environment; +use crate::servers::api::v1::asserts::{ + assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, + assert_invalid_infohash_param, assert_not_found, assert_ok, assert_token_not_valid, assert_unauthorized, +}; +use crate::servers::api::v1::client::Client; +use crate::servers::api::v1::contract::fixtures::{ + invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found, +}; + +#[tokio::test] +async fn should_allow_whitelisting_a_torrent() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let response = Client::new(test_env.get_connection_info()) + .whitelist_a_torrent(&info_hash) + .await; + + assert_ok(response).await; + assert!( + test_env + .tracker + .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + .await + ); + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let api_client = Client::new(test_env.get_connection_info()); + + let response = api_client.whitelist_a_torrent(&info_hash).await; + assert_ok(response).await; + + let response = api_client.whitelist_a_torrent(&info_hash).await; + assert_ok(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { + let test_env = 
running_test_environment(configuration::ephemeral()).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .whitelist_a_torrent(&info_hash) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .whitelist_a_torrent(&info_hash) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_the_torrent_cannot_be_whitelisted() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + force_database_error(&test_env.tracker); + + let response = Client::new(test_env.get_connection_info()) + .whitelist_a_torrent(&info_hash) + .await; + + assert_failed_to_whitelist_torrent(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invalid() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + for invalid_infohash in &invalid_infohashes_returning_bad_request() { + let response = Client::new(test_env.get_connection_info()) + .whitelist_a_torrent(invalid_infohash) + .await; + + assert_invalid_infohash_param(response, invalid_infohash).await; + } + + for invalid_infohash in &invalid_infohashes_returning_not_found() { + let response = Client::new(test_env.get_connection_info()) + .whitelist_a_torrent(invalid_infohash) + .await; + + assert_not_found(response).await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_removing_a_torrent_from_the_whitelist() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = 
InfoHash::from_str(&hash).unwrap(); + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + let response = Client::new(test_env.get_connection_info()) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_ok(response).await; + assert!(!test_env.tracker.is_info_hash_whitelisted(&info_hash).await); + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let response = Client::new(test_env.get_connection_info()) + .remove_torrent_from_whitelist(&non_whitelisted_torrent_hash) + .await; + + assert_ok(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_infohash_is_invalid() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + for invalid_infohash in &invalid_infohashes_returning_bad_request() { + let response = Client::new(test_env.get_connection_info()) + .remove_torrent_from_whitelist(invalid_infohash) + .await; + + assert_invalid_infohash_param(response, invalid_infohash).await; + } + + for invalid_infohash in &invalid_infohashes_returning_not_found() { + let response = Client::new(test_env.get_connection_info()) + .remove_torrent_from_whitelist(invalid_infohash) + .await; + + assert_not_found(response).await; + } + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + force_database_error(&test_env.tracker); + + let response 
= Client::new(test_env.get_connection_info()) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_failed_to_remove_torrent_from_whitelist(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_invalid_token( + test_env.get_connection_info().bind_address.as_str(), + )) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_token_not_valid(response).await; + + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_unauthorized(response).await; + + test_env.stop().await; +} + +#[tokio::test] +async fn should_allow_reload_the_whitelist_from_the_database() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + let response = Client::new(test_env.get_connection_info()).reload_whitelist().await; + + assert_ok(response).await; + /* todo: this assert fails because the whitelist has not been reloaded yet. + We could add a new endpoint GET /api/whitelist/:info_hash to check if a torrent + is whitelisted and use that endpoint to check if the torrent is still there after reloading. 
+ assert!( + !(test_env + .tracker + .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + .await) + ); + */ + + test_env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + force_database_error(&test_env.tracker); + + let response = Client::new(test_env.get_connection_info()).reload_whitelist().await; + + assert_failed_to_reload_whitelist(response).await; + + test_env.stop().await; +} diff --git a/tests/servers/api/v1/contract/fixtures.rs b/tests/servers/api/v1/contract/fixtures.rs new file mode 100644 index 000000000..6d147f190 --- /dev/null +++ b/tests/servers/api/v1/contract/fixtures.rs @@ -0,0 +1,13 @@ +use crate::common::fixtures::invalid_info_hashes; + +// When these infohashes are used in URL path params +// the response is a custom response returned in the handler +pub fn invalid_infohashes_returning_bad_request() -> Vec { + invalid_info_hashes() +} + +// When these infohashes are used in URL path params +// the response is an Axum response returned in the handler +pub fn invalid_infohashes_returning_not_found() -> Vec { + [String::new(), " ".to_string()].to_vec() +} diff --git a/tests/servers/api/v1/contract/mod.rs b/tests/servers/api/v1/contract/mod.rs new file mode 100644 index 000000000..38b4a2b37 --- /dev/null +++ b/tests/servers/api/v1/contract/mod.rs @@ -0,0 +1,4 @@ +pub mod authentication; +pub mod configuration; +pub mod context; +pub mod fixtures; diff --git a/tests/servers/api/v1/mod.rs b/tests/servers/api/v1/mod.rs new file mode 100644 index 000000000..37298b377 --- /dev/null +++ b/tests/servers/api/v1/mod.rs @@ -0,0 +1,3 @@ +pub mod asserts; +pub mod client; +pub mod contract; diff --git 
a/tests/servers/http/asserts.rs b/tests/servers/http/asserts.rs new file mode 100644 index 000000000..3a2e67bf0 --- /dev/null +++ b/tests/servers/http/asserts.rs @@ -0,0 +1,145 @@ +use std::panic::Location; + +use reqwest::Response; + +use super::responses::announce::{Announce, Compact, DeserializedCompact}; +use super::responses::scrape; +use crate::servers::http::responses::error::Error; + +pub fn assert_bencoded_error(response_text: &String, expected_failure_reason: &str, location: &'static Location<'static>) { + let error_failure_reason = serde_bencode::from_str::(response_text) + .unwrap_or_else(|_| panic!( + "response body should be a valid bencoded string for the '{expected_failure_reason}' error, got \"{response_text}\"" + ) + ) + .failure_reason; + + assert!( + error_failure_reason.contains(expected_failure_reason), + r#": + response: `"{error_failure_reason}"` + does not contain: `"{expected_failure_reason}"`, {location}"# + ); +} + +pub async fn assert_empty_announce_response(response: Response) { + assert_eq!(response.status(), 200); + let announce_response: Announce = serde_bencode::from_str(&response.text().await.unwrap()).unwrap(); + assert!(announce_response.peers.is_empty()); +} + +pub async fn assert_announce_response(response: Response, expected_announce_response: &Announce) { + assert_eq!(response.status(), 200); + + let body = response.bytes().await.unwrap(); + + let announce_response: Announce = serde_bencode::from_bytes(&body) + .unwrap_or_else(|_| panic!("response body should be a valid announce response, got \"{:#?}\"", &body)); + + assert_eq!(announce_response, *expected_announce_response); +} + +pub async fn assert_compact_announce_response(response: Response, expected_response: &Compact) { + assert_eq!(response.status(), 200); + + let bytes = response.bytes().await.unwrap(); + + let compact_announce = DeserializedCompact::from_bytes(&bytes).unwrap_or_else(|_| { + panic!( + "response body should be a valid compact announce response, got 
\"{:?}\"", + &bytes + ) + }); + + let actual_response = Compact::from(compact_announce); + + assert_eq!(actual_response, *expected_response); +} + +/// Sample bencoded scrape response as byte array: +/// +/// ```text +/// b"d5:filesd20:\x9c8B\"\x13\xe3\x0b\xff!+0\xc3`\xd2o\x9a\x02\x13d\"d8:completei1e10:downloadedi0e10:incompletei0eeee" +/// ``` +pub async fn assert_scrape_response(response: Response, expected_response: &scrape::Response) { + assert_eq!(response.status(), 200); + + let scrape_response = scrape::Response::try_from_bencoded(&response.bytes().await.unwrap()).unwrap(); + + assert_eq!(scrape_response, *expected_response); +} + +pub async fn assert_is_announce_response(response: Response) { + assert_eq!(response.status(), 200); + let body = response.text().await.unwrap(); + let _announce_response: Announce = serde_bencode::from_str(&body) + .unwrap_or_else(|_| panic!("response body should be a valid announce response, got \"{}\"", &body)); +} + +// Error responses + +// Specific errors for announce request + +pub async fn assert_missing_query_params_for_announce_request_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + "missing query params for announce request", + Location::caller(), + ); +} + +pub async fn assert_bad_announce_request_error_response(response: Response, failure: &str) { + assert_cannot_parse_query_params_error_response(response, &format!(" for announce request: {failure}")).await; +} + +// Specific errors for scrape request + +pub async fn assert_missing_query_params_for_scrape_request_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + "missing query params for scrape request", + Location::caller(), + ); +} + +// Other errors + +pub async fn assert_torrent_not_in_whitelist_error_response(response: Response) { + assert_eq!(response.status(), 200); + + 
assert_bencoded_error(&response.text().await.unwrap(), "is not whitelisted", Location::caller()); +} + +pub async fn assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + "missing or invalid the right most X-Forwarded-For IP (mandatory on reverse proxy tracker configuration)", + Location::caller(), + ); +} + +pub async fn assert_cannot_parse_query_param_error_response(response: Response, failure: &str) { + assert_cannot_parse_query_params_error_response(response, &format!(": {failure}")).await; +} + +pub async fn assert_cannot_parse_query_params_error_response(response: Response, failure: &str) { + assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + &format!("Cannot parse query params{failure}"), + Location::caller(), + ); +} + +pub async fn assert_authentication_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error(&response.text().await.unwrap(), "Authentication error", Location::caller()); +} diff --git a/tests/servers/http/client.rs b/tests/servers/http/client.rs new file mode 100644 index 000000000..f5cdca398 --- /dev/null +++ b/tests/servers/http/client.rs @@ -0,0 +1,99 @@ +use std::net::IpAddr; + +use reqwest::{Client as ReqwestClient, Response}; +use torrust_tracker::tracker::auth::Key; + +use super::requests::announce::{self, Query}; +use super::requests::scrape; + +/// HTTP Tracker Client +pub struct Client { + server_addr: std::net::SocketAddr, + reqwest_client: ReqwestClient, + key: Option, +} + +/// URL components in this context: +/// +/// ```text +/// http://127.0.0.1:62304/announce/YZ....rJ?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// \_____________________/\_______________/ \__________________________________________________________/ +/// | | | +/// base url path query +/// ``` +impl 
Client { + pub fn new(server_addr: std::net::SocketAddr) -> Self { + Self { + server_addr, + reqwest_client: reqwest::Client::builder().build().unwrap(), + key: None, + } + } + + /// Creates the new client binding it to an specific local address + pub fn bind(server_addr: std::net::SocketAddr, local_address: IpAddr) -> Self { + Self { + server_addr, + reqwest_client: reqwest::Client::builder().local_address(local_address).build().unwrap(), + key: None, + } + } + + pub fn authenticated(server_addr: std::net::SocketAddr, key: Key) -> Self { + Self { + server_addr, + reqwest_client: reqwest::Client::builder().build().unwrap(), + key: Some(key), + } + } + + pub async fn announce(&self, query: &announce::Query) -> Response { + self.get(&self.build_announce_path_and_query(query)).await + } + + pub async fn scrape(&self, query: &scrape::Query) -> Response { + self.get(&self.build_scrape_path_and_query(query)).await + } + + pub async fn announce_with_header(&self, query: &Query, key: &str, value: &str) -> Response { + self.get_with_header(&self.build_announce_path_and_query(query), key, value) + .await + } + + pub async fn get(&self, path: &str) -> Response { + self.reqwest_client.get(self.build_url(path)).send().await.unwrap() + } + + pub async fn get_with_header(&self, path: &str, key: &str, value: &str) -> Response { + self.reqwest_client + .get(self.build_url(path)) + .header(key, value) + .send() + .await + .unwrap() + } + + fn build_announce_path_and_query(&self, query: &announce::Query) -> String { + format!("{}?{query}", self.build_path("announce")) + } + + fn build_scrape_path_and_query(&self, query: &scrape::Query) -> String { + format!("{}?{query}", self.build_path("scrape")) + } + + fn build_path(&self, path: &str) -> String { + match &self.key { + Some(key) => format!("{path}/{key}"), + None => path.to_string(), + } + } + + fn build_url(&self, path: &str) -> String { + let base_url = self.base_url(); + format!("{base_url}{path}") + } + + fn base_url(&self) -> 
String { + format!("http://{}/", &self.server_addr) + } +} diff --git a/tests/servers/http/connection_info.rs b/tests/servers/http/connection_info.rs new file mode 100644 index 000000000..5736271fd --- /dev/null +++ b/tests/servers/http/connection_info.rs @@ -0,0 +1,16 @@ +use torrust_tracker::tracker::auth::Key; + +#[derive(Clone, Debug)] +pub struct ConnectionInfo { + pub bind_address: String, + pub key: Option, +} + +impl ConnectionInfo { + pub fn anonymous(bind_address: &str) -> Self { + Self { + bind_address: bind_address.to_string(), + key: None, + } + } +} diff --git a/tests/servers/http/mod.rs b/tests/servers/http/mod.rs new file mode 100644 index 000000000..cb2885df0 --- /dev/null +++ b/tests/servers/http/mod.rs @@ -0,0 +1,28 @@ +pub mod asserts; +pub mod client; +pub mod requests; +pub mod responses; +pub mod test_environment; +pub mod v1; + +use percent_encoding::NON_ALPHANUMERIC; + +pub type ByteArray20 = [u8; 20]; + +pub fn percent_encode_byte_array(bytes: &ByteArray20) -> String { + percent_encoding::percent_encode(bytes, NON_ALPHANUMERIC).to_string() +} + +pub struct InfoHash(ByteArray20); + +impl InfoHash { + pub fn new(vec: &[u8]) -> Self { + let mut byte_array_20: ByteArray20 = Default::default(); + byte_array_20.clone_from_slice(vec); + Self(byte_array_20) + } + + pub fn bytes(&self) -> ByteArray20 { + self.0 + } +} diff --git a/tests/servers/http/requests/announce.rs b/tests/servers/http/requests/announce.rs new file mode 100644 index 000000000..20c5ddaa7 --- /dev/null +++ b/tests/servers/http/requests/announce.rs @@ -0,0 +1,260 @@ +use std::fmt; +use std::net::{IpAddr, Ipv4Addr}; +use std::str::FromStr; + +use serde_repr::Serialize_repr; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker::tracker::peer::Id; + +use crate::servers::http::{percent_encode_byte_array, ByteArray20}; + +pub struct Query { + pub info_hash: ByteArray20, + pub peer_addr: IpAddr, + pub downloaded: BaseTenASCII, + pub uploaded: 
BaseTenASCII, + pub peer_id: ByteArray20, + pub port: PortNumber, + pub left: BaseTenASCII, + pub event: Option, + pub compact: Option, +} + +impl fmt::Display for Query { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.build()) + } +} + +/// HTTP Tracker Announce Request: +/// +/// +/// +/// Some parameters in the specification are not implemented in this tracker yet. +impl Query { + /// It builds the URL query component for the announce request. + /// + /// This custom URL query params encoding is needed because `reqwest` does not allow + /// bytes arrays in query parameters. More info on this issue: + /// + /// + pub fn build(&self) -> String { + self.params().to_string() + } + + pub fn params(&self) -> QueryParams { + QueryParams::from(self) + } +} + +pub type BaseTenASCII = u64; +pub type PortNumber = u16; + +pub enum Event { + //Started, + //Stopped, + Completed, +} + +impl fmt::Display for Event { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + //Event::Started => write!(f, "started"), + //Event::Stopped => write!(f, "stopped"), + Event::Completed => write!(f, "completed"), + } + } +} + +#[derive(Serialize_repr, PartialEq, Debug)] +#[repr(u8)] +pub enum Compact { + Accepted = 1, + NotAccepted = 0, +} + +impl fmt::Display for Compact { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Compact::Accepted => write!(f, "1"), + Compact::NotAccepted => write!(f, "0"), + } + } +} + +pub struct QueryBuilder { + announce_query: Query, +} + +impl QueryBuilder { + pub fn default() -> QueryBuilder { + let default_announce_query = Query { + info_hash: InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0, + peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), + downloaded: 0, + uploaded: 0, + peer_id: Id(*b"-qB00000000000000001").0, + port: 17548, + left: 0, + event: Some(Event::Completed), + compact: Some(Compact::NotAccepted), + }; + Self { + announce_query: 
default_announce_query, + } + } + + pub fn with_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.announce_query.info_hash = info_hash.0; + self + } + + pub fn with_peer_id(mut self, peer_id: &Id) -> Self { + self.announce_query.peer_id = peer_id.0; + self + } + + pub fn with_compact(mut self, compact: Compact) -> Self { + self.announce_query.compact = Some(compact); + self + } + + pub fn with_peer_addr(mut self, peer_addr: &IpAddr) -> Self { + self.announce_query.peer_addr = *peer_addr; + self + } + + pub fn without_compact(mut self) -> Self { + self.announce_query.compact = None; + self + } + + pub fn query(self) -> Query { + self.announce_query + } +} + +/// It contains all the GET parameters that can be used in a HTTP Announce request. +/// +/// Sample Announce URL with all the GET parameters (mandatory and optional): +/// +/// ```text +/// http://127.0.0.1:7070/announce? +/// info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 (mandatory) +/// peer_addr=192.168.1.88 +/// downloaded=0 +/// uploaded=0 +/// peer_id=%2DqB00000000000000000 (mandatory) +/// port=17548 (mandatory) +/// left=0 +/// event=completed +/// compact=0 +/// ``` +pub struct QueryParams { + pub info_hash: Option, + pub peer_addr: Option, + pub downloaded: Option, + pub uploaded: Option, + pub peer_id: Option, + pub port: Option, + pub left: Option, + pub event: Option, + pub compact: Option, +} + +impl std::fmt::Display for QueryParams { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let mut params = vec![]; + + if let Some(info_hash) = &self.info_hash { + params.push(("info_hash", info_hash)); + } + if let Some(peer_addr) = &self.peer_addr { + params.push(("peer_addr", peer_addr)); + } + if let Some(downloaded) = &self.downloaded { + params.push(("downloaded", downloaded)); + } + if let Some(uploaded) = &self.uploaded { + params.push(("uploaded", uploaded)); + } + if let Some(peer_id) = &self.peer_id { + params.push(("peer_id", peer_id)); + } + if let 
Some(port) = &self.port { + params.push(("port", port)); + } + if let Some(left) = &self.left { + params.push(("left", left)); + } + if let Some(event) = &self.event { + params.push(("event", event)); + } + if let Some(compact) = &self.compact { + params.push(("compact", compact)); + } + + let query = params + .iter() + .map(|param| format!("{}={}", param.0, param.1)) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +impl QueryParams { + pub fn from(announce_query: &Query) -> Self { + let event = announce_query.event.as_ref().map(std::string::ToString::to_string); + let compact = announce_query.compact.as_ref().map(std::string::ToString::to_string); + + Self { + info_hash: Some(percent_encode_byte_array(&announce_query.info_hash)), + peer_addr: Some(announce_query.peer_addr.to_string()), + downloaded: Some(announce_query.downloaded.to_string()), + uploaded: Some(announce_query.uploaded.to_string()), + peer_id: Some(percent_encode_byte_array(&announce_query.peer_id)), + port: Some(announce_query.port.to_string()), + left: Some(announce_query.left.to_string()), + event, + compact, + } + } + + pub fn remove_optional_params(&mut self) { + // todo: make them optional with the Option<...> in the AnnounceQuery struct + // if they are really optional. So that we can crete a minimal AnnounceQuery + // instead of removing the optional params afterwards. + // + // The original specification on: + // + // says only `ip` and `event` are optional. + // + // On + // says only `ip`, `numwant`, `key` and `trackerid` are optional. + // + // but the server is responding if all these params are not included. 
+ self.peer_addr = None; + self.downloaded = None; + self.uploaded = None; + self.left = None; + self.event = None; + self.compact = None; + } + + pub fn set(&mut self, param_name: &str, param_value: &str) { + match param_name { + "info_hash" => self.info_hash = Some(param_value.to_string()), + "peer_addr" => self.peer_addr = Some(param_value.to_string()), + "downloaded" => self.downloaded = Some(param_value.to_string()), + "uploaded" => self.uploaded = Some(param_value.to_string()), + "peer_id" => self.peer_id = Some(param_value.to_string()), + "port" => self.port = Some(param_value.to_string()), + "left" => self.left = Some(param_value.to_string()), + "event" => self.event = Some(param_value.to_string()), + "compact" => self.compact = Some(param_value.to_string()), + &_ => panic!("Invalid param name for announce query"), + } + } +} diff --git a/tests/servers/http/requests/mod.rs b/tests/servers/http/requests/mod.rs new file mode 100644 index 000000000..776d2dfbf --- /dev/null +++ b/tests/servers/http/requests/mod.rs @@ -0,0 +1,2 @@ +pub mod announce; +pub mod scrape; diff --git a/tests/servers/http/requests/scrape.rs b/tests/servers/http/requests/scrape.rs new file mode 100644 index 000000000..9e4257d6c --- /dev/null +++ b/tests/servers/http/requests/scrape.rs @@ -0,0 +1,118 @@ +use std::fmt; +use std::str::FromStr; + +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + +use crate::servers::http::{percent_encode_byte_array, ByteArray20}; + +pub struct Query { + pub info_hash: Vec, +} + +impl fmt::Display for Query { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.build()) + } +} + +/// HTTP Tracker Scrape Request: +/// +/// +impl Query { + /// It builds the URL query component for the scrape request. + /// + /// This custom URL query params encoding is needed because `reqwest` does not allow + /// bytes arrays in query parameters. 
More info on this issue: + /// + /// + pub fn build(&self) -> String { + self.params().to_string() + } + + pub fn params(&self) -> QueryParams { + QueryParams::from(self) + } +} + +pub struct QueryBuilder { + scrape_query: Query, +} + +impl QueryBuilder { + pub fn default() -> QueryBuilder { + let default_scrape_query = Query { + info_hash: [InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0].to_vec(), + }; + Self { + scrape_query: default_scrape_query, + } + } + + pub fn with_one_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.scrape_query.info_hash = [info_hash.0].to_vec(); + self + } + + pub fn add_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.scrape_query.info_hash.push(info_hash.0); + self + } + + pub fn query(self) -> Query { + self.scrape_query + } +} + +/// It contains all the GET parameters that can be used in a HTTP Scrape request. +/// +/// The `info_hash` param is the percent encoded of the the 20-byte array info hash. +/// +/// Sample Scrape URL with all the GET parameters: +/// +/// For `IpV4`: +/// +/// ```text +/// http://127.0.0.1:7070/scrape?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// ``` +/// +/// For `IpV6`: +/// +/// ```text +/// http://[::1]:7070/scrape?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// ``` +/// +/// You can add as many info hashes as you want, just adding the same param again. 
+pub struct QueryParams { + pub info_hash: Vec, +} + +impl QueryParams { + pub fn set_one_info_hash_param(&mut self, info_hash: &str) { + self.info_hash = vec![info_hash.to_string()]; + } +} + +impl std::fmt::Display for QueryParams { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let query = self + .info_hash + .iter() + .map(|info_hash| format!("info_hash={}", &info_hash)) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +impl QueryParams { + pub fn from(scrape_query: &Query) -> Self { + let info_hashes = scrape_query + .info_hash + .iter() + .map(percent_encode_byte_array) + .collect::>(); + + Self { info_hash: info_hashes } + } +} diff --git a/tests/servers/http/responses/announce.rs b/tests/servers/http/responses/announce.rs new file mode 100644 index 000000000..8a07ebd5e --- /dev/null +++ b/tests/servers/http/responses/announce.rs @@ -0,0 +1,115 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use serde::{self, Deserialize, Serialize}; +use torrust_tracker::tracker::peer::Peer; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Announce { + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + #[serde(rename = "min interval")] + pub min_interval: u32, + pub peers: Vec, // Peers using IPV4 and IPV6 +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct DictionaryPeer { + pub ip: String, + #[serde(rename = "peer id")] + #[serde(with = "serde_bytes")] + pub peer_id: Vec, + pub port: u16, +} + +impl From for DictionaryPeer { + fn from(peer: Peer) -> Self { + DictionaryPeer { + peer_id: peer.peer_id.to_bytes().to_vec(), + ip: peer.peer_addr.ip().to_string(), + port: peer.peer_addr.port(), + } + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct DeserializedCompact { + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + #[serde(rename = "min interval")] + pub min_interval: u32, + #[serde(with = "serde_bytes")] + pub peers: Vec, +} + +impl 
DeserializedCompact { + pub fn from_bytes(bytes: &[u8]) -> Result { + serde_bencode::from_bytes::(bytes) + } +} + +#[derive(Debug, PartialEq)] +pub struct Compact { + // code-review: there could be a way to deserialize this struct directly + // by using serde instead of doing it manually. Or at least using a custom deserializer. + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + pub min_interval: u32, + pub peers: CompactPeerList, +} + +#[derive(Debug, PartialEq)] +pub struct CompactPeerList { + peers: Vec, +} + +impl CompactPeerList { + pub fn new(peers: Vec) -> Self { + Self { peers } + } +} + +#[derive(Clone, Debug, PartialEq)] +pub struct CompactPeer { + ip: Ipv4Addr, + port: u16, +} + +impl CompactPeer { + pub fn new(socket_addr: &SocketAddr) -> Self { + match socket_addr.ip() { + IpAddr::V4(ip) => Self { + ip, + port: socket_addr.port(), + }, + IpAddr::V6(_ip) => panic!("IPV6 is not supported for compact peer"), + } + } + + pub fn new_from_bytes(bytes: &[u8]) -> Self { + Self { + ip: Ipv4Addr::new(bytes[0], bytes[1], bytes[2], bytes[3]), + port: u16::from_be_bytes([bytes[4], bytes[5]]), + } + } +} + +impl From for Compact { + fn from(compact_announce: DeserializedCompact) -> Self { + let mut peers = vec![]; + + for peer_bytes in compact_announce.peers.chunks_exact(6) { + peers.push(CompactPeer::new_from_bytes(peer_bytes)); + } + + Self { + complete: compact_announce.complete, + incomplete: compact_announce.incomplete, + interval: compact_announce.interval, + min_interval: compact_announce.min_interval, + peers: CompactPeerList::new(peers), + } + } +} diff --git a/tests/servers/http/responses/error.rs b/tests/servers/http/responses/error.rs new file mode 100644 index 000000000..12c53a0cf --- /dev/null +++ b/tests/servers/http/responses/error.rs @@ -0,0 +1,7 @@ +use serde::{self, Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Error { + #[serde(rename = "failure reason")] + pub failure_reason: String, 
+} diff --git a/tests/servers/http/responses/mod.rs b/tests/servers/http/responses/mod.rs new file mode 100644 index 000000000..bdc689056 --- /dev/null +++ b/tests/servers/http/responses/mod.rs @@ -0,0 +1,3 @@ +pub mod announce; +pub mod error; +pub mod scrape; diff --git a/tests/servers/http/responses/scrape.rs b/tests/servers/http/responses/scrape.rs new file mode 100644 index 000000000..221ff0a38 --- /dev/null +++ b/tests/servers/http/responses/scrape.rs @@ -0,0 +1,196 @@ +use std::collections::HashMap; +use std::str; + +use serde::{self, Deserialize, Serialize}; +use serde_bencode::value::Value; + +use crate::servers::http::{ByteArray20, InfoHash}; + +#[derive(Debug, PartialEq, Default)] +pub struct Response { + pub files: HashMap, +} + +impl Response { + pub fn with_one_file(info_hash_bytes: ByteArray20, file: File) -> Self { + let mut files: HashMap = HashMap::new(); + files.insert(info_hash_bytes, file); + Self { files } + } + + pub fn try_from_bencoded(bytes: &[u8]) -> Result { + let scrape_response: DeserializedResponse = serde_bencode::from_bytes(bytes).unwrap(); + Self::try_from(scrape_response) + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Default)] +pub struct File { + pub complete: i64, // The number of active peers that have completed downloading + pub downloaded: i64, // The number of peers that have ever completed downloading + pub incomplete: i64, // The number of active peers that have not completed downloading +} + +impl File { + pub fn zeroed() -> Self { + Self::default() + } +} + +impl TryFrom for Response { + type Error = BencodeParseError; + + fn try_from(scrape_response: DeserializedResponse) -> Result { + parse_bencoded_response(&scrape_response.files) + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +struct DeserializedResponse { + pub files: Value, +} + +pub struct ResponseBuilder { + response: Response, +} + +impl ResponseBuilder { + pub fn default() -> Self { + Self { + response: Response::default(), + } + } 
+ + pub fn add_file(mut self, info_hash_bytes: ByteArray20, file: File) -> Self { + self.response.files.insert(info_hash_bytes, file); + self + } + + pub fn build(self) -> Response { + self.response + } +} + +#[derive(Debug)] +pub enum BencodeParseError { + InvalidValueExpectedDict { value: Value }, + InvalidValueExpectedInt { value: Value }, + InvalidFileField { value: Value }, + MissingFileField { field_name: String }, +} + +/// It parses a bencoded scrape response into a `Response` struct. +/// +/// For example: +/// +/// ```text +/// d5:filesd20:xxxxxxxxxxxxxxxxxxxxd8:completei11e10:downloadedi13772e10:incompletei19e +/// 20:yyyyyyyyyyyyyyyyyyyyd8:completei21e10:downloadedi206e10:incompletei20eee +/// ``` +/// +/// Response (JSON encoded for readability): +/// +/// ```text +/// { +/// 'files': { +/// 'xxxxxxxxxxxxxxxxxxxx': {'complete': 11, 'downloaded': 13772, 'incomplete': 19}, +/// 'yyyyyyyyyyyyyyyyyyyy': {'complete': 21, 'downloaded': 206, 'incomplete': 20} +/// } +/// } +fn parse_bencoded_response(value: &Value) -> Result { + let mut files: HashMap = HashMap::new(); + + match value { + Value::Dict(dict) => { + for file_element in dict { + let info_hash_byte_vec = file_element.0; + let file_value = file_element.1; + + let file = parse_bencoded_file(file_value).unwrap(); + + files.insert(InfoHash::new(info_hash_byte_vec).bytes(), file); + } + } + _ => return Err(BencodeParseError::InvalidValueExpectedDict { value: value.clone() }), + } + + Ok(Response { files }) +} + +/// It parses a bencoded dictionary into a `File` struct. 
+/// +/// For example: +/// +/// +/// ```text +/// d8:completei11e10:downloadedi13772e10:incompletei19ee +/// ``` +/// +/// into: +/// +/// ```text +/// File { +/// complete: 11, +/// downloaded: 13772, +/// incomplete: 19, +/// } +/// ``` +fn parse_bencoded_file(value: &Value) -> Result { + let file = match &value { + Value::Dict(dict) => { + let mut complete = None; + let mut downloaded = None; + let mut incomplete = None; + + for file_field in dict { + let field_name = file_field.0; + + let field_value = match file_field.1 { + Value::Int(number) => Ok(*number), + _ => Err(BencodeParseError::InvalidValueExpectedInt { + value: file_field.1.clone(), + }), + }?; + + if field_name == b"complete" { + complete = Some(field_value); + } else if field_name == b"downloaded" { + downloaded = Some(field_value); + } else if field_name == b"incomplete" { + incomplete = Some(field_value); + } else { + return Err(BencodeParseError::InvalidFileField { + value: file_field.1.clone(), + }); + } + } + + if complete.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "complete".to_string(), + }); + } + + if downloaded.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "downloaded".to_string(), + }); + } + + if incomplete.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "incomplete".to_string(), + }); + } + + File { + complete: complete.unwrap(), + downloaded: downloaded.unwrap(), + incomplete: incomplete.unwrap(), + } + } + _ => return Err(BencodeParseError::InvalidValueExpectedDict { value: value.clone() }), + }; + + Ok(file) +} diff --git a/tests/servers/http/test_environment.rs b/tests/servers/http/test_environment.rs new file mode 100644 index 000000000..8d0aaba02 --- /dev/null +++ b/tests/servers/http/test_environment.rs @@ -0,0 +1,120 @@ +use std::sync::Arc; + +use torrust_tracker::servers::http::server::{HttpServer, HttpServerLauncher, RunningHttpServer, StoppedHttpServer}; +use 
torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker::tracker::peer::Peer; +use torrust_tracker::tracker::Tracker; + +use crate::common::app::setup_with_configuration; + +#[allow(clippy::module_name_repetitions, dead_code)] +pub type StoppedTestEnvironment = TestEnvironment>; +#[allow(clippy::module_name_repetitions)] +pub type RunningTestEnvironment = TestEnvironment>; + +pub struct TestEnvironment { + pub cfg: Arc, + pub tracker: Arc, + pub state: S, +} + +#[allow(dead_code)] +pub struct Stopped { + http_server: StoppedHttpServer, +} + +pub struct Running { + http_server: RunningHttpServer, +} + +impl TestEnvironment { + /// Add a torrent to the tracker + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { + self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + } +} + +impl TestEnvironment> { + #[allow(dead_code)] + pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { + let cfg = Arc::new(cfg); + + let tracker = setup_with_configuration(&cfg); + + let http_server = http_server(cfg.http_trackers[0].clone()); + + Self { + cfg, + tracker, + state: Stopped { http_server }, + } + } + + #[allow(dead_code)] + pub async fn start(self) -> TestEnvironment> { + TestEnvironment { + cfg: self.cfg, + tracker: self.tracker.clone(), + state: Running { + http_server: self.state.http_server.start(self.tracker).await.unwrap(), + }, + } + } + + #[allow(dead_code)] + pub fn config(&self) -> &torrust_tracker_configuration::HttpTracker { + &self.state.http_server.cfg + } + + #[allow(dead_code)] + pub fn config_mut(&mut self) -> &mut torrust_tracker_configuration::HttpTracker { + &mut self.state.http_server.cfg + } +} + +impl TestEnvironment> { + pub async fn new_running(cfg: torrust_tracker_configuration::Configuration) -> Self { + let test_env = StoppedTestEnvironment::new_stopped(cfg); + + test_env.start().await + } + + pub async fn stop(self) -> TestEnvironment> { + TestEnvironment { 
+ cfg: self.cfg, + tracker: self.tracker, + state: Stopped { + http_server: self.state.http_server.stop().await.unwrap(), + }, + } + } + + pub fn bind_address(&self) -> &std::net::SocketAddr { + &self.state.http_server.state.bind_addr + } + + #[allow(dead_code)] + pub fn config(&self) -> &torrust_tracker_configuration::HttpTracker { + &self.state.http_server.cfg + } +} + +#[allow(clippy::module_name_repetitions, dead_code)] +pub fn stopped_test_environment( + cfg: torrust_tracker_configuration::Configuration, +) -> StoppedTestEnvironment { + TestEnvironment::new_stopped(cfg) +} + +#[allow(clippy::module_name_repetitions)] +pub async fn running_test_environment( + cfg: torrust_tracker_configuration::Configuration, +) -> RunningTestEnvironment { + TestEnvironment::new_running(cfg).await +} + +pub fn http_server(cfg: torrust_tracker_configuration::HttpTracker) -> StoppedHttpServer { + let http_server = I::new(); + + HttpServer::new(cfg, http_server) +} diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs new file mode 100644 index 000000000..b508dfc39 --- /dev/null +++ b/tests/servers/http/v1/contract.rs @@ -0,0 +1,1425 @@ +use torrust_tracker_test_helpers::configuration; + +use crate::servers::http::test_environment::running_test_environment; + +pub type V1 = torrust_tracker::servers::http::v1::launcher::Launcher; + +#[tokio::test] +async fn test_environment_should_be_started_and_stopped() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + test_env.stop().await; +} + +mod for_all_config_modes { + + mod and_running_on_reverse_proxy { + use torrust_tracker_test_helpers::configuration; + + use crate::servers::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; + use crate::servers::http::client::Client; + use crate::servers::http::requests::announce::QueryBuilder; + use crate::servers::http::test_environment::running_test_environment; + use 
crate::servers::http::v1::contract::V1; + + #[tokio::test] + async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { + // If the tracker is running behind a reverse proxy, the peer IP is the + // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. + + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + + let params = QueryBuilder::default().query().params(); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + + let params = QueryBuilder::default().query().params(); + + let response = Client::new(*test_env.bind_address()) + .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") + .await; + + assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; + + test_env.stop().await; + } + } + + mod receiving_an_announce_request { + + // Announce request documentation: + // + // BEP 03. The BitTorrent Protocol Specification + // https://www.bittorrent.org/beps/bep_0003.html + // + // BEP 23. 
Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Announce + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::str::FromStr; + + use local_ip_address::local_ip; + use reqwest::Response; + use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; + + use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; + use crate::servers::http::asserts::{ + assert_announce_response, assert_bad_announce_request_error_response, assert_cannot_parse_query_param_error_response, + assert_cannot_parse_query_params_error_response, assert_compact_announce_response, assert_empty_announce_response, + assert_is_announce_response, assert_missing_query_params_for_announce_request_error_response, + }; + use crate::servers::http::client::Client; + use crate::servers::http::requests::announce::{Compact, QueryBuilder}; + use crate::servers::http::responses; + use crate::servers::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + #[tokio::test] + async fn should_respond_if_only_the_mandatory_fields_are_provided() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + params.remove_optional_params(); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_is_announce_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_url_query_component_is_empty() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let response = Client::new(*test_env.bind_address()).get("announce").await; + + 
assert_missing_query_params_for_announce_request_error_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_url_query_parameters_are_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let invalid_query_param = "a=b=c"; + + let response = Client::new(*test_env.bind_address()) + .get(&format!("announce?{invalid_query_param}")) + .await; + + assert_cannot_parse_query_param_error_response(response, "invalid param a=b=c").await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_a_mandatory_field_is_missing() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + // Without `info_hash` param + + let mut params = QueryBuilder::default().query().params(); + + params.info_hash = None; + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "missing param info_hash").await; + + // Without `peer_id` param + + let mut params = QueryBuilder::default().query().params(); + + params.peer_id = None; + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "missing param peer_id").await; + + // Without `port` param + + let mut params = QueryBuilder::default().query().params(); + + params.port = None; + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "missing param port").await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_info_hash_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + for invalid_value in &invalid_info_hashes() { + params.set("info_hash", invalid_value); + + let 
response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_cannot_parse_query_params_error_response(response, "").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_not_fail_when_the_peer_address_param_is_invalid() { + // AnnounceQuery does not even contain the `peer_addr` + // The peer IP is obtained in two ways: + // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP. + // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request HTTP header. + + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_is_announce_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_downloaded_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("downloaded", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_uploaded_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("uploaded", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + 
assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_peer_id_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "-qB0000000000000000", // 19 bytes + "-qB000000000000000000", // 21 bytes + ]; + + for invalid_value in invalid_values { + params.set("peer_id", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_port_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("port", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_left_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("left", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_event_param_is_invalid() { + let 
test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "Started", // It should be lowercase to be valid: `started` + "Stopped", // It should be lowercase to be valid: `stopped` + "Completed", // It should be lowercase to be valid: `completed` + ]; + + for invalid_value in invalid_values { + params.set("event", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_compact_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("compact", invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) + .query(), + ) + .await; + + assert_announce_response( + response, + &Announce { + complete: 1, // the peer for this test + incomplete: 0, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, + peers: vec![], + }, + ) + .await; + + test_env.stop().await; + } + + #[tokio::test] + async fn 
should_return_the_list_of_previously_announced_peers() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + + // Add the Peer 1 + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2. This new peer is non included on the response peer list + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .query(), + ) + .await; + + // It should only contain the previously announced peer + assert_announce_response( + response, + &Announce { + complete: 2, + incomplete: 0, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, + peers: vec![DictionaryPeer::from(previously_announced_peer)], + }, + ) + .await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Announce a peer using IPV4 + let peer_using_ipv4 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) + .build(); + test_env.add_torrent_peer(&info_hash, &peer_using_ipv4).await; + + // Announce a peer using IPV6 + let peer_using_ipv6 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 
0x6969, 0x6969)), + 8080, + )) + .build(); + test_env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; + + // Announce the new Peer. + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000003")) + .query(), + ) + .await; + + // The newly announced peer is not included on the response peer list, + // but all the previously announced peers should be included regardless the IP version they are using. + assert_announce_response( + response, + &Announce { + complete: 3, + incomplete: 0, + interval: test_env.tracker.config.announce_interval, + min_interval: test_env.tracker.config.min_announce_interval, + peers: vec![DictionaryPeer::from(peer_using_ipv4), DictionaryPeer::from(peer_using_ipv6)], + }, + ) + .await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let peer = PeerBuilder::default().build(); + + // Add a peer + test_env.add_torrent_peer(&info_hash, &peer).await; + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer.peer_id) + .query(); + + assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); + + let response = Client::new(*test_env.bind_address()).announce(&announce_query).await; + + assert_empty_announce_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_compact_response() { + // Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html + + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + + // Add the Peer 1 + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2 accepting compact responses + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_compact(Compact::Accepted) + .query(), + ) + .await; + + let expected_response = responses::announce::Compact { + complete: 2, + incomplete: 0, + interval: 120, + min_interval: 120, + peers: CompactPeerList::new([CompactPeer::new(&previously_announced_peer.peer_addr)].to_vec()), + }; + + assert_compact_announce_response(response, &expected_response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_not_return_the_compact_response_by_default() { + // code-review: the HTTP tracker does not return the compact response by default if the "compact" + // param is not provided in the announce URL. The BEP 23 suggest to do so. 
+ + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + + // Add the Peer 1 + test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + + // Announce the new Peer 2 without passing the "compact" param + // By default it should respond with the compact peer list + // https://www.bittorrent.org/beps/bep_0023.html + let response = Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .without_compact() + .query(), + ) + .await; + + assert!(!is_a_compact_announce_response(response).await); + + test_env.stop().await; + } + + async fn is_a_compact_announce_response(response: Response) -> bool { + let bytes = response.bytes().await.unwrap(); + let compact_announce = serde_bencode::from_bytes::(&bytes); + compact_announce.is_ok() + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + + drop(stats); + + test_env.stop().await; + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; + + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 1); + + 
drop(stats); + + test_env.stop().await; + } + + #[tokio::test] + async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { + // The tracker ignores the peer address in the request param. It uses the client remote ip address. + + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 0); + + drop(stats); + + test_env.stop().await; + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, 1); + + drop(stats); + + test_env.stop().await; + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; + + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 1); + + drop(stats); + + test_env.stop().await; + } + + #[tokio::test] + async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { + // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
+ + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + Client::new(*test_env.bind_address()) + .announce( + &QueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 0); + + drop(stats); + + test_env.stop().await; + } + + #[tokio::test] + async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let client_ip = local_ip().unwrap(); + + let client = Client::bind(*test_env.bind_address(), client_ip); + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), client_ip); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; + } + + #[tokio::test] + async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + /* We assume that both the client and tracker share the same public IP. 
+ + client <-> tracker <-> Internet + 127.0.0.1 external_ip = "2.137.87.41" + */ + + let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( + IpAddr::from_str("2.137.87.41").unwrap(), + )) + .await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let client = Client::bind(*test_env.bind_address(), client_ip); + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; + } + + #[tokio::test] + async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + /* We assume that both the client and tracker share the same public IP. 
+ + client <-> tracker <-> Internet + ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" + */ + + let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( + IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), + )) + .await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let client = Client::bind(*test_env.bind_address(), client_ip); + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + client.announce(&announce_query).await; + + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + test_env.stop().await; + } + + #[tokio::test] + async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( + ) { + /* + client <-> http proxy <-> tracker <-> Internet + ip: header: config: peer addr: + 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 + */ + + let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let client = Client::new(*test_env.bind_address()); + + let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); + + client + .announce_with_header( + &announce_query, + "X-Forwarded-For", + "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", + ) + .await; + + let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), 
IpAddr::from_str("150.172.238.178").unwrap()); + + test_env.stop().await; + } + } + + mod receiving_an_scrape_request { + + // Scrape documentation: + // + // BEP 48. Tracker Protocol Extension: Scrape + // https://www.bittorrent.org/beps/bep_0048.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Scrape + + use std::net::IpAddr; + use std::str::FromStr; + + use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; + + use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; + use crate::servers::http::asserts::{ + assert_cannot_parse_query_params_error_response, assert_missing_query_params_for_scrape_request_error_response, + assert_scrape_response, + }; + use crate::servers::http::client::Client; + use crate::servers::http::requests; + use crate::servers::http::requests::scrape::QueryBuilder; + use crate::servers::http::responses::scrape::{self, File, ResponseBuilder}; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + //#[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_request_is_empty() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let response = Client::new(*test_env.bind_address()).get("scrape").await; + + assert_missing_query_params_for_scrape_request_error_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_info_hash_param_is_invalid() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let mut params = QueryBuilder::default().query().params(); + + for invalid_value in &invalid_info_hashes() { + params.set_one_info_hash_param(invalid_value); + + let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + + 
assert_cannot_parse_query_params_error_response(response, "").await; + } + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_no_bytes_pending_to_download() + .build(), + ) + .await; + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 1, + downloaded: 0, + incomplete: 0, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + 
test_env.stop().await; + } + + #[tokio::test] + async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_accept_multiple_infohashes() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .add_info_hash(&info_hash1) + .add_info_hash(&info_hash2) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file(info_hash1.bytes(), File::zeroed()) + .add_file(info_hash2.bytes(), File::zeroed()) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp4_scrapes_handled, 1); + + 
drop(stats); + + test_env.stop().await; + } + + #[tokio::test] + async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { + let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let stats = test_env.tracker.get_stats().await; + + assert_eq!(stats.tcp6_scrapes_handled, 1); + + drop(stats); + + test_env.stop().await; + } + } +} + +mod configured_as_whitelisted { + + mod and_receiving_an_announce_request { + use std::str::FromStr; + + use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; + use crate::servers::http::client::Client; + use crate::servers::http::requests::announce::QueryBuilder; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + #[tokio::test] + async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_torrent_not_in_whitelist_error_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_allow_announcing_a_whitelisted_torrent() { + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + + let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .tracker + .add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the torrent to the whitelist"); + + let response = Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_is_announce_response(response).await; + + test_env.stop().await; + } + } + + mod receiving_an_scrape_request { + use std::str::FromStr; + + use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + use torrust_tracker::tracker::peer; + use torrust_tracker_test_helpers::configuration; + + use crate::common::fixtures::PeerBuilder; + use crate::servers::http::asserts::assert_scrape_response; + use crate::servers::http::client::Client; + use crate::servers::http::requests; + use crate::servers::http::responses::scrape::{File, ResponseBuilder}; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { + let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { 
+ let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + test_env + .tracker + .add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the torrent to the whitelist"); + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + } +} + +mod configured_as_private { + + mod and_receiving_an_announce_request { + use std::str::FromStr; + use std::time::Duration; + + use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + use torrust_tracker::tracker::auth::Key; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; + use crate::servers::http::client::Client; + use crate::servers::http::requests::announce::QueryBuilder; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + #[tokio::test] + async fn should_respond_to_authenticated_peers() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let expiring_key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + + let response = Client::authenticated(*test_env.bind_address(), expiring_key.key()) + .announce(&QueryBuilder::default().query()) + .await; + + 
assert_is_announce_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(*test_env.bind_address()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_authentication_error_response(response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_fail_if_the_key_query_param_cannot_be_parsed() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let invalid_key = "INVALID_KEY"; + + let response = Client::new(*test_env.bind_address()) + .get(&format!( + "announce/{invalid_key}?info_hash=%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0" + )) + .await; + + assert_authentication_error_response(response).await; + } + + #[tokio::test] + async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + // The tracker does not have this key + let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let response = Client::authenticated(*test_env.bind_address(), unregistered_key) + .announce(&QueryBuilder::default().query()) + .await; + + assert_authentication_error_response(response).await; + + test_env.stop().await; + } + } + + mod receiving_an_scrape_request { + + use std::str::FromStr; + use std::time::Duration; + + use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + use torrust_tracker::tracker::auth::Key; + use torrust_tracker::tracker::peer; + use 
torrust_tracker_test_helpers::configuration; + + use crate::common::fixtures::PeerBuilder; + use crate::servers::http::asserts::{assert_authentication_error_response, assert_scrape_response}; + use crate::servers::http::client::Client; + use crate::servers::http::requests; + use crate::servers::http::responses::scrape::{File, ResponseBuilder}; + use crate::servers::http::test_environment::running_test_environment; + use crate::servers::http::v1::contract::V1; + + #[tokio::test] + async fn should_fail_if_the_key_query_param_cannot_be_parsed() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let invalid_key = "INVALID_KEY"; + + let response = Client::new(*test_env.bind_address()) + .get(&format!( + "scrape/{invalid_key}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" + )) + .await; + + assert_authentication_error_response(response).await; + } + + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let response = Client::new(*test_env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let expiring_key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + + let response = Client::authenticated(*test_env.bind_address(), expiring_key.key()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { + // There is not authentication error + // code-review: should this really be this way? 
+ + let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + test_env + .add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ) + .await; + + let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); + + let response = Client::authenticated(*test_env.bind_address(), false_key) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + test_env.stop().await; + } + } +} + +mod configured_as_private_and_whitelisted { + + mod and_receiving_an_announce_request {} + + mod receiving_an_scrape_request {} +} diff --git a/tests/servers/http/v1/mod.rs b/tests/servers/http/v1/mod.rs new file mode 100644 index 000000000..2943dbb50 --- /dev/null +++ b/tests/servers/http/v1/mod.rs @@ -0,0 +1 @@ +pub mod contract; diff --git a/tests/servers/mod.rs b/tests/servers/mod.rs new file mode 100644 index 000000000..c19f72020 --- /dev/null +++ b/tests/servers/mod.rs @@ -0,0 +1,5 @@ +extern crate rand; + +mod api; +mod http; +mod udp; diff --git a/tests/servers/udp/asserts.rs b/tests/servers/udp/asserts.rs new file mode 100644 index 000000000..bf8fb6728 --- /dev/null +++ b/tests/servers/udp/asserts.rs @@ -0,0 +1,23 @@ +use aquatic_udp_protocol::{Response, TransactionId}; + +pub fn is_error_response(response: &Response, error_message: &str) -> bool { + match response { + Response::Error(error_response) => error_response.message.starts_with(error_message), + _ => false, + } +} + +pub fn is_connect_response(response: &Response, transaction_id: TransactionId) -> bool { + match response { + 
Response::Connect(connect_response) => connect_response.transaction_id == transaction_id, + _ => false, + } +} + +pub fn is_ipv4_announce_response(response: &Response) -> bool { + matches!(response, Response::AnnounceIpv4(_)) +} + +pub fn is_scrape_response(response: &Response) -> bool { + matches!(response, Response::Scrape(_)) +} diff --git a/tests/servers/udp/client.rs b/tests/servers/udp/client.rs new file mode 100644 index 000000000..75467055e --- /dev/null +++ b/tests/servers/udp/client.rs @@ -0,0 +1,84 @@ +use std::io::Cursor; +use std::sync::Arc; + +use aquatic_udp_protocol::{Request, Response}; +use tokio::net::UdpSocket; +use torrust_tracker::servers::udp::MAX_PACKET_SIZE; + +use crate::servers::udp::source_address; + +#[allow(clippy::module_name_repetitions)] +pub struct UdpClient { + pub socket: Arc, +} + +impl UdpClient { + pub async fn bind(local_address: &str) -> Self { + let socket = UdpSocket::bind(local_address).await.unwrap(); + Self { + socket: Arc::new(socket), + } + } + + pub async fn connect(&self, remote_address: &str) { + self.socket.connect(remote_address).await.unwrap(); + } + + pub async fn send(&self, bytes: &[u8]) -> usize { + self.socket.writable().await.unwrap(); + self.socket.send(bytes).await.unwrap() + } + + pub async fn receive(&self, bytes: &mut [u8]) -> usize { + self.socket.readable().await.unwrap(); + self.socket.recv(bytes).await.unwrap() + } +} + +/// Creates a new `UdpClient` connected to a Udp server +pub async fn new_udp_client_connected(remote_address: &str) -> UdpClient { + let port = 0; // Let OS choose an unused port. 
+ let client = UdpClient::bind(&source_address(port)).await; + client.connect(remote_address).await; + client +} + +#[allow(clippy::module_name_repetitions)] +pub struct UdpTrackerClient { + pub udp_client: UdpClient, +} + +impl UdpTrackerClient { + pub async fn send(&self, request: Request) -> usize { + // Write request into a buffer + let request_buffer = vec![0u8; MAX_PACKET_SIZE]; + let mut cursor = Cursor::new(request_buffer); + + let request_data = match request.write(&mut cursor) { + Ok(_) => { + #[allow(clippy::cast_possible_truncation)] + let position = cursor.position() as usize; + let inner_request_buffer = cursor.get_ref(); + // Return slice which contains written request data + &inner_request_buffer[..position] + } + Err(e) => panic!("could not write request to bytes: {e}."), + }; + + self.udp_client.send(request_data).await + } + + pub async fn receive(&self) -> Response { + let mut response_buffer = [0u8; MAX_PACKET_SIZE]; + + let payload_size = self.udp_client.receive(&mut response_buffer).await; + + Response::from_bytes(&response_buffer[..payload_size], true).unwrap() + } +} + +/// Creates a new `UdpTrackerClient` connected to a Udp Tracker server +pub async fn new_udp_tracker_client_connected(remote_address: &str) -> UdpTrackerClient { + let udp_client = new_udp_client_connected(remote_address).await; + UdpTrackerClient { udp_client } +} diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs new file mode 100644 index 000000000..3187d9871 --- /dev/null +++ b/tests/servers/udp/contract.rs @@ -0,0 +1,160 @@ +// UDP tracker documentation: +// +// BEP 15. 
UDP Tracker Protocol for BitTorrent +// https://www.bittorrent.org/beps/bep_0015.html + +use core::panic; + +use aquatic_udp_protocol::{ConnectRequest, ConnectionId, Response, TransactionId}; +use torrust_tracker::servers::udp::MAX_PACKET_SIZE; +use torrust_tracker_test_helpers::configuration; + +use crate::servers::udp::asserts::is_error_response; +use crate::servers::udp::client::{new_udp_client_connected, UdpTrackerClient}; +use crate::servers::udp::test_environment::running_test_environment; + +fn empty_udp_request() -> [u8; MAX_PACKET_SIZE] { + [0; MAX_PACKET_SIZE] +} + +fn empty_buffer() -> [u8; MAX_PACKET_SIZE] { + [0; MAX_PACKET_SIZE] +} + +async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { + let connect_request = ConnectRequest { transaction_id }; + + client.send(connect_request.into()).await; + + let response = client.receive().await; + + match response { + Response::Connect(connect_response) => connect_response.connection_id, + _ => panic!("error connecting to udp server {:?}", response), + } +} + +#[tokio::test] +async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let client = new_udp_client_connected(&test_env.bind_address().to_string()).await; + + client.send(&empty_udp_request()).await; + + let mut buffer = empty_buffer(); + client.receive(&mut buffer).await; + let response = Response::from_bytes(&buffer, true).unwrap(); + + assert!(is_error_response(&response, "bad request")); +} + +mod receiving_a_connection_request { + use aquatic_udp_protocol::{ConnectRequest, TransactionId}; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::udp::asserts::is_connect_response; + use crate::servers::udp::client::new_udp_tracker_client_connected; + use crate::servers::udp::test_environment::running_test_environment; + + #[tokio::test] + async fn 
should_return_a_connect_response() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; + + let connect_request = ConnectRequest { + transaction_id: TransactionId(123), + }; + + client.send(connect_request.into()).await; + + let response = client.receive().await; + + assert!(is_connect_response(&response, TransactionId(123))); + } +} + +mod receiving_an_announce_request { + use std::net::Ipv4Addr; + + use aquatic_udp_protocol::{ + AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, + TransactionId, + }; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::udp::asserts::is_ipv4_announce_response; + use crate::servers::udp::client::new_udp_tracker_client_connected; + use crate::servers::udp::contract::send_connection_request; + use crate::servers::udp::test_environment::running_test_environment; + + #[tokio::test] + async fn should_return_an_announce_response() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; + + let connection_id = send_connection_request(TransactionId(123), &client).await; + + // Send announce request + + let announce_request = AnnounceRequest { + connection_id: ConnectionId(connection_id.0), + transaction_id: TransactionId(123i32), + info_hash: InfoHash([0u8; 20]), + peer_id: PeerId([255u8; 20]), + bytes_downloaded: NumberOfBytes(0i64), + bytes_uploaded: NumberOfBytes(0i64), + bytes_left: NumberOfBytes(0i64), + event: AnnounceEvent::Started, + ip_address: Some(Ipv4Addr::new(0, 0, 0, 0)), + key: PeerKey(0u32), + peers_wanted: NumberOfPeers(1i32), + port: Port(client.udp_client.socket.local_addr().unwrap().port()), + }; + + client.send(announce_request.into()).await; + + let response = client.receive().await; + + 
assert!(is_ipv4_announce_response(&response)); + } +} + +mod receiving_an_scrape_request { + use aquatic_udp_protocol::{ConnectionId, InfoHash, ScrapeRequest, TransactionId}; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::udp::asserts::is_scrape_response; + use crate::servers::udp::client::new_udp_tracker_client_connected; + use crate::servers::udp::contract::send_connection_request; + use crate::servers::udp::test_environment::running_test_environment; + + #[tokio::test] + async fn should_return_a_scrape_response() { + let test_env = running_test_environment(configuration::ephemeral()).await; + + let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; + + let connection_id = send_connection_request(TransactionId(123), &client).await; + + // Send scrape request + + // Full scrapes are not allowed you need to pass an array of info hashes otherwise + // it will return "bad request" error with empty vector + let info_hashes = vec![InfoHash([0u8; 20])]; + + let scrape_request = ScrapeRequest { + connection_id: ConnectionId(connection_id.0), + transaction_id: TransactionId(123i32), + info_hashes, + }; + + client.send(scrape_request.into()).await; + + let response = client.receive().await; + + assert!(is_scrape_response(&response)); + } +} diff --git a/tests/servers/udp/mod.rs b/tests/servers/udp/mod.rs new file mode 100644 index 000000000..d39c37153 --- /dev/null +++ b/tests/servers/udp/mod.rs @@ -0,0 +1,9 @@ +pub mod asserts; +pub mod client; +pub mod contract; +pub mod test_environment; + +/// Generates the source address for the UDP client +fn source_address(port: u16) -> String { + format!("127.0.0.1:{port}") +} diff --git a/tests/servers/udp/test_environment.rs b/tests/servers/udp/test_environment.rs new file mode 100644 index 000000000..15266d881 --- /dev/null +++ b/tests/servers/udp/test_environment.rs @@ -0,0 +1,100 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use 
torrust_tracker::servers::udp::server::{RunningUdpServer, StoppedUdpServer, UdpServer}; +use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker::tracker::peer::Peer; +use torrust_tracker::tracker::Tracker; + +use crate::common::app::setup_with_configuration; + +#[allow(clippy::module_name_repetitions, dead_code)] +pub type StoppedTestEnvironment = TestEnvironment; +#[allow(clippy::module_name_repetitions)] +pub type RunningTestEnvironment = TestEnvironment; + +pub struct TestEnvironment { + pub cfg: Arc, + pub tracker: Arc, + pub state: S, +} + +#[allow(dead_code)] +pub struct Stopped { + udp_server: StoppedUdpServer, +} + +pub struct Running { + udp_server: RunningUdpServer, +} + +impl TestEnvironment { + /// Add a torrent to the tracker + #[allow(dead_code)] + pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) { + self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + } +} + +impl TestEnvironment { + #[allow(dead_code)] + pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { + let cfg = Arc::new(cfg); + + let tracker = setup_with_configuration(&cfg); + + let udp_server = udp_server(cfg.udp_trackers[0].clone()); + + Self { + cfg, + tracker, + state: Stopped { udp_server }, + } + } + + #[allow(dead_code)] + pub async fn start(self) -> TestEnvironment { + TestEnvironment { + cfg: self.cfg, + tracker: self.tracker.clone(), + state: Running { + udp_server: self.state.udp_server.start(self.tracker).await.unwrap(), + }, + } + } +} + +impl TestEnvironment { + pub async fn new_running(cfg: torrust_tracker_configuration::Configuration) -> Self { + StoppedTestEnvironment::new_stopped(cfg).start().await + } + + #[allow(dead_code)] + pub async fn stop(self) -> TestEnvironment { + TestEnvironment { + cfg: self.cfg, + tracker: self.tracker, + state: Stopped { + udp_server: self.state.udp_server.stop().await.unwrap(), + }, + } + } + + pub fn bind_address(&self) -> SocketAddr { + 
self.state.udp_server.state.bind_address + } +} + +#[allow(clippy::module_name_repetitions, dead_code)] +pub fn stopped_test_environment(cfg: torrust_tracker_configuration::Configuration) -> StoppedTestEnvironment { + TestEnvironment::new_stopped(cfg) +} + +#[allow(clippy::module_name_repetitions)] +pub async fn running_test_environment(cfg: torrust_tracker_configuration::Configuration) -> RunningTestEnvironment { + TestEnvironment::new_running(cfg).await +} + +pub fn udp_server(cfg: torrust_tracker_configuration::UdpTracker) -> StoppedUdpServer { + UdpServer::new(cfg) +} diff --git a/tests/wrk_benchmark_announce.lua b/tests/wrk_benchmark_announce.lua new file mode 100644 index 000000000..c182f8e68 --- /dev/null +++ b/tests/wrk_benchmark_announce.lua @@ -0,0 +1,53 @@ +-- else the randomness would be the same every run +math.randomseed(os.time()) + +local charset = "0123456789ABCDEF" + +function hexToChar(hex) + local n = tonumber(hex, 16) + local f = string.char(n) + return f +end + +function hexStringToCharString(hex) + local ret = {} + local r + for i = 0, 19 do + local x = i * 2 + r = hex:sub(x+1, x+2) + local f = hexToChar(r) + table.insert(ret, f) + end + return table.concat(ret) +end + +function urlEncode(str) + str = string.gsub (str, "([^0-9a-zA-Z !'()*._~-])", -- locale independent + function (c) return string.format ("%%%02X", string.byte(c)) end) + str = string.gsub (str, " ", "+") + return str +end + +function genHexString(length) + local ret = {} + local r + for i = 1, length do + r = math.random(1, #charset) + table.insert(ret, charset:sub(r, r)) + end + return table.concat(ret) +end + +function randomInfoHash() + local hexString = genHexString(40) + local str = hexStringToCharString(hexString) + return urlEncode(str) +end + +-- the request function that will run at each request +request = function() + path = "/announce?info_hash=" .. randomInfoHash() .. 
"&peer_id=-lt0D80-a%D4%10%19%99%A6yh%9A%E1%CD%96&port=54434&uploaded=885&downloaded=0&left=0&corrupt=0&key=A78381BD&numwant=200&compact=1&no_peer_id=1&supportcrypto=1&redundant=0" + headers = {} + headers["X-Forwarded-For"] = "1.1.1.1" + return wrk.format("GET", path, headers) +end