diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 000000000..a88db5f38 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,25 @@ +[alias] +cov = "llvm-cov" +cov-lcov = "llvm-cov --lcov --output-path=./.coverage/lcov.info" +cov-html = "llvm-cov --html" +time = "build --timings --all-targets" + +[build] +rustflags = [ + "-D", + "warnings", + "-D", + "future-incompatible", + "-D", + "let-underscore", + "-D", + "nonstandard-style", + "-D", + "rust-2018-compatibility", + "-D", + "rust-2018-idioms", + "-D", + "rust-2021-compatibility", + "-D", + "unused", +] diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..f42859922 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,16 @@ +/.git +/.git-blame-ignore +/.github +/.gitignore +/.vscode +/bin/ +/tracker.* +/cSpell.json +/data.db +/docker/bin/ +/NOTICE +/README.md +/rustfmt.toml +/storage/ +/target/ +/etc/ diff --git a/.git-blame-ignore b/.git-blame-ignore new file mode 100644 index 000000000..06c439a36 --- /dev/null +++ b/.git-blame-ignore @@ -0,0 +1,4 @@ +# https://git-scm.com/docs/git-blame#Documentation/git-blame.txt---ignore-revs-fileltfilegt + +# Format the world! 
+57bf2000e39dccfc2f8b6e41d6c6f3eac38a3886 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000..2ae8963e3 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +/.github/**/* @torrust/maintainers diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml new file mode 100644 index 000000000..becfbc1df --- /dev/null +++ b/.github/dependabot.yaml @@ -0,0 +1,19 @@ +version: 2 +updates: + - package-ecosystem: github-actions + directory: / + schedule: + interval: daily + target-branch: "develop" + labels: + - "Continuous Integration" + - "Dependencies" + + - package-ecosystem: cargo + directory: / + schedule: + interval: daily + target-branch: "develop" + labels: + - "Build | Project System" + - "Dependencies" diff --git a/.github/labels.json b/.github/labels.json new file mode 100644 index 000000000..778c0c892 --- /dev/null +++ b/.github/labels.json @@ -0,0 +1,260 @@ +[ + { + "name": "- Admin -", + "color": "FFFFFF", + "description": "Enjoyable to Install and Setup our Software", + "aliases": [] + }, + { + "name": "- Contributor -", + "color": "FFFFFF", + "description": "Nice to support Torrust", + "aliases": [] + }, + { + "name": "- Developer -", + "color": "FFFFFF", + "description": "Torrust Improvement Experience", + "aliases": [] + }, + { + "name": "- User -", + "color": "FFFFFF", + "description": "Enjoyable to Use our Software", + "aliases": [] + }, + { + "name": "Blocked", + "color": "000000", + "description": "Has Unsatisfied Dependency", + "aliases": [] + }, + { + "name": "Bug", + "color": "a80506", + "description": "Incorrect Behavior", + "aliases": [] + }, + { + "name": "Build | Project System", + "color": "99AAAA", + "description": "Compiling and Packaging", + "aliases": ["Rust"] + }, + { + "name": "Cannot Reproduce", + "color": "D3D3D3", + "description": "Inconsistent Observations", + "aliases": [] + }, + { + "name": "Code Cleanup / Refactoring", + "color": "055a8b", + "description": "Tidying and Making Neat", + 
"aliases": [] + }, + { + "name": "Continuous Integration", + "color": "41c6b3", + "description": "Workflows and Automation", + "aliases": [] + }, + { + "name": "Dependencies", + "color": "d4f8f6", + "description": "Related to Dependencies", + "aliases": [] + }, + { + "name": "Documentation", + "color": "3d2133", + "description": "Improves Instructions, Guides, and Notices", + "aliases": [] + }, + { + "name": "Duplicate", + "color": "cfd3d7", + "description": "Not Unique", + "aliases": [] + }, + { + "name": "Easy", + "color": "f0cff0", + "description": "Good for Newcomers", + "aliases": [] + }, + { + "name": "Enhancement / Feature Request", + "color": "c9ecbf", + "description": "Something New", + "aliases": [] + }, + { + "name": "External Tools", + "color": "a6006b", + "description": "3rd Party Systems", + "aliases": [] + }, + { + "name": "First Time Contribution", + "color": "f1e0e6", + "description": "Welcome to Torrust", + "aliases": [] + }, + { + "name": "Fixed", + "color": "8e4c42", + "description": "Not a Concern Anymore", + "aliases": [] + }, + { + "name": "Hard", + "color": "2c2c2c", + "description": "Non-Trivial", + "aliases": [] + }, + { + "name": "Help Wanted", + "color": "00896b", + "description": "More Contributions are Appreciated", + "aliases": [] + }, + { + "name": "High Priority", + "color": "ba3fbc", + "description": "Focus Required", + "aliases": [] + }, + { + "name": "Hold Merge", + "color": "9aafbe", + "description": "We are not Ready Yet", + "aliases": [] + }, + { + "name": "Installer | Package", + "color": "ed8b24", + "description": "Distribution to Users", + "aliases": [] + }, + { + "name": "Invalid", + "color": "c1c1c1", + "description": "This doesn't seem right", + "aliases": [] + }, + { + "name": "Legal", + "color": "463e60", + "description": "Licenses and other Official Documents", + "aliases": [] + }, + { + "name": "Low Priority", + "color": "43536b", + "description": "Not our Focus Now", + "aliases": [] + }, + { + "name": "Needs 
Feedback", + "color": "d6946c", + "description": "What does the Community Think?", + "aliases": [] + }, + { + "name": "Needs Rebase", + "color": "FBC002", + "description": "Base Branch has Incompatibilities", + "aliases": [] + }, + { + "name": "Needs Research", + "color": "4bc021", + "description": "We Need to Know More About This", + "aliases": [] + }, + { + "name": "Optimization", + "color": "faeba8", + "description": "Make it Faster", + "aliases": [] + }, + { + "name": "Portability", + "color": "95de82", + "description": "Distribution to More Places", + "aliases": [] + }, + { + "name": "Postponed", + "color": "dadada", + "description": "For Later", + "aliases": [] + }, + { + "name": "Quality & Assurance", + "color": "eea2e8", + "description": "Relates to QA, Testing, and CI", + "aliases": [] + }, + { + "name": "Question / Discussion", + "color": "f89d00", + "description": "Community Feedback", + "aliases": [] + }, + { + "name": "Regression", + "color": "d10588", + "description": "It does not work anymore", + "aliases": [] + }, + { + "name": "Reviewed", + "color": "f4f4ea", + "description": "This Looks Good", + "aliases": [] + }, + { + "name": "Security", + "color": "650606", + "description": "Publicly Connected to Security", + "aliases": [] + }, + { + "name": "Testing", + "color": "c5def5", + "description": "Checking Torrust", + "aliases": [] + }, + { + "name": "Translations", + "color": "0c86af", + "description": "Localization and Cultural Adaptations", + "aliases": [] + }, + { + "name": "Trivial", + "color": "5f9685", + "description": "Something Easy", + "aliases": [] + }, + { + "name": "Won't Fix", + "color": "070003", + "description": "Something Not Relevant", + "aliases": [] + }, + { + "name": "Workaround Possible", + "color": "eae3e7", + "description": "You can still do it another way", + "aliases": [] + }, + { + "name": "good first issue", + "color": "b0fc38", + "description": "Feel free to seek assistance when needed", + "aliases": [] + } +] diff --git 
a/.github/workflows/container.yaml b/.github/workflows/container.yaml new file mode 100644 index 000000000..9f51f3124 --- /dev/null +++ b/.github/workflows/container.yaml @@ -0,0 +1,178 @@ +name: Container + +on: + push: + branches: + - "develop" + - "main" + - "releases/**/*" + pull_request: + branches: + - "develop" + - "main" + +env: + CARGO_TERM_COLOR: always + +jobs: + test: + name: Test (Docker) + runs-on: ubuntu-latest + + strategy: + matrix: + target: [debug, release] + + steps: + - id: setup + name: Setup Toolchain + uses: docker/setup-buildx-action@v3 + + - id: build + name: Build + uses: docker/build-push-action@v6 + with: + file: ./Containerfile + push: false + load: true + target: ${{ matrix.target }} + tags: torrust-tracker:local + cache-from: type=gha + cache-to: type=gha + + - id: inspect + name: Inspect + run: docker image inspect torrust-tracker:local + + - id: checkout + name: Checkout Repository + uses: actions/checkout@v4 + + - id: compose + name: Compose + run: docker compose build + + context: + name: Context + needs: test + runs-on: ubuntu-latest + + outputs: + continue: ${{ steps.check.outputs.continue }} + type: ${{ steps.check.outputs.type }} + version: ${{ steps.check.outputs.version }} + + steps: + - id: check + name: Check Context + run: | + if [[ "${{ github.repository }}" == "torrust/torrust-tracker" ]]; then + if [[ "${{ github.event_name }}" == "push" ]]; then + if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then + + echo "type=development" >> $GITHUB_OUTPUT + echo "continue=true" >> $GITHUB_OUTPUT + echo "On \`main\` Branch, Type: \`development\`" + + elif [[ "${{ github.ref }}" == "refs/heads/develop" ]]; then + + echo "type=development" >> $GITHUB_OUTPUT + echo "continue=true" >> $GITHUB_OUTPUT + echo "On \`develop\` Branch, Type: \`development\`" + + elif [[ $(echo "${{ github.ref }}" | grep -P 
'^(refs\/heads\/releases\/)(v)(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$') ]]; then + + version=$(echo "${{ github.ref }}" | sed -n -E 's/^(refs\/heads\/releases\/)//p') + echo "version=$version" >> $GITHUB_OUTPUT + echo "type=release" >> $GITHUB_OUTPUT + echo "continue=true" >> $GITHUB_OUTPUT + echo "In \`releases/$version\` Branch, Type: \`release\`" + + else + echo "Not Correct Branch. Will Not Continue" + fi + else + echo "Not a Push Event. Will Not Continue" + fi + else + echo "On a Forked Repository. Will Not Continue" + fi + + publish_development: + name: Publish (Development) + environment: dockerhub-torrust + needs: context + if: needs.context.outputs.continue == 'true' && needs.context.outputs.type == 'development' + runs-on: ubuntu-latest + + steps: + - id: meta + name: Docker Meta + uses: docker/metadata-action@v5 + with: + images: | + "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" + tags: | + type=ref,event=branch + + - id: login + name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_HUB_USERNAME }} + password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} + + - id: setup + name: Setup Toolchain + uses: docker/setup-buildx-action@v3 + + - name: Build and push + uses: docker/build-push-action@v6 + with: + file: ./Containerfile + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha + + publish_release: + name: Publish (Release) + environment: dockerhub-torrust + needs: context + if: needs.context.outputs.continue == 'true' && needs.context.outputs.type == 'release' + runs-on: ubuntu-latest + + steps: + - id: meta + name: Docker Meta + uses: docker/metadata-action@v5 + with: + images: | + "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" + tags: 
| + type=semver,value=${{ needs.context.outputs.version }},pattern={{raw}} + type=semver,value=${{ needs.context.outputs.version }},pattern={{version}} + type=semver,value=${{ needs.context.outputs.version }},pattern=v{{major}} + type=semver,value=${{ needs.context.outputs.version }},pattern={{major}}.{{minor}} + + - id: login + name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_HUB_USERNAME }} + password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} + + - id: setup + name: Setup Toolchain + uses: docker/setup-buildx-action@v3 + + - name: Build and push + uses: docker/build-push-action@v6 + with: + file: ./Containerfile + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha diff --git a/.github/workflows/contract.yaml b/.github/workflows/contract.yaml new file mode 100644 index 000000000..2777417e3 --- /dev/null +++ b/.github/workflows/contract.yaml @@ -0,0 +1,58 @@ +name: Contract + +on: + push: + pull_request: + +env: + CARGO_TERM_COLOR: always + +jobs: + contract: + name: Contract + runs-on: ubuntu-latest + + strategy: + matrix: + toolchain: [nightly, stable] + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v4 + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ matrix.toolchain }} + components: llvm-tools-preview + + - id: cache + name: Enable Job Cache + uses: Swatinem/rust-cache@v2 + + - id: tools + name: Install Tools + uses: taiki-e/install-action@v2 + with: + tool: cargo-llvm-cov, cargo-nextest + + - id: pretty-test + name: Install pretty-test + run: cargo install cargo-pretty-test + + - id: contract + name: Run contract + run: | + cargo test --lib --bins + cargo pretty-test --lib --bins + + - id: summary + name: Generate contract Summary + run: | + echo "### Tracker Living Contract! 
:rocket:" >> $GITHUB_STEP_SUMMARY + cargo pretty-test --lib --bins --color=never >> $GITHUB_STEP_SUMMARY + echo '```console' >> $GITHUB_STEP_SUMMARY + echo "$OUTPUT" >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml new file mode 100644 index 000000000..28c1be6d0 --- /dev/null +++ b/.github/workflows/coverage.yaml @@ -0,0 +1,85 @@ +name: Coverage + +on: + push: + branches: + - develop + pull_request_target: + branches: + - develop + +env: + CARGO_TERM_COLOR: always + +jobs: + report: + name: Report + environment: coverage + runs-on: ubuntu-latest + env: + CARGO_INCREMENTAL: "0" + RUSTFLAGS: "-Z profile -C codegen-units=1 -C opt-level=0 -C link-dead-code -C overflow-checks=off -Z panic_abort_tests -C panic=abort" + RUSTDOCFLAGS: "-Z profile -C codegen-units=1 -C opt-level=0 -C link-dead-code -C overflow-checks=off -Z panic_abort_tests -C panic=abort" + + steps: + - id: checkout_push + if: github.event_name == 'push' + name: Checkout Repository (Push) + uses: actions/checkout@v4 + + - id: checkout_pull_request_target + if: github.event_name == 'pull_request_target' + name: Checkout Repository (Pull Request Target) + uses: actions/checkout@v4 + with: + ref: "refs/pull/${{ github.event.pull_request.number }}/head" + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@nightly + with: + toolchain: nightly + components: llvm-tools-preview + + - id: cache + name: Enable Workflow Cache + uses: Swatinem/rust-cache@v2 + + - id: tools + name: Install Tools + uses: taiki-e/install-action@v2 + with: + tool: grcov + + - id: check + name: Run Build Checks + run: cargo check --tests --benches --examples --workspace --all-targets --all-features + + - id: clean + name: Clean Build Directory + run: cargo clean + + - id: build + name: Pre-build Main Project + run: cargo build --workspace --all-targets --all-features --jobs 2 + + - id: build_tests + name: Pre-build Tests + run: 
cargo build --workspace --all-targets --all-features --tests --jobs 2 + + - id: test + name: Run Unit Tests + run: cargo test --tests --workspace --all-targets --all-features + + - id: coverage + name: Generate Coverage Report + uses: alekitto/grcov@v0.2 + + - id: upload + name: Upload Coverage Report + uses: codecov/codecov-action@v3 + with: + token: ${{ secrets.CODECOV_TOKEN }} + files: ${{ steps.coverage.outputs.report }} + verbose: true + fail_ci_if_error: true diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml new file mode 100644 index 000000000..6aa66e985 --- /dev/null +++ b/.github/workflows/deployment.yaml @@ -0,0 +1,61 @@ +name: Deployment + +on: + push: + branches: + - "releases/**/*" + +jobs: + test: + name: Test + runs-on: ubuntu-latest + + strategy: + matrix: + toolchain: [nightly, stable] + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v4 + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ matrix.toolchain }} + + - id: test + name: Run Unit Tests + run: cargo test --tests --benches --examples --workspace --all-targets --all-features + + publish: + name: Publish + environment: deployment + needs: test + runs-on: ubuntu-latest + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v4 + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ matrix.toolchain }} + + - id: publish + name: Publish Crates + env: + CARGO_REGISTRY_TOKEN: "${{ secrets.TORRUST_UPDATE_CARGO_REGISTRY_TOKEN }}" + run: | + cargo publish -p torrust-tracker-contrib-bencode + cargo publish -p torrust-tracker-located-error + cargo publish -p torrust-tracker-primitives + cargo publish -p torrust-tracker-clock + cargo publish -p torrust-tracker-configuration + cargo publish -p torrust-tracker-torrent-repository + cargo publish -p torrust-tracker-test-helpers + cargo publish -p torrust-tracker diff --git 
a/.github/workflows/labels.yaml b/.github/workflows/labels.yaml new file mode 100644 index 000000000..bb8283f30 --- /dev/null +++ b/.github/workflows/labels.yaml @@ -0,0 +1,36 @@ +name: Labels +on: + workflow_dispatch: + push: + branches: + - develop + paths: + - "/.github/labels.json" + +jobs: + export: + name: Export Existing Labels + runs-on: ubuntu-latest + + steps: + - id: backup + name: Export to Workflow Artifact + uses: EndBug/export-label-config@v1 + + sync: + name: Synchronize Labels from Repo + needs: export + runs-on: ubuntu-latest + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v4 + + - id: sync + name: Apply Labels from File + uses: EndBug/label-sync@v2 + with: + config-file: .github/labels.json + delete-other-labels: true + token: ${{ secrets.UPDATE_LABELS }} diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml deleted file mode 100644 index 344b8a025..000000000 --- a/.github/workflows/test_build_release.yml +++ /dev/null @@ -1,57 +0,0 @@ -name: CI - -# Only trigger, when the test workflow succeeded -on: [push, pull_request] - -jobs: - test: - runs-on: ubuntu-latest - env: - CARGO_TERM_COLOR: always - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - - uses: Swatinem/rust-cache@v1 - - name: Run tests - run: cargo test - - build: - needs: test - if: | - github.event_name == 'push' && - github.event.base_ref == 'refs/heads/main' && - startsWith(github.ref, 'refs/tags/v') - runs-on: ubuntu-latest - env: - CARGO_TERM_COLOR: always - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - - uses: Swatinem/rust-cache@v1 - - name: Build torrust tracker - run: cargo build --release - - name: Upload build artifact - uses: actions/upload-artifact@v2 - with: - name: torrust-tracker - path: ./target/release/torrust-tracker - - release: - needs: build - 
runs-on: ubuntu-latest - steps: - - name: Download build artifact - uses: actions/download-artifact@v2 - with: - name: torrust-tracker - - name: Release - uses: softprops/action-gh-release@v1 - with: - files: | - torrust-tracker diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml new file mode 100644 index 000000000..abe6f0a60 --- /dev/null +++ b/.github/workflows/testing.yaml @@ -0,0 +1,155 @@ +name: Testing + +on: + push: + pull_request: + +env: + CARGO_TERM_COLOR: always + +jobs: + format: + name: Formatting + runs-on: ubuntu-latest + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v4 + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: nightly + components: rustfmt + + - id: cache + name: Enable Workflow Cache + uses: Swatinem/rust-cache@v2 + + - id: format + name: Run Formatting-Checks + run: cargo fmt --check + + check: + name: Static Analysis + runs-on: ubuntu-latest + needs: format + + strategy: + matrix: + toolchain: [nightly, stable] + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v4 + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ matrix.toolchain }} + components: clippy + + - id: cache + name: Enable Workflow Cache + uses: Swatinem/rust-cache@v2 + + - id: tools + name: Install Tools + uses: taiki-e/install-action@v2 + with: + tool: cargo-machete + + - id: check + name: Run Build Checks + run: cargo check --tests --benches --examples --workspace --all-targets --all-features + + - id: lint + name: Run Lint Checks + run: cargo clippy --tests --benches --examples --workspace --all-targets --all-features -- -D clippy::correctness -D clippy::suspicious -D clippy::complexity -D clippy::perf -D clippy::style -D clippy::pedantic + + - id: docs + name: Lint Documentation + env: + RUSTDOCFLAGS: "-D warnings" + run: cargo doc --no-deps --bins --examples --workspace --all-features 
+ + - id: clean + name: Clean Build Directory + run: cargo clean + + - id: deps + name: Check Unused Dependencies + run: cargo machete + + + unit: + name: Units + runs-on: ubuntu-latest + needs: check + + strategy: + matrix: + toolchain: [nightly, stable] + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v4 + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ matrix.toolchain }} + components: llvm-tools-preview + + - id: cache + name: Enable Job Cache + uses: Swatinem/rust-cache@v2 + + - id: tools + name: Install Tools + uses: taiki-e/install-action@v2 + with: + tool: cargo-llvm-cov, cargo-nextest + + - id: test-docs + name: Run Documentation Tests + run: cargo test --doc + + - id: test + name: Run Unit Tests + run: cargo test --tests --benches --examples --workspace --all-targets --all-features + + e2e: + name: E2E + runs-on: ubuntu-latest + needs: unit + + strategy: + matrix: + toolchain: [nightly, stable] + + steps: + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ matrix.toolchain }} + components: llvm-tools-preview + + - id: cache + name: Enable Job Cache + uses: Swatinem/rust-cache@v2 + + - id: checkout + name: Checkout Repository + uses: actions/checkout@v4 + + - id: test + name: Run E2E Tests + run: cargo run --bin e2e_tests_runner -- --config-toml-path "./share/default/config/tracker.e2e.container.sqlite3.toml" diff --git a/.gitignore b/.gitignore index cc36c1e59..b60b28991 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,16 @@ -/target +.env **/*.rs.bk -/database.json.bz2 -/database.db +/.coverage/ /.idea/ -/config.toml +/.vscode/launch.json +/data.db +/database.db +/database.json.bz2 +/flamegraph.svg +/storage/ +/target +/tracker.* +/tracker.toml +callgrind.out +perf.data* +*.code-workspace \ No newline at end of file diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 
000000000..934a43eb8 --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,7 @@ +{ + "recommendations": [ + "streetsidesoftware.code-spell-checker", + "rust-lang.rust-analyzer", + "tamasfe.even-better-toml" + ] +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000..caa48dd01 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,35 @@ +{ + "[rust]": { + "editor.formatOnSave": true + }, + "[ignore]": { "rust-analyzer.cargo.extraEnv" : { + "RUSTFLAGS": "-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests", + "RUSTDOCFLAGS": "-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests", + "CARGO_INCREMENTAL": "0", + "RUST_BACKTRACE": "1" + }}, + "rust-analyzer.checkOnSave": true, + "rust-analyzer.check.command": "clippy", + "rust-analyzer.check.allTargets": true, + "rust-analyzer.check.extraArgs": [ + "--", + "-D", + "clippy::correctness", + "-D", + "clippy::suspicious", + "-W", + "clippy::complexity", + "-W", + "clippy::perf", + "-W", + "clippy::style", + "-W", + "clippy::pedantic" + ], + "evenBetterToml.formatter.allowedBlankLines": 1, + "evenBetterToml.formatter.columnWidth": 130, + "evenBetterToml.formatter.trailingNewline": true, + "evenBetterToml.formatter.reorderKeys": true, + "evenBetterToml.formatter.reorderArrays": true, + +} \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 5e90db008..050e22414 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3,1932 +3,4171 @@ version = 3 [[package]] -name = "aho-corasick" -version = "0.7.18" +name = "addr2line" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" dependencies = [ - "memchr", + "gimli", ] 
[[package]] -name = "arrayvec" -version = "0.5.2" +name = "adler2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] -name = "async-trait" -version = "0.1.51" +name = "ahash" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ - "proc-macro2", - "quote", - "syn", + "getrandom", + "once_cell", + "version_check", ] [[package]] -name = "attohttpc" -version = "0.16.3" +name = "ahash" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdb8867f378f33f78a811a8eb9bf108ad99430d7aad43315dd9319c827ef6247" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ - "http", - "log", - "url", - "wildmatch", + "cfg-if", + "once_cell", + "version_check", + "zerocopy", ] [[package]] -name = "atty" -version = "0.2.14" +name = "aho-corasick" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ - "hermit-abi", - "libc", - "winapi", + "memchr", ] [[package]] -name = "autocfg" -version = "1.0.1" +name = "alloc-no-stdlib" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" [[package]] -name = "base64" -version = "0.13.0" +name = "alloc-stdlib" +version = "0.2.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" +dependencies = [ + "alloc-no-stdlib", +] [[package]] -name = "binascii" -version = "0.1.4" +name = "allocator-api2" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] -name = "bitflags" -version = "1.2.1" +name = "android-tzdata" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" [[package]] -name = "block-buffer" -version = "0.7.3" +name = "android_system_properties" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" dependencies = [ - "block-padding", - "byte-tools", - "byteorder", - "generic-array 0.12.3", + "libc", ] [[package]] -name = "block-buffer" -version = "0.9.0" +name = "anes" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "generic-array 0.14.4", -] +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] -name = "block-padding" -version = "0.1.5" +name = "anstream" +version = "0.6.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +checksum = 
"64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" dependencies = [ - "byte-tools", + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", ] [[package]] -name = "buf_redux" -version = "0.8.4" +name = "anstyle" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" + +[[package]] +name = "anstyle-parse" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" +checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" dependencies = [ - "memchr", - "safemem", + "utf8parse", ] [[package]] -name = "bumpalo" -version = "3.8.0" +name = "anstyle-query" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1e260c3a9040a7c19a12468758f4c16f31a81a1fe087482be9570ec864bb6c" +checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" +dependencies = [ + "windows-sys 0.52.0", +] [[package]] -name = "byte-tools" -version = "0.3.1" +name = "anstyle-wincon" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" +checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" +dependencies = [ + "anstyle", + "windows-sys 0.52.0", +] [[package]] -name = "byteorder" -version = "1.4.2" +name = "anyhow" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] -name = "bytes" -version = "1.0.1" +name = "aquatic_peer_id" +version = "0.9.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" +checksum = "f0732a73df221dcb25713849c6ebaf57b85355f669716652a7466f688cc06f25" +dependencies = [ + "compact_str", + "hex", + "quickcheck", + "regex", + "serde", + "zerocopy", +] [[package]] -name = "cc" -version = "1.0.66" +name = "aquatic_udp_protocol" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" +checksum = "0af90e5162f5fcbde33524128f08dc52a779f32512d5f8692eadd4b55c89389e" +dependencies = [ + "aquatic_peer_id", + "byteorder", + "either", + "zerocopy", +] [[package]] -name = "cfg-if" -version = "0.1.10" +name = "arc-swap" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] -name = "cfg-if" -version = "1.0.0" +name = "arrayvec" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] -name = "chrono" -version = "0.4.19" +name = "async-attributes" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" dependencies = [ - "libc", - "num-integer", - "num-traits 0.2.14", - "time", - "winapi", + "quote", + "syn 1.0.109", ] [[package]] -name = "config" -version = "0.11.0" +name = "async-channel" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1b1b9d958c2b1368a663f05538fc1b5975adce1e19f435acceae987aceeeb369" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" dependencies = [ - "lazy_static", - "nom", - "rust-ini", - "serde 1.0.120", - "serde-hjson", - "serde_json", - "toml", - "yaml-rust", + "concurrent-queue", + "event-listener 2.5.3", + "futures-core", ] [[package]] -name = "convert_case" -version = "0.4.0" +name = "async-channel" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +dependencies = [ + "concurrent-queue", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] [[package]] -name = "core-foundation" -version = "0.9.1" +name = "async-compression" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" +checksum = "fec134f64e2bc57411226dfc4e52dec859ddfc7e711fc5e07b612584f000e4aa" dependencies = [ - "core-foundation-sys", - "libc", + "brotli", + "flate2", + "futures-core", + "memchr", + "pin-project-lite", + "tokio", + "zstd", + "zstd-safe", ] [[package]] -name = "core-foundation-sys" -version = "0.8.2" +name = "async-executor" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" +checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" +dependencies = [ + "async-task", + "concurrent-queue", + "fastrand", + "futures-lite", + "slab", +] [[package]] -name = "cpufeatures" -version = "0.2.1" +name = "async-global-executor" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" +checksum = 
"05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ - "libc", + "async-channel 2.3.1", + "async-executor", + "async-io", + "async-lock", + "blocking", + "futures-lite", + "once_cell", + "tokio", ] [[package]] -name = "data-encoding" -version = "2.3.2" +name = "async-io" +version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" +checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" +dependencies = [ + "async-lock", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite", + "parking", + "polling", + "rustix", + "slab", + "tracing", + "windows-sys 0.59.0", +] [[package]] -name = "derive_more" -version = "0.99.17" +name = "async-lock" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "convert_case", - "proc-macro2", - "quote", - "rustc_version", - "syn", + "event-listener 5.3.1", + "event-listener-strategy", + "pin-project-lite", ] [[package]] -name = "digest" -version = "0.8.1" +name = "async-std" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615" dependencies = [ - "generic-array 0.12.3", + "async-attributes", + "async-channel 1.9.0", + "async-global-executor", + "async-io", + "async-lock", + "crossbeam-utils", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite", + "gloo-timers", + "kv-log-macro", + "log", + "memchr", + "once_cell", + "pin-project-lite", + "pin-utils", + "slab", + "wasm-bindgen-futures", ] [[package]] -name = "digest" -version = "0.9.0" +name = "async-task" +version 
= "4.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array 0.14.4", -] +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] -name = "enum-as-inner" -version = "0.3.3" +name = "async-trait" +version = "0.1.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" +checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" dependencies = [ - "heck", "proc-macro2", "quote", - "syn", + "syn 2.0.77", ] [[package]] -name = "env_logger" -version = "0.9.0" +name = "atomic" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" +checksum = "8d818003e740b63afc82337e3160717f4f63078720a810b7b903e70a5d1d2994" dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", + "bytemuck", ] [[package]] -name = "external-ip" -version = "4.1.0" +name = "atomic-waker" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2406194c5c4be3678bd7c1c128237ec589a6a3b7a3b05786971998bda7693c27" -dependencies = [ - "futures", - "http", - "hyper", - "hyper-tls", - "igd", - "log", - "rand 0.8.4", - "trust-dns-resolver", -] +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] -name = "fake-simd" -version = "0.1.2" +name = "autocfg" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] -name = "fallible-iterator" -version = "0.2.0" +name = "aws-lc-rs" +version = "1.9.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" +checksum = "2f95446d919226d587817a7d21379e6eb099b97b45110a7f272a444ca5c54070" +dependencies = [ + "aws-lc-sys", + "mirai-annotations", + "paste", + "zeroize", +] [[package]] -name = "fallible-streaming-iterator" -version = "0.1.9" +name = "aws-lc-sys" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" +checksum = "b3ddc4a5b231dd6958b140ff3151b6412b3f4321fab354f399eec8f14b06df62" +dependencies = [ + "bindgen 0.69.4", + "cc", + "cmake", + "dunce", + "fs_extra", + "libc", + "paste", +] [[package]] -name = "fern" -version = "0.6.0" +name = "axum" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9a4820f0ccc8a7afd67c39a0f1a0f4b07ca1725164271a64939d7aeb9af065" +checksum = "8f43644eed690f5374f1af436ecd6aea01cd201f6fbdf0178adaf6907afb2cec" dependencies = [ - "log", + "async-trait", + "axum-core", + "axum-macros", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper 1.0.1", + "tokio", + "tower 0.5.1", + "tower-layer", + "tower-service", + "tracing", ] [[package]] -name = "fnv" -version = "1.0.7" +name = "axum-client-ip" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +checksum = "72188bed20deb981f3a4a9fe674e5980fd9e9c2bd880baa94715ad5d60d64c67" +dependencies = [ + "axum", + "forwarded-header-value", + "serde", +] [[package]] -name = "foreign-types" -version = "0.3.2" +name = "axum-core" +version = "0.4.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +checksum = "5e6b8ba012a258d63c9adfa28b9ddcf66149da6f986c5b5452e629d5ee64bf00" dependencies = [ - "foreign-types-shared", + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper 1.0.1", + "tower-layer", + "tower-service", + "tracing", ] [[package]] -name = "foreign-types-shared" -version = "0.1.1" +name = "axum-extra" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" +checksum = "73c3220b188aea709cf1b6c5f9b01c3bd936bb08bd2b5184a12b35ac8131b1f9" +dependencies = [ + "axum", + "axum-core", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "serde", + "serde_html_form", + "tower 0.5.1", + "tower-layer", + "tower-service", + "tracing", +] [[package]] -name = "form_urlencoded" -version = "1.0.0" +name = "axum-macros" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ece68d15c92e84fa4f19d3780f1294e5ca82a78a6d515f1efaabcc144688be00" +checksum = "57d123550fa8d071b7255cb0cc04dc302baa6c8c4a79f55701552684d8399bce" dependencies = [ - "matches", - "percent-encoding", + "proc-macro2", + "quote", + "syn 2.0.77", ] [[package]] -name = "futures" -version = "0.3.12" +name = "axum-server" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9052a1a50244d8d5aa9bf55cbc2fb6f357c86cc52e46c62ed390a7180cf150" +checksum = "56bac90848f6a9393ac03c63c640925c4b7c8ca21654de40d53f55964667c7d8" dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", + "arc-swap", + "bytes", "futures-util", + "http", + "http-body", + "http-body-util", + 
"hyper", + "hyper-util", + "pin-project-lite", + "rustls", + "rustls-pemfile", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower 0.4.13", + "tower-service", ] [[package]] -name = "futures-channel" -version = "0.3.12" +name = "backtrace" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2d31b7ec7efab6eefc7c57233bb10b847986139d88cc2f5a02a1ae6871a1846" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ - "futures-core", - "futures-sink", + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", + "windows-targets", ] [[package]] -name = "futures-core" -version = "0.3.12" +name = "base64" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79e5145dde8da7d1b3892dad07a9c98fc04bc39892b1ecc9692cf53e2b780a65" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] -name = "futures-executor" -version = "0.3.12" +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "bigdecimal" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9e59fdc009a4b3096bf94f740a0f2424c082521f20a9b08c5c07c48d90fd9b9" +checksum = "51d712318a27c7150326677b321a5fa91b55f6d9034ffd67f20319e147d40cee" dependencies = [ - "futures-core", - "futures-task", - "futures-util", + "autocfg", + "libm", + "num-bigint", + "num-integer", + "num-traits", ] [[package]] -name = "futures-io" -version = "0.3.12" +name = "binascii" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28be053525281ad8259d47e4de5de657b25e7bac113458555bb4b70bc6870500" +checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] -name = "futures-macro" -version = 
"0.3.12" +name = "bindgen" +version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c287d25add322d9f9abdcdc5927ca398917996600182178774032e9f8258fedd" +checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "proc-macro-hack", + "bitflags", + "cexpr", + "clang-sys", + "itertools 0.12.1", + "lazy_static", + "lazycell", + "log", + "prettyplease", "proc-macro2", "quote", - "syn", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.77", + "which", ] [[package]] -name = "futures-sink" -version = "0.3.12" +name = "bindgen" +version = "0.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "itertools 0.13.0", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.77", +] + +[[package]] +name = "bitflags" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caf5c69029bda2e743fddd0582d1083951d65cc9539aebf8812f36c3491342d6" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] -name = "futures-task" -version = "0.3.12" +name = "bitvec" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13de07eb8ea81ae445aca7b69f5f7bf15d7bf4912d8ca37d6645c77ae8a58d86" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ - "once_cell", + "funty", + "radium", + "tap", + "wyz", ] [[package]] -name = "futures-util" -version = "0.3.12" +name = "block-buffer" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632a8cd0f2a4b3fdea1657f08bde063848c3bd00f9bbf6e256b8be78802e624b" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "futures-channel", - "futures-core", + "generic-array", +] + 
+[[package]] +name = "blocking" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" +dependencies = [ + "async-channel 2.3.1", + "async-task", "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "proc-macro-hack", - "proc-macro-nested", - "slab", + "futures-lite", + "piper", ] [[package]] -name = "generic-array" -version = "0.12.3" +name = "borsh" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" dependencies = [ - "typenum", + "borsh-derive", + "cfg_aliases", ] [[package]] -name = "generic-array" -version = "0.14.4" +name = "borsh-derive" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" dependencies = [ - "typenum", - "version_check", + "once_cell", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.77", + "syn_derive", ] [[package]] -name = "getrandom" -version = "0.1.16" +name = "brotli" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +checksum = "74f7971dbd9326d58187408ab83117d8ac1bb9c17b085fdacd1cf2f598719b6b" dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", + "alloc-no-stdlib", + "alloc-stdlib", + "brotli-decompressor", ] [[package]] -name = "getrandom" -version = "0.2.3" +name = "brotli-decompressor" +version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +checksum = "9a45bd2e4095a8b518033b128020dd4a55aab1c0a381ba4404a472630f4bc362" dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "alloc-no-stdlib", + "alloc-stdlib", ] [[package]] -name = "h2" -version = "0.3.4" +name = "btoi" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f3675cfef6a30c8031cf9e6493ebdc3bb3272a3fea3923c4210d1830e6a472" +checksum = "9dd6407f73a9b8b6162d8a2ef999fe6afd7cc15902ebf42c5cd296addf17e0ad" dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio", - "tokio-util", - "tracing", + "num-traits", ] [[package]] -name = "hashbrown" -version = "0.9.1" +name = "bufstream" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" +checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] -name = "headers" -version = "0.3.3" +name = "bumpalo" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62689dc57c7456e69712607ffcbd0aa1dfcccf9af73727e9b25bc1825375cac3" -dependencies = [ - "base64", - "bitflags", - "bytes", - "headers-core", - "http", - "mime", - "sha-1 0.8.2", - "time", -] +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] -name = "headers-core" -version = "0.2.0" +name = "bytecheck" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +checksum = "23cdc57ce23ac53c931e88a43d06d070a6fd142f2617be5855eb75efc9beb1c2" dependencies = [ - "http", + "bytecheck_derive", + "ptr_meta", + "simdutf8", ] [[package]] -name = "heck" -version = "0.3.3" +name = "bytecheck_derive" +version = "0.6.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" +checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659" dependencies = [ - "unicode-segmentation", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "hermit-abi" -version = "0.1.18" +name = "bytemuck" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" -dependencies = [ - "libc", -] +checksum = "94bbb0ad554ad961ddc5da507a12a29b14e4ae5bda06b19f575a3e6079d2e2ae" [[package]] -name = "hex" -version = "0.4.3" +name = "byteorder" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] -name = "hostname" -version = "0.3.1" +name = "bytes" +version = "1.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" + +[[package]] +name = "camino" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" +checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" dependencies = [ - "libc", - "match_cfg", - "winapi", + "serde", ] [[package]] -name = "http" +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + +[[package]] +name = "castaway" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" +checksum = 
"0abae9be0aaf9ea96a3b1b8b1b55c602ca751eba1b1500220cea4ecbafe7c0d5" dependencies = [ - "bytes", - "fnv", - "itoa", + "rustversion", ] [[package]] -name = "http-body" -version = "0.4.0" +name = "cc" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" +checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" dependencies = [ - "bytes", - "http", + "jobserver", + "libc", + "shlex", ] [[package]] -name = "httparse" -version = "1.5.1" +name = "cexpr" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] [[package]] -name = "httpdate" -version = "0.3.2" +name = "cfg-if" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] -name = "humantime" -version = "2.1.0" +name = "cfg_aliases" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] -name = "hyper" -version = "0.14.2" +name = "chrono" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12219dc884514cb4a6a03737f4413c0e01c23a1b059b0156004b23f1e19dccbe" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "httparse", - "httpdate", - "itoa", - "pin-project", - "socket2", - "tokio", - 
"tower-service", - "tracing", - "want", + "android-tzdata", + "iana-time-zone", + "num-traits", + "serde", + "windows-targets", ] [[package]] -name = "hyper-tls" -version = "0.5.0" +name = "ciborium" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" dependencies = [ - "bytes", - "hyper", - "native-tls", - "tokio", - "tokio-native-tls", + "ciborium-io", + "ciborium-ll", + "serde", ] [[package]] -name = "idna" -version = "0.2.3" +name = "ciborium-io" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" [[package]] -name = "igd" -version = "0.12.0" +name = "ciborium-ll" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c4e7ee8b51e541486d7040883fe1f00e2a9954bcc24fd155b7e4f03ed4b93dd" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" dependencies = [ - "attohttpc", - "log", - "rand 0.8.4", - "url", - "xmltree", + "ciborium-io", + "half", ] [[package]] -name = "indexmap" -version = "1.6.1" +name = "clang-sys" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" dependencies = [ - "autocfg", - "hashbrown", + "glob", + "libc", + "libloading", ] [[package]] -name = "input_buffer" -version = "0.4.0" +name = "clap" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f97967975f448f1a7ddb12b0bc41069d09ed6a1c161a92687e057325db35d413" +checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" dependencies = [ - "bytes", + "clap_builder", + "clap_derive", ] [[package]] -name = "instant" -version = "0.1.10" +name = "clap_builder" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d" +checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" dependencies = [ - "cfg-if 1.0.0", + "anstream", + "anstyle", + "clap_lex", + "strsim", ] [[package]] -name = "ipconfig" -version = "0.2.2" +name = "clap_derive" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ - "socket2", - "widestring", - "winapi", - "winreg", + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.77", ] [[package]] -name = "ipnet" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" - -[[package]] -name = "itoa" -version = "0.4.7" +name = "clap_lex" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] -name = "js-sys" -version = "0.3.55" +name = "cmake" +version = "0.1.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cc9ffccd38c451a86bf13657df244e9c3f37493cce8e5e21e940963777acc84" +checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a" dependencies = [ - "wasm-bindgen", + "cc", ] [[package]] -name = "lazy_static" -version = "1.4.0" +name = "colorchoice" 
+version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" [[package]] -name = "lexical-core" -version = "0.7.6" +name = "compact_str" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6607c62aa161d23d17a9072cc5da0be67cdfc89d3afb1e8d9c842bebc2525ffe" +checksum = "f86b9c4c00838774a6d902ef931eff7470720c51d90c2e32cfe15dc304737b3f" dependencies = [ - "arrayvec", - "bitflags", - "cfg-if 1.0.0", + "castaway", + "cfg-if", + "itoa", "ryu", "static_assertions", ] [[package]] -name = "libc" -version = "0.2.101" +name = "concurrent-queue" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cb00336871be5ed2c8ed44b60ae9959dc5b9f08539422ed43f09e34ecaeba21" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", +] [[package]] -name = "libsqlite3-sys" -version = "0.18.0" +name = "core-foundation" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e704a02bcaecd4a08b93a23f6be59d0bd79cd161e0963e9499165a0a35df7bd" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ - "pkg-config", - "vcpkg", + "core-foundation-sys", + "libc", ] [[package]] -name = "linked-hash-map" -version = "0.5.4" +name = "core-foundation-sys" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] -name = "lock_api" -version = "0.4.4" +name = "cpufeatures" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ - "scopeguard", + "libc", ] [[package]] -name = "log" -version = "0.4.13" +name = "crc32fast" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ - "cfg-if 0.1.10", + "cfg-if", ] [[package]] -name = "lru-cache" -version = "0.1.2" +name = "criterion" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" dependencies = [ - "linked-hash-map", + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "futures", + "is-terminal", + "itertools 0.10.5", + "num-traits", + "once_cell", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "tokio", + "walkdir", ] [[package]] -name = "match_cfg" -version = "0.1.0" +name = "criterion-plot" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools 0.10.5", +] [[package]] -name = "matches" -version = "0.1.8" +name = "crossbeam" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" +checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8" +dependencies = [ + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", +] 
[[package]] -name = "memchr" -version = "2.4.1" +name = "crossbeam-channel" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +dependencies = [ + "crossbeam-utils", +] [[package]] -name = "mime" -version = "0.3.16" +name = "crossbeam-deque" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] [[package]] -name = "mime_guess" -version = "2.0.3" +name = "crossbeam-epoch" +version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "mime", - "unicase", + "crossbeam-utils", ] [[package]] -name = "mio" -version = "0.7.7" +name = "crossbeam-queue" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e50ae3f04d169fcc9bde0b547d1c205219b7157e07ded9c5aff03e0637cb3ed7" +checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" dependencies = [ - "libc", - "log", - "miow", - "ntapi", - "winapi", + "crossbeam-utils", ] [[package]] -name = "miow" -version = "0.3.6" +name = "crossbeam-skiplist" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" +checksum = "df29de440c58ca2cc6e587ec3d22347551a32435fbde9d2bff64e78a9ffa151b" dependencies = [ - "socket2", - "winapi", + "crossbeam-epoch", + "crossbeam-utils", ] [[package]] -name = "multipart" -version = "0.17.1" +name = 
"crossbeam-utils" +version = "0.8.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "crypto-common" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050aeedc89243f5347c3e237e3e13dc76fbe4ae3742a57b94dc14f69acf76d4" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "buf_redux", + "generic-array", + "typenum", +] + +[[package]] +name = "darling" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.77", +] + +[[package]] +name = "darling_macro" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +dependencies = [ + "darling_core", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + +[[package]] +name = "deranged" +version = "0.3.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", + "serde", +] + +[[package]] +name = "derive_more" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", + "unicode-xid", +] + +[[package]] +name = "derive_utils" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65f152f4b8559c4da5d574bafc7af85454d706b4c5fe8b530d508cacbb6807ea" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", +] + +[[package]] +name = "downcast" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" + +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "either" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" + +[[package]] +name = "encoding_rs" +version = "0.8.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "env_logger" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" +dependencies = [ + "log", + "regex", +] + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + +[[package]] +name = "event-listener" +version = "5.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = [ + "event-listener 5.3.1", + "pin-project-lite", +] + +[[package]] +name = "fallible-iterator" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" + +[[package]] +name = "fallible-streaming-iterator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" + +[[package]] 
+name = "fastrand" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" + +[[package]] +name = "figment" +version = "0.10.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cb01cd46b0cf372153850f4c6c272d9cbea2da513e07538405148f95bd789f3" +dependencies = [ + "atomic", + "parking_lot", + "pear", + "serde", + "tempfile", + "toml", + "uncased", + "version_check", +] + +[[package]] +name = "flate2" +version = "1.0.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" +dependencies = [ + "crc32fast", + "libz-sys", + "miniz_oxide", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "forwarded-header-value" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9" +dependencies = [ + "nonempty", + "thiserror", +] + +[[package]] +name = "fragile" +version = "2.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" + +[[package]] +name = "frunk" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "874b6a17738fc273ec753618bac60ddaeac48cb1d7684c3e7bd472e57a28b817" +dependencies = [ + "frunk_core", + "frunk_derives", + "frunk_proc_macros", + "serde", +] + +[[package]] +name = "frunk_core" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3529a07095650187788833d585c219761114005d5976185760cf794d265b6a5c" +dependencies = [ + "serde", +] + +[[package]] +name = "frunk_derives" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e99b8b3c28ae0e84b604c75f721c21dc77afb3706076af5e8216d15fd1deaae3" +dependencies = [ + "frunk_proc_macro_helpers", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "frunk_proc_macro_helpers" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05a956ef36c377977e512e227dcad20f68c2786ac7a54dacece3746046fea5ce" +dependencies = [ + "frunk_core", + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "frunk_proc_macros" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67e86c2c9183662713fea27ea527aad20fb15fee635a71081ff91bf93df4dc51" +dependencies = [ + "frunk_core", + "frunk_proc_macro_helpers", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "futures" +version = "0.3.30" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" + +[[package]] +name = "futures-executor" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" + +[[package]] +name = "futures-lite" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + +[[package]] +name = "futures-macro" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "futures-sink" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" + +[[package]] +name = "futures-task" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" + +[[package]] +name = "futures-timer" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" + +[[package]] +name = "futures-util" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "gimli" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" + +[[package]] +name = "glob" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" + +[[package]] +name = "gloo-timers" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" +dependencies = [ + 
"futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "h2" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap 2.5.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "half" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash 0.7.8", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash 0.8.11", + "allocator-api2", +] + +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" + +[[package]] +name = "home" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "http" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +dependencies = [ + "bytes", + "futures-util", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "hyper" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2", + "http", + "http-body", "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" +dependencies = [ + "futures-util", + "http", + "hyper", + "hyper-util", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", +] + +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http", + "http-body", + "hyper", + "pin-project-lite", + "socket2", + "tokio", + "tower 0.4.13", + "tower-service", + "tracing", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows-core", +] + 
+[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", + "serde", +] + +[[package]] +name = "indexmap" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +dependencies = [ + "equivalent", + "hashbrown 0.14.5", + "serde", +] + +[[package]] +name = "inlinable_string" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" + +[[package]] +name = "io-enum" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53b53d712d99a73eec59ee5e4fe6057f8052142d38eeafbbffcb06b36d738a6e" +dependencies = [ + "derive_utils", +] + +[[package]] +name = "ipnet" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" + +[[package]] +name = "is-terminal" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" +dependencies = [ + "hermit-abi 0.4.0", + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" + +[[package]] +name = "jobserver" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +dependencies = [ + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "kv-log-macro" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] + +[[package]] +name = "lazy_static" 
+version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "libc" +version = "0.2.158" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" + +[[package]] +name = "libloading" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +dependencies = [ + "cfg-if", + "windows-targets", +] + +[[package]] +name = "libm" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" + +[[package]] +name = "libsqlite3-sys" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "libz-sys" +version = "1.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "linux-raw-sys" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" + +[[package]] +name = "local-ip-address" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3669cf5561f8d27e8fc84cc15e58350e70f557d4d65f70e3154e54cd2f8e1782" +dependencies = [ + "libc", + "neli", + "thiserror", + "windows-sys 
0.59.0", +] + +[[package]] +name = "lock_api" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +dependencies = [ + "value-bag", +] + +[[package]] +name = "lru" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37ee39891760e7d94734f6f63fedc29a2e4a152f836120753a72503f09fcf904" +dependencies = [ + "hashbrown 0.14.5", +] + +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +dependencies = [ + "adler2", +] + +[[package]] +name = "mio" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +dependencies = [ + "hermit-abi 0.3.9", + "libc", + 
"wasi", + "windows-sys 0.52.0", +] + +[[package]] +name = "mirai-annotations" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" + +[[package]] +name = "mockall" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a" +dependencies = [ + "cfg-if", + "downcast", + "fragile", + "mockall_derive", + "predicates", + "predicates-tree", +] + +[[package]] +name = "mockall_derive" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "multimap" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" +dependencies = [ + "serde", +] + +[[package]] +name = "mysql" +version = "25.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6ad644efb545e459029b1ffa7c969d830975bd76906820913247620df10050b" +dependencies = [ + "bufstream", + "bytes", + "crossbeam", + "flate2", + "io-enum", + "libc", + "lru", + "mysql_common", + "named_pipe", + "native-tls", + "pem", + "percent-encoding", + "serde", + "serde_json", + "socket2", + "twox-hash", + "url", +] + +[[package]] +name = "mysql-common-derive" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afe0450cc9344afff34915f8328600ab5ae19260802a334d0f72d2d5bdda3bfe" +dependencies = [ + "darling", + "heck 0.4.1", + "num-bigint", + "proc-macro-crate", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.77", + "termcolor", + "thiserror", +] + +[[package]] +name = "mysql_common" +version = "0.32.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "478b0ff3f7d67b79da2b96f56f334431aef65e15ba4b29dd74a4236e29582bdc" +dependencies = [ + "base64 0.21.7", + "bigdecimal", + "bindgen 0.70.1", + "bitflags", + "bitvec", + "btoi", + "byteorder", + "bytes", + "cc", + "cmake", + "crc32fast", + "flate2", + "frunk", + "lazy_static", + "mysql-common-derive", + "num-bigint", + "num-traits", + "rand", + "regex", + "rust_decimal", + "saturating", + "serde", + "serde_json", + "sha1", + "sha2", + "smallvec", + "subprocess", + "thiserror", + "time", + "uuid", + "zstd", +] + +[[package]] +name = "named_pipe" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad9c443cce91fc3e12f017290db75dde490d685cdaaf508d7159d7cf41f0eb2b" +dependencies = [ + "winapi", +] + +[[package]] +name = "native-tls" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "neli" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1100229e06604150b3becd61a4965d5c70f3be1759544ea7274166f4be41ef43" +dependencies = [ + "byteorder", + "libc", + "log", + "neli-proc-macros", +] + +[[package]] +name = "neli-proc-macros" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c168194d373b1e134786274020dae7fc5513d565ea2ebb9bc9ff17ffb69106d4" +dependencies = [ + "either", + "proc-macro2", + "quote", + "serde", + "syn 1.0.109", +] + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + 
+[[package]] +name = "nonempty" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "object" +version = "0.36.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "oorandom" +version = "11.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" + +[[package]] +name = "openssl" +version = "0.10.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" +dependencies = [ + "bitflags", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "openssl-sys" +version = "0.9.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "parking" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" + +[[package]] +name = "parking_lot" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" 
+dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets", +] + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "pear" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdeeaa00ce488657faba8ebf44ab9361f9365a97bd39ffb8a60663f57ff4b467" +dependencies = [ + "inlinable_string", + "pear_codegen", + "yansi", +] + +[[package]] +name = "pear_codegen" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bab5b985dc082b345f812b7df84e1bef27e7207b39e448439ba8bd69c93f147" +dependencies = [ + "proc-macro2", + "proc-macro2-diagnostics", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "pem" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" +dependencies = [ + "base64 0.22.1", + "serde", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "phf" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +dependencies = [ + "phf_shared", +] + +[[package]] +name = "phf_codegen" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8d39688d359e6b34654d328e262234662d16cc0f60ec8dcbe5e718709342a5a" +dependencies = [ + "phf_generator", + "phf_shared", +] + +[[package]] +name = "phf_generator" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" 
+dependencies = [ + "phf_shared", + "rand", +] + +[[package]] +name = "phf_shared" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +dependencies = [ + "siphasher", +] + +[[package]] +name = "pin-project" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "piper" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" +dependencies = [ + "atomic-waker", + "fastrand", + "futures-io", +] + +[[package]] +name = "pkg-config" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" + +[[package]] +name = "plotters" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] 
+name = "plotters-backend" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" + +[[package]] +name = "plotters-svg" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" +dependencies = [ + "plotters-backend", +] + +[[package]] +name = "polling" +version = "3.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc2790cd301dec6cd3b7a025e4815cf825724a51c98dccfe6a3e55f05ffb6511" +dependencies = [ + "cfg-if", + "concurrent-queue", + "hermit-abi 0.4.0", + "pin-project-lite", + "rustix", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "predicates" +version = "3.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e9086cc7640c29a356d1a29fd134380bee9d8f79a17410aa76e7ad295f42c97" +dependencies = [ + "anstyle", + "predicates-core", +] + +[[package]] +name = "predicates-core" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae8177bee8e75d6846599c6b9ff679ed51e882816914eec639944d7c9aa11931" + +[[package]] +name = "predicates-tree" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41b740d195ed3166cd147c8047ec98db0e22ec019eb8eeb76d343b795304fb13" +dependencies = [ + "predicates-core", + "termtree", +] + +[[package]] +name = "prettyplease" +version = "0.2.22" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" +dependencies = [ + "proc-macro2", + "syn 2.0.77", +] + +[[package]] +name = "proc-macro-crate" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro2" +version = "1.0.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "proc-macro2-diagnostics" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", + "version_check", + "yansi", +] + +[[package]] +name = "ptr_meta" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1" +dependencies = [ + "ptr_meta_derive", +] + +[[package]] +name = "ptr_meta_derive" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "quickcheck" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" +dependencies = [ + "env_logger", + "log", + "rand", +] + +[[package]] +name = "quote" +version = "1.0.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r2d2" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" +dependencies = [ + "log", + "parking_lot", + "scheduled-thread-pool", +] + +[[package]] +name = "r2d2_mysql" +version = "25.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93963fe09ca35b0311d089439e944e42a6cb39bf8ea323782ddb31240ba2ae87" +dependencies = [ + "mysql", + "r2d2", +] + +[[package]] +name = "r2d2_sqlite" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb14dba8247a6a15b7fdbc7d389e2e6f03ee9f184f87117706d509c092dfe846" +dependencies = [ + "r2d2", + "rusqlite", + "uuid", +] + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "redox_syscall" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" +dependencies = [ + "bitflags", +] + +[[package]] +name = "regex" +version = "1.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" + +[[package]] +name = "relative-path" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" + +[[package]] +name = "rend" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71fe3824f5629716b1589be05dacd749f6aa084c87e00e016714a8cdfccc997c" +dependencies = [ + "bytecheck", +] + +[[package]] +name = "reqwest" +version = "0.12.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" +dependencies = [ + "base64 0.22.1", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-tls", + "hyper-util", + "ipnet", + "js-sys", "log", "mime", - "mime_guess", - "quick-error", - "rand 0.7.3", - "safemem", - "tempfile", - "twoway", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls-pemfile", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper 1.0.1", + "system-configuration", + "tokio", + "tokio-native-tls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "windows-registry", +] + +[[package]] +name = "ring" +version = "0.17.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +dependencies = [ + "cc", + "cfg-if", + "getrandom", + "libc", + "spin", + "untrusted", + "windows-sys 0.52.0", ] [[package]] -name = "native-tls" -version = "0.2.8" +name = "ringbuf" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48ba9f7719b5a0f42f338907614285fb5fd70e53858141f69898a1fb7203b24d" +checksum = "46f7f1b88601a8ee13cabf203611ccdf64345dc1c5d24de8b11e1a678ee619b6" dependencies = [ - "lazy_static", - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", + "crossbeam-utils", ] 
[[package]] -name = "nom" -version = "5.1.2" +name = "rkyv" +version = "0.7.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" +checksum = "9008cd6385b9e161d8229e1f6549dd23c3d022f132a2ea37ac3a10ac4935779b" dependencies = [ - "lexical-core", - "memchr", - "version_check", + "bitvec", + "bytecheck", + "bytes", + "hashbrown 0.12.3", + "ptr_meta", + "rend", + "rkyv_derive", + "seahash", + "tinyvec", + "uuid", ] [[package]] -name = "ntapi" -version = "0.3.6" +name = "rkyv_derive" +version = "0.7.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0" dependencies = [ - "winapi", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "num-integer" -version = "0.1.44" +name = "rstest" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "7b423f0e62bdd61734b67cd21ff50871dfaeb9cc74f869dcd6af974fbcb19936" dependencies = [ - "autocfg", - "num-traits 0.2.14", + "futures", + "futures-timer", + "rstest_macros", + "rustc_version", ] [[package]] -name = "num-traits" -version = "0.1.43" +name = "rstest_macros" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" +checksum = "c5e1711e7d14f74b12a58411c542185ef7fb7f2e7f8ee6e2940a883628522b42" dependencies = [ - "num-traits 0.2.14", + "cfg-if", + "glob", + "proc-macro-crate", + "proc-macro2", + "quote", + "regex", + "relative-path", + "rustc_version", + "syn 2.0.77", + "unicode-ident", ] [[package]] -name = "num-traits" -version = "0.2.14" +name = "rusqlite" +version = "0.32.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "7753b721174eb8ff87a9a0e799e2d7bc3749323e773db92e0984debb00019d6e" dependencies = [ - "autocfg", + "bitflags", + "fallible-iterator", + "fallible-streaming-iterator", + "hashlink", + "libsqlite3-sys", + "smallvec", ] [[package]] -name = "num_cpus" -version = "1.13.0" +name = "rust_decimal" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +checksum = "b082d80e3e3cc52b2ed634388d436fe1f4de6af5786cc2de9ba9737527bdf555" dependencies = [ - "hermit-abi", - "libc", + "arrayvec", + "borsh", + "bytes", + "num-traits", + "rand", + "rkyv", + "serde", + "serde_json", ] [[package]] -name = "once_cell" -version = "1.5.2" +name = "rustc-demangle" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] -name = "opaque-debug" -version = "0.2.3" +name = "rustc-hash" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] -name = "opaque-debug" -version = "0.3.0" +name = "rustc_version" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] [[package]] -name = "openssl" -version = "0.10.36" +name = "rustix" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8d9facdb76fec0b73c406f125d44d86fdad818d66fef0531eec9233ca425ff4a" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags", - "cfg-if 1.0.0", - "foreign-types", + "errno", "libc", - "once_cell", - "openssl-sys", + "linux-raw-sys", + "windows-sys 0.52.0", ] [[package]] -name = "openssl-probe" -version = "0.1.4" +name = "rustls" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" +checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" +dependencies = [ + "aws-lc-rs", + "once_cell", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] [[package]] -name = "openssl-sys" -version = "0.9.66" +name = "rustls-pemfile" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1996d2d305e561b70d1ee0c53f1542833f4e1ac6ce9a6708b6ff2738ca67dc82" +checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" dependencies = [ - "autocfg", - "cc", - "libc", - "pkg-config", - "vcpkg", + "base64 0.22.1", + "rustls-pki-types", ] [[package]] -name = "parking_lot" -version = "0.11.1" +name = "rustls-pki-types" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" -dependencies = [ - "instant", - "lock_api", - "parking_lot_core", -] +checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" [[package]] -name = "parking_lot_core" -version = "0.8.3" +name = "rustls-webpki" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ - "cfg-if 1.0.0", - "instant", - "libc", - "redox_syscall", - "smallvec", - 
"winapi", + "aws-lc-rs", + "ring", + "rustls-pki-types", + "untrusted", ] [[package]] -name = "percent-encoding" -version = "2.1.0" +name = "rustversion" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" [[package]] -name = "pin-project" -version = "1.0.4" +name = "ryu" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95b70b68509f17aa2857863b6fa00bf21fc93674c7a8893de2f469f6aa7ca2f2" -dependencies = [ - "pin-project-internal", -] +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] -name = "pin-project-internal" -version = "1.0.4" +name = "same-file" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caa25a6393f22ce819b0f50e0be89287292fda8d425be38ee0ca14c4931d9e71" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ - "proc-macro2", - "quote", - "syn", + "winapi-util", ] [[package]] -name = "pin-project-lite" -version = "0.2.4" +name = "saturating" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" +checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" [[package]] -name = "pin-utils" -version = "0.1.0" +name = "schannel" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" +dependencies = [ + "windows-sys 0.59.0", +] [[package]] -name = "pkg-config" -version = "0.3.19" +name = "scheduled-thread-pool" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" +checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" +dependencies = [ + "parking_lot", +] [[package]] -name = "ppv-lite86" -version = "0.2.10" +name = "scopeguard" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] -name = "proc-macro-hack" -version = "0.5.19" +name = "seahash" +version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" +checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] -name = "proc-macro-nested" -version = "0.1.7" +name = "security-framework" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] [[package]] -name = "proc-macro2" -version = "1.0.24" +name = "security-framework-sys" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" dependencies = [ - "unicode-xid", + "core-foundation-sys", + "libc", ] [[package]] -name = "quick-error" -version = "1.2.3" +name = "semver" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" 
[[package]] -name = "quote" -version = "1.0.8" +name = "serde" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ - "proc-macro2", + "serde_derive", ] [[package]] -name = "r2d2" -version = "0.8.9" +name = "serde_bencode" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "545c5bc2b880973c9c10e4067418407a0ccaa3091781d1671d46eb35107cb26f" +checksum = "a70dfc7b7438b99896e7f8992363ab8e2c4ba26aa5ec675d32d1c3c2c33d413e" dependencies = [ - "log", - "parking_lot", - "scheduled-thread-pool", + "serde", + "serde_bytes", ] [[package]] -name = "r2d2_sqlite" -version = "0.16.0" +name = "serde_bytes" +version = "0.11.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed60ebe88b27ac28c0563bc0fbeaecd302ff53e3a01e5ddc2ec9f4e6c707d929" +checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a" dependencies = [ - "r2d2", - "rusqlite", + "serde", ] [[package]] -name = "rand" -version = "0.7.3" +name = "serde_derive" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc 0.2.0", + "proc-macro2", + "quote", + "syn 2.0.77", ] [[package]] -name = "rand" -version = "0.8.4" +name = "serde_html_form" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" +checksum = "8de514ef58196f1fc96dcaef80fe6170a1ce6215df9687a93fe8300e773fefc5" dependencies = [ - "libc", - "rand_chacha 0.3.1", - "rand_core 
0.6.3", - "rand_hc 0.3.1", + "form_urlencoded", + "indexmap 2.5.0", + "itoa", + "ryu", + "serde", ] [[package]] -name = "rand_chacha" -version = "0.2.2" +name = "serde_json" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", + "indexmap 2.5.0", + "itoa", + "memchr", + "ryu", + "serde", ] [[package]] -name = "rand_chacha" -version = "0.3.1" +name = "serde_path_to_error" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" dependencies = [ - "ppv-lite86", - "rand_core 0.6.3", + "itoa", + "serde", ] [[package]] -name = "rand_core" -version = "0.5.1" +name = "serde_repr" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ - "getrandom 0.1.16", + "proc-macro2", + "quote", + "syn 2.0.77", ] [[package]] -name = "rand_core" -version = "0.6.3" +name = "serde_spanned" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" dependencies = [ - "getrandom 0.2.3", + "serde", ] [[package]] -name = "rand_hc" -version = "0.2.0" +name = "serde_urlencoded" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +checksum = 
"d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ - "rand_core 0.5.1", + "form_urlencoded", + "itoa", + "ryu", + "serde", ] [[package]] -name = "rand_hc" -version = "0.3.1" +name = "serde_with" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" +checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" dependencies = [ - "rand_core 0.6.3", + "base64 0.22.1", + "chrono", + "hex", + "indexmap 1.9.3", + "indexmap 2.5.0", + "serde", + "serde_derive", + "serde_json", + "serde_with_macros", + "time", ] [[package]] -name = "redox_syscall" -version = "0.2.10" +name = "serde_with_macros" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" dependencies = [ - "bitflags", + "darling", + "proc-macro2", + "quote", + "syn 2.0.77", ] [[package]] -name = "regex" -version = "1.5.4" +name = "sha1" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", + "cfg-if", + "cpufeatures", + "digest", ] [[package]] -name = "regex-syntax" -version = "0.6.25" +name = "sha2" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] [[package]] -name = "remove_dir_all" -version = "0.5.3" +name = "sharded-slab" +version = "0.1.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ - "winapi", + "lazy_static", ] [[package]] -name = "resolv-conf" -version = "0.7.0" +name = "shlex" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" -dependencies = [ - "hostname", - "quick-error", -] +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] -name = "ring" -version = "0.16.20" +name = "signal-hook-registry" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ - "cc", "libc", - "once_cell", - "spin", - "untrusted", - "web-sys", - "winapi", ] [[package]] -name = "rusqlite" -version = "0.23.1" +name = "simdutf8" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45d0fd62e1df63d254714e6cb40d0a0e82e7a1623e7a27f679d851af092ae58b" -dependencies = [ - "bitflags", - "fallible-iterator", - "fallible-streaming-iterator", - "libsqlite3-sys", - "lru-cache", - "memchr", - "smallvec", - "time", -] +checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" [[package]] -name = "rust-ini" -version = "0.13.0" +name = "siphasher" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e52c148ef37f8c375d49d5a73aa70713125b7f19095948a923f80afdeb22ec2" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" [[package]] -name = "rustc_version" -version = "0.4.0" +name = "slab" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ - "semver", + "autocfg", ] [[package]] -name = "rustls" -version = "0.19.1" +name = "smallvec" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" + +[[package]] +name = "socket2" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ - "base64", - "log", - "ring", - "sct", - "webpki", + "libc", + "windows-sys 0.52.0", ] [[package]] -name = "ryu" -version = "1.0.5" +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "subprocess" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "0c2e86926081dda636c546d8c5e641661049d7562a68f5488be4a1f7f66f6086" +dependencies = [ + "libc", + "winapi", +] [[package]] -name = "safemem" -version = "0.3.3" +name = "subtle" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" +checksum = 
"13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] -name = "schannel" -version = "0.1.19" +name = "syn" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "lazy_static", - "winapi", + "proc-macro2", + "quote", + "unicode-ident", ] [[package]] -name = "scheduled-thread-pool" -version = "0.2.5" +name = "syn" +version = "2.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f74fd1204073fa02d5d5d68bec8021be4c38690b61264b2fdb48083d0e7d7" +checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" dependencies = [ - "parking_lot", + "proc-macro2", + "quote", + "unicode-ident", ] [[package]] -name = "scoped-tls" -version = "1.0.0" +name = "syn_derive" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" +checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.77", +] [[package]] -name = "scopeguard" -version = "1.1.0" +name = "sync_wrapper" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] -name = "sct" -version = "0.6.1" +name = "sync_wrapper" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" dependencies = [ - "ring", - "untrusted", + "futures-core", ] [[package]] -name = 
"security-framework" -version = "2.3.1" +name = "system-configuration" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23a2ac85147a3a11d77ecf1bc7166ec0b92febfa4461c37944e180f319ece467" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ "bitflags", "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", + "system-configuration-sys", ] [[package]] -name = "security-framework-sys" -version = "2.4.0" +name = "system-configuration-sys" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19133a286e494cc3311c165c4676ccb1fd47bed45b55f9d71fbd784ad4cea6f8" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" dependencies = [ "core-foundation-sys", "libc", ] [[package]] -name = "semver" -version = "1.0.4" +name = "tap" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] -name = "serde" -version = "0.8.23" +name = "tdyne-peer-id" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dad3f759919b92c3068c696c15c3d17238234498bbdcc80f2c469606f948ac8" +checksum = "6dde285ba6f835045648f9d4f4703f778aaafb47421d9c5dff47be1534370c3e" [[package]] -name = "serde" -version = "1.0.120" +name = "tdyne-peer-id-registry" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "166b2349061381baf54a58e4b13c89369feb0ef2eaa57198899e2312aac30aab" +checksum = "1923b2d356e080e8bee847c39b58de293309df2fe0bc9ecd859ae3210e868c25" dependencies = [ - "serde_derive", + "phf", + "phf_codegen", + "tdyne-peer-id", ] [[package]] -name = "serde-hjson" -version = "0.9.1" +name = "tempfile" +version = "3.12.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a3a4e0ea8a88553209f6cc6cfe8724ecad22e1acf372793c27d995290fe74f8" +checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" dependencies = [ - "lazy_static", - "num-traits 0.1.43", - "regex", - "serde 0.8.23", + "cfg-if", + "fastrand", + "once_cell", + "rustix", + "windows-sys 0.59.0", ] [[package]] -name = "serde_bencode" -version = "0.2.3" +name = "termcolor" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "934d8bdbaa0126dafaea9a8833424a211d9661897717846c6bb782349ca1c30d" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ - "serde 1.0.120", - "serde_bytes", + "winapi-util", ] [[package]] -name = "serde_bytes" -version = "0.11.5" +name = "termtree" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16ae07dd2f88a366f15bd0632ba725227018c69a1c8550a927324f8eb8368bb9" +checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" + +[[package]] +name = "thiserror" +version = "1.0.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ - "serde 1.0.120", + "thiserror-impl", ] [[package]] -name = "serde_derive" -version = "1.0.120" +name = "thiserror-impl" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca2a8cb5805ce9e3b95435e3765b7b553cecc762d938d409434338386cb5775" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.77", ] [[package]] -name = "serde_json" -version = "1.0.72" +name = "thread_local" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0ffa0837f2dfa6fb90868c2b5468cad482e175f7dad97e7421951e663f2b527" +checksum = 
"8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ - "itoa", - "ryu", - "serde 1.0.120", + "cfg-if", + "once_cell", ] [[package]] -name = "serde_urlencoded" -version = "0.7.0" +name = "time" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ - "form_urlencoded", + "deranged", "itoa", - "ryu", - "serde 1.0.120", + "num-conv", + "powerfmt", + "serde", + "time-core", + "time-macros", ] [[package]] -name = "sha-1" -version = "0.8.2" +name = "time-core" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df" -dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", -] +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] -name = "sha-1" -version = "0.9.8" +name = "time-macros" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.9.0", - "opaque-debug 0.3.0", + "num-conv", + "time-core", ] [[package]] -name = "signal-hook-registry" -version = "1.3.0" +name = "tinytemplate" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" dependencies = [ - "libc", + "serde", + "serde_json", ] [[package]] -name = "slab" -version = "0.4.2" +name = "tinyvec" +version = "1.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +dependencies = [ + "tinyvec_macros", +] [[package]] -name = "smallvec" -version = "1.6.1" +name = "tinyvec_macros" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] -name = "socket2" -version = "0.3.19" +name = "tokio" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ - "cfg-if 1.0.0", + "backtrace", + "bytes", "libc", - "winapi", + "mio", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.52.0", ] [[package]] -name = "spin" -version = "0.5.2" +name = "tokio-macros" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] [[package]] -name = "static_assertions" -version = "1.1.0" +name = "tokio-native-tls" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] [[package]] -name = "syn" -version = "1.0.67" +name = "tokio-rustls" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6498a9efc342871f91cc2d0d694c674368b4ceb40f62b65a7a08c3792935e702" +checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "proc-macro2", - "quote", - "unicode-xid", + "rustls", + "rustls-pki-types", + "tokio", ] [[package]] -name = "tempfile" -version = "3.2.0" +name = "tokio-util" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ - "cfg-if 1.0.0", - "libc", - "rand 0.8.4", - "redox_syscall", - "remove_dir_all", - "winapi", + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", ] [[package]] -name = "termcolor" -version = "1.1.2" +name = "toml" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" dependencies = [ - "winapi-util", + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", ] [[package]] -name = "thiserror" -version = "1.0.26" +name = "toml_datetime" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93119e4feac1cbe6c798c34d3a53ea0026b0b1de6a120deef895137c0529bfe2" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" dependencies = [ - "thiserror-impl", + "serde", ] [[package]] -name = "thiserror-impl" -version = "1.0.26" +name = "toml_edit" +version = "0.22.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "060d69a0afe7796bf42e9e2ff91f5ee691fb15c53d38b4b62a9a53eb23164745" +checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" dependencies = [ - "proc-macro2", - "quote", - "syn", + "indexmap 2.5.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", ] 
[[package]] -name = "time" -version = "0.1.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +name = "torrust-tracker" +version = "3.0.0" +dependencies = [ + "anyhow", + "aquatic_udp_protocol", + "axum", + "axum-client-ip", + "axum-extra", + "axum-server", + "camino", + "chrono", + "clap", + "crossbeam-skiplist", + "dashmap", + "derive_more", + "figment", + "futures", + "futures-util", + "hex-literal", + "http-body", + "hyper", + "hyper-util", + "lazy_static", + "local-ip-address", + "mockall", + "multimap", + "parking_lot", + "percent-encoding", + "pin-project-lite", + "r2d2", + "r2d2_mysql", + "r2d2_sqlite", + "rand", + "regex", + "reqwest", + "ringbuf", + "serde", + "serde_bencode", + "serde_bytes", + "serde_json", + "serde_repr", + "serde_with", + "thiserror", + "tokio", + "torrust-tracker-clock", + "torrust-tracker-configuration", + "torrust-tracker-contrib-bencode", + "torrust-tracker-located-error", + "torrust-tracker-primitives", + "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", + "tower 0.4.13", + "tower-http", + "trace", + "tracing", + "tracing-subscriber", + "url", + "uuid", + "zerocopy", +] + +[[package]] +name = "torrust-tracker-clock" +version = "3.0.0" dependencies = [ - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi", + "chrono", + "lazy_static", + "torrust-tracker-primitives", ] [[package]] -name = "tinyvec" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "848a1e1181b9f6753b5e96a092749e29b11d19ede67dfbbd6c7dc7e0f49b5338" +name = "torrust-tracker-configuration" +version = "3.0.0" dependencies = [ - "tinyvec_macros", + "camino", + "derive_more", + "figment", + "serde", + "serde_json", + "serde_with", + "thiserror", + "toml", + "torrust-tracker-located-error", + "url", + "uuid", ] [[package]] -name = "tinyvec_macros" -version = "0.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +name = "torrust-tracker-contrib-bencode" +version = "3.0.0" +dependencies = [ + "criterion", + "thiserror", +] [[package]] -name = "tokio" -version = "1.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c79ba603c337335df6ba6dd6afc38c38a7d5e1b0c871678439ea973cd62a118e" +name = "torrust-tracker-located-error" +version = "3.0.0" dependencies = [ - "autocfg", - "bytes", - "libc", - "memchr", - "mio", - "num_cpus", - "once_cell", - "pin-project-lite", - "signal-hook-registry", - "tokio-macros", - "winapi", + "thiserror", + "tracing", ] [[package]] -name = "tokio-macros" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caf7b11a536f46a809a8a9f0bb4237020f70ecbf115b842360afb127ea2fda57" +name = "torrust-tracker-primitives" +version = "3.0.0" dependencies = [ - "proc-macro2", - "quote", - "syn", + "aquatic_udp_protocol", + "binascii", + "derive_more", + "serde", + "tdyne-peer-id", + "tdyne-peer-id-registry", + "thiserror", + "zerocopy", ] [[package]] -name = "tokio-native-tls" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +name = "torrust-tracker-test-helpers" +version = "3.0.0" dependencies = [ - "native-tls", - "tokio", + "rand", + "torrust-tracker-configuration", ] [[package]] -name = "tokio-rustls" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +name = "torrust-tracker-torrent-repository" +version = "3.0.0" dependencies = [ - "rustls", + "aquatic_udp_protocol", + "async-std", + "criterion", + "crossbeam-skiplist", + "dashmap", + "futures", + "parking_lot", + "rstest", "tokio", - "webpki", + "torrust-tracker-clock", + 
"torrust-tracker-configuration", + "torrust-tracker-primitives", + "zerocopy", ] [[package]] -name = "tokio-stream" -version = "0.1.2" +name = "tower" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76066865172052eb8796c686f0b441a93df8b08d40a950b062ffb9a426f00edd" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", + "futures-util", + "pin-project", "pin-project-lite", "tokio", + "tower-layer", + "tower-service", + "tracing", ] [[package]] -name = "tokio-tungstenite" -version = "0.13.0" +name = "tower" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1a5f475f1b9d077ea1017ecbc60890fda8e54942d680ca0b1d2b47cfa2d861b" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" dependencies = [ + "futures-core", "futures-util", - "log", - "pin-project", + "pin-project-lite", + "sync_wrapper 0.1.2", "tokio", - "tungstenite", + "tower-layer", + "tower-service", + "tracing", ] [[package]] -name = "tokio-util" -version = "0.6.7" +name = "tower-http" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" +checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" dependencies = [ + "async-compression", + "bitflags", "bytes", "futures-core", - "futures-sink", - "log", + "http", + "http-body", "pin-project-lite", "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", + "uuid", ] [[package]] -name = "toml" -version = "0.5.8" +name = "tower-layer" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" -dependencies = [ - "serde 1.0.120", -] +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] -name = "torrust-tracker" 
-version = "2.0.1" -dependencies = [ - "binascii", - "byteorder", - "chrono", - "config", - "derive_more", - "env_logger", - "external-ip", - "fern", - "hex", - "log", - "percent-encoding", - "r2d2", - "r2d2_sqlite", - "rand 0.8.4", - "serde 1.0.120", - "serde_bencode", - "serde_bytes", - "serde_json", - "tokio", - "toml", - "warp", -] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] -name = "tower-service" -version = "0.3.0" +name = "trace" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" +checksum = "9ad0c048e114d19d1140662762bfdb10682f3bc806d8be18af846600214dd9af" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] [[package]] name = "tracing" -version = "0.1.22" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "cfg-if 1.0.0", "log", "pin-project-lite", + "tracing-attributes", "tracing-core", ] [[package]] -name = "tracing-core" -version = "0.1.17" +name = "tracing-attributes" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "lazy_static", + "proc-macro2", + "quote", + "syn 2.0.77", ] [[package]] -name = "trust-dns-proto" -version = "0.20.3" +name = "tracing-core" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0d7f5db438199a6e2609debe3f69f808d074e0a2888ee0bccb45fe234d03f4" +checksum = 
"c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ - "async-trait", - "cfg-if 1.0.0", - "data-encoding", - "enum-as-inner", - "futures-channel", - "futures-io", - "futures-util", - "idna", - "ipnet", - "lazy_static", - "log", - "rand 0.8.4", - "smallvec", - "thiserror", - "tinyvec", - "tokio", - "url", + "once_cell", + "valuable", ] [[package]] -name = "trust-dns-resolver" -version = "0.20.3" +name = "tracing-log" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ad17b608a64bd0735e67bde16b0636f8aa8591f831a25d18443ed00a699770" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" dependencies = [ - "cfg-if 1.0.0", - "futures-util", - "ipconfig", - "lazy_static", "log", - "lru-cache", - "parking_lot", - "resolv-conf", - "smallvec", - "thiserror", - "tokio", - "trust-dns-proto", + "once_cell", + "tracing-core", ] [[package]] -name = "try-lock" -version = "0.2.3" +name = "tracing-serde" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" +dependencies = [ + "serde", + "tracing-core", +] [[package]] -name = "tungstenite" -version = "0.12.0" +name = "tracing-subscriber" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ada8297e8d70872fa9a551d93250a9f407beb9f37ef86494eb20012a2ff7c24" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ - "base64", - "byteorder", - "bytes", - "http", - "httparse", - "input_buffer", - "log", - "rand 0.8.4", - "sha-1 0.9.8", - "url", - "utf-8", + "nu-ansi-term", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing-core", + "tracing-log", + "tracing-serde", ] [[package]] -name = "twoway" -version = "0.1.8" +name = "try-lock" 
+version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "twox-hash" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" +checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "memchr", + "cfg-if", + "rand", + "static_assertions", ] [[package]] name = "typenum" -version = "1.12.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] -name = "unicase" -version = "2.6.0" +name = "uncased" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +checksum = "e1b88fcfe09e89d3866a5c11019378088af2d24c3fbd4f0543f96b479ec90697" dependencies = [ "version_check", ] [[package]] name = "unicode-bidi" -version = "0.3.6" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "246f4c42e67e7a4e3c6106ff716a5d067d4132a642840b242e357e468a2a0085" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" + +[[package]] +name = "unicode-ident" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] 
-[[package]] -name = "unicode-segmentation" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" - [[package]] name = "unicode-xid" -version = "0.2.1" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "untrusted" -version = "0.7.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.2.2" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna", - "matches", "percent-encoding", + "serde", ] [[package]] -name = "utf-8" -version = "0.7.6" +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "uuid" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" +dependencies = [ + "getrandom", + "rand", +] + +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "value-bag" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +checksum = "5a84c137d37ab0142f0f2ddfe332651fdbf252e7b7dbb4e67b6c1f1b2e925101" [[package]] name = "vcpkg" @@ -1938,92 +4177,78 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" -version = "0.9.2" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] -name = "want" -version = "0.3.0" +name = "walkdir" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ - "log", - "try-lock", + "same-file", + "winapi-util", ] [[package]] -name = "warp" +name = "want" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "332d47745e9a0c38636dbd454729b147d16bd1ed08ae67b3ab281c4506771054" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "bytes", - "futures", - "headers", - "http", - "hyper", - "log", - "mime", - "mime_guess", - "multipart", - "percent-encoding", - "pin-project", - "scoped-tls", - "serde 1.0.120", - "serde_json", - "serde_urlencoded", - "tokio", - "tokio-rustls", - "tokio-stream", - "tokio-tungstenite", - "tokio-util", - "tower-service", - "tracing", + "try-lock", ] [[package]] name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - -[[package]] -name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" +version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.78" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.78" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a317bf8f9fba2476b4b2c85ef4c4af8ff39c3c7f0cdfeed4f82c34a880aa837b" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", - "syn", + "syn 2.0.77", "wasm-bindgen-shared", ] +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "wasm-bindgen-macro" -version = "0.2.78" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56146e7c495528bf6587663bea13a8eb588d39b36b679d83972e1a2dbbdacf9" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2031,55 +4256,45 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.78" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7803e0eea25835f8abdc585cd3021b3deb11543c6fe226dcd30b228857c5c5ab" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = 
[ "proc-macro2", "quote", - "syn", + "syn 2.0.77", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.78" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0237232789cf037d5480773fe568aac745bfe2afbc11a863e97901780a6b47cc" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "web-sys" -version = "0.3.55" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38eb105f1c59d9eaa6b5cdc92b859d85b926e82cb2e0945cd0c9259faa6fe9fb" +checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] -name = "webpki" -version = "0.21.4" +name = "which" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" dependencies = [ - "ring", - "untrusted", + "either", + "home", + "once_cell", + "rustix", ] -[[package]] -name = "widestring" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" - -[[package]] -name = "wildmatch" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f44b95f62d34113cf558c93511ac93027e03e9c29a60dd0fd70e6e025c7270a" - [[package]] name = "winapi" version = "0.3.9" @@ -2098,11 +4313,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.5" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ 
- "winapi", + "windows-sys 0.59.0", ] [[package]] @@ -2112,34 +4327,201 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] -name = "winreg" -version = "0.6.2" +name = "windows-core" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "winapi", + "windows-targets", ] [[package]] -name = "xml-rs" -version = "0.8.4" +name = "windows-registry" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d7d3948613f75c98fd9328cfdcc45acc4d360655289d0a7d4ec931392200a3" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result", + "windows-strings", + "windows-targets", +] [[package]] -name = "xmltree" -version = "0.10.3" +name = "windows-result" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7d8a75eaf6557bb84a65ace8609883db44a29951042ada9b393151532e41fcb" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" dependencies = [ - "xml-rs", + "windows-targets", ] [[package]] -name = "yaml-rust" -version = "0.4.5" +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "winnow" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +dependencies = [ + "memchr", +] + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + +[[package]] +name = "yansi" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" + +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "byteorder", + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" + +[[package]] +name = "zstd" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = 
"7.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" +dependencies = [ + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.13+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ - "linked-hash-map", + "cc", + "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index db9b89856..e362dafe7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,32 +1,117 @@ [package] +default-run = "torrust-tracker" name = "torrust-tracker" -version = "2.0.1" -authors = ["Mick van Dijke ", "Naim A. "] +readme = "README.md" + +authors.workspace = true +description.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[workspace.package] +authors = ["Nautilus Cyberneering , Mick van Dijke "] +categories = ["network-programming", "web-programming"] description = "A feature rich BitTorrent tracker." 
-edition = "2018" +documentation = "https://docs.rs/crate/torrust-tracker/" +edition = "2021" +homepage = "https://torrust.com/" +keywords = ["bittorrent", "file-sharing", "peer-to-peer", "torrent", "tracker"] +license = "AGPL-3.0-only" +publish = true +repository = "https://github.com/torrust/torrust-tracker" +rust-version = "1.72" +version = "3.0.0" + +[dependencies] +anyhow = "1" +aquatic_udp_protocol = "0" +axum = { version = "0", features = ["macros"] } +axum-client-ip = "0" +axum-extra = { version = "0", features = ["query"] } +axum-server = { version = "0", features = ["tls-rustls"] } +camino = { version = "1", features = ["serde", "serde1"] } +chrono = { version = "0", default-features = false, features = ["clock"] } +clap = { version = "4", features = ["derive", "env"] } +crossbeam-skiplist = "0" +dashmap = "6" +derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } +figment = "0" +futures = "0" +futures-util = "0" +hex-literal = "0" +http-body = "1" +hyper = "1" +hyper-util = { version = "0", features = ["http1", "http2", "tokio"] } +lazy_static = "1" +multimap = "0" +parking_lot = "0" +percent-encoding = "2" +pin-project-lite = "0" +r2d2 = "0" +r2d2_mysql = "25" +r2d2_sqlite = { version = "0", features = ["bundled"] } +rand = "0" +regex = "1" +reqwest = { version = "0", features = ["json"] } +ringbuf = "0" +serde = { version = "1", features = ["derive"] } +serde_bencode = "0" +serde_bytes = "0" +serde_json = { version = "1", features = ["preserve_order"] } +serde_repr = "0" +serde_with = { version = "3", features = ["json"] } +thiserror = "1" +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-tracker-clock = { version = "3.0.0", path = "packages/clock" } +torrust-tracker-configuration = { version = "3.0.0", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0", path 
= "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0", path = "packages/primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0", path = "packages/torrent-repository" } +tower = { version = "0", features = ["timeout"] } +tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } +trace = "0" +tracing = "0" +tracing-subscriber = { version = "0", features = ["json"] } +url = { version = "2", features = ["serde"] } +uuid = { version = "1", features = ["v4"] } +zerocopy = "0" + +[package.metadata.cargo-machete] +ignored = ["crossbeam-skiplist", "dashmap", "figment", "parking_lot", "serde_bytes"] + +[dev-dependencies] +local-ip-address = "0" +mockall = "0" +torrust-tracker-test-helpers = { version = "3.0.0", path = "packages/test-helpers" } + +[workspace] +members = [ + "contrib/bencode", + "packages/configuration", + "packages/located-error", + "packages/primitives", + "packages/test-helpers", + "packages/torrent-repository", +] + +[profile.dev] +debug = 1 +lto = "fat" +opt-level = 1 [profile.release] +debug = 1 lto = "fat" +opt-level = 3 -[dependencies] -serde = {version = "1.0", features = ["derive"]} -serde_bencode = "^0.2.3" -serde_bytes = "0.11" -serde_json = "1.0.72" -hex = "0.4.3" -percent-encoding = "2.1.0" -warp = {version = "0.3", features = ["tls"]} -tokio = {version = "1.7", features = ["macros", "io-util", "net", "time", "rt-multi-thread", "fs", "sync", "signal"]} -binascii = "0.1" -toml = "0.5" -log = {version = "0.4", features = ["release_max_level_info"]} -fern = "0.6" -chrono = "0.4" -byteorder = "1" -external-ip = "4.1.0" -r2d2_sqlite = "0.16.0" -r2d2 = "0.8.8" -rand = "0.8.4" -env_logger = "0.9.0" -config = "0.11" -derive_more = "0.99" +[profile.release-debug] +debug = true +inherits = "release" diff --git a/Containerfile b/Containerfile new file mode 100644 index 000000000..263053390 --- /dev/null +++ b/Containerfile @@ -0,0 +1,145 @@ +# 
syntax=docker/dockerfile:latest + +# Torrust Tracker + +## Builder Image +FROM docker.io/library/rust:bookworm AS chef +WORKDIR /tmp +RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash +RUN cargo binstall --no-confirm cargo-chef cargo-nextest + +## Tester Image +FROM docker.io/library/rust:slim-bookworm AS tester +WORKDIR /tmp + +RUN apt-get update; apt-get install -y curl sqlite3; apt-get autoclean +RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash +RUN cargo binstall --no-confirm cargo-nextest + +COPY ./share/ /app/share/torrust +RUN mkdir -p /app/share/torrust/default/database/; \ + sqlite3 /app/share/torrust/default/database/tracker.sqlite3.db "VACUUM;" + +## Su Exe Compile +FROM docker.io/library/gcc:bookworm AS gcc +COPY ./contrib/dev-tools/su-exec/ /usr/local/src/su-exec/ +RUN cc -Wall -Werror -g /usr/local/src/su-exec/su-exec.c -o /usr/local/bin/su-exec; chmod +x /usr/local/bin/su-exec + + +## Chef Prepare (look at project and see wat we need) +FROM chef AS recipe +WORKDIR /build/src +COPY . 
/build/src +RUN cargo chef prepare --recipe-path /build/recipe.json + + +## Cook (debug) +FROM chef AS dependencies_debug +WORKDIR /build/src +COPY --from=recipe /build/recipe.json /build/recipe.json +RUN cargo chef cook --tests --benches --examples --workspace --all-targets --all-features --recipe-path /build/recipe.json +RUN cargo nextest archive --tests --benches --examples --workspace --all-targets --all-features --archive-file /build/temp.tar.zst ; rm -f /build/temp.tar.zst + +## Cook (release) +FROM chef AS dependencies +WORKDIR /build/src +COPY --from=recipe /build/recipe.json /build/recipe.json +RUN cargo chef cook --tests --benches --examples --workspace --all-targets --all-features --recipe-path /build/recipe.json --release +RUN cargo nextest archive --tests --benches --examples --workspace --all-targets --all-features --archive-file /build/temp.tar.zst --release ; rm -f /build/temp.tar.zst + + +## Build Archive (debug) +FROM dependencies_debug AS build_debug +WORKDIR /build/src +COPY . /build/src +RUN cargo nextest archive --tests --benches --examples --workspace --all-targets --all-features --archive-file /build/torrust-tracker-debug.tar.zst + +## Build Archive (release) +FROM dependencies AS build +WORKDIR /build/src +COPY . /build/src +RUN cargo nextest archive --tests --benches --examples --workspace --all-targets --all-features --archive-file /build/torrust-tracker.tar.zst --release + + +# Extract and Test (debug) +FROM tester AS test_debug +WORKDIR /test +COPY . 
/test/src/ +COPY --from=build_debug \ + /build/torrust-tracker-debug.tar.zst \ + /test/torrust-tracker-debug.tar.zst +RUN cargo nextest run --workspace-remap /test/src/ --extract-to /test/src/ --no-run --archive-file /test/torrust-tracker-debug.tar.zst +RUN cargo nextest run --workspace-remap /test/src/ --target-dir-remap /test/src/target/ --cargo-metadata /test/src/target/nextest/cargo-metadata.json --binaries-metadata /test/src/target/nextest/binaries-metadata.json + +RUN mkdir -p /app/bin/; cp -l /test/src/target/debug/torrust-tracker /app/bin/torrust-tracker +RUN mkdir /app/lib/; cp -l $(realpath $(ldd /app/bin/torrust-tracker | grep "libz\.so\.1" | awk '{print $3}')) /app/lib/libz.so.1 +RUN chown -R root:root /app; chmod -R u=rw,go=r,a+X /app; chmod -R a+x /app/bin + +# Extract and Test (release) +FROM tester AS test +WORKDIR /test +COPY . /test/src +COPY --from=build \ + /build/torrust-tracker.tar.zst \ + /test/torrust-tracker.tar.zst +RUN cargo nextest run --workspace-remap /test/src/ --extract-to /test/src/ --no-run --archive-file /test/torrust-tracker.tar.zst +RUN cargo nextest run --workspace-remap /test/src/ --target-dir-remap /test/src/target/ --cargo-metadata /test/src/target/nextest/cargo-metadata.json --binaries-metadata /test/src/target/nextest/binaries-metadata.json + +RUN mkdir -p /app/bin/; cp -l /test/src/target/release/torrust-tracker /app/bin/torrust-tracker; cp -l /test/src/target/release/http_health_check /app/bin/http_health_check +RUN mkdir -p /app/lib/; cp -l $(realpath $(ldd /app/bin/torrust-tracker | grep "libz\.so\.1" | awk '{print $3}')) /app/lib/libz.so.1 +RUN chown -R root:root /app; chmod -R u=rw,go=r,a+X /app; chmod -R a+x /app/bin + + +## Runtime +FROM gcr.io/distroless/cc-debian12:debug AS runtime +RUN ["/busybox/cp", "-sp", "/busybox/sh","/busybox/cat","/busybox/ls","/busybox/env", "/bin/"] +COPY --from=gcc --chmod=0555 /usr/local/bin/su-exec /bin/su-exec + +ARG 
TORRUST_TRACKER_CONFIG_TOML_PATH="/etc/torrust/tracker/tracker.toml" +ARG TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER="sqlite3" +ARG USER_ID=1000 +ARG UDP_PORT=6969 +ARG HTTP_PORT=7070 +ARG API_PORT=1212 +ARG HEALTH_CHECK_API_PORT=1313 + +ENV TORRUST_TRACKER_CONFIG_TOML_PATH=${TORRUST_TRACKER_CONFIG_TOML_PATH} +ENV TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER=${TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER} +ENV USER_ID=${USER_ID} +ENV UDP_PORT=${UDP_PORT} +ENV HTTP_PORT=${HTTP_PORT} +ENV API_PORT=${API_PORT} +ENV HEALTH_CHECK_API_PORT=${HEALTH_CHECK_API_PORT} +ENV TZ=Etc/UTC + +EXPOSE ${UDP_PORT}/udp +EXPOSE ${HTTP_PORT}/tcp +EXPOSE ${API_PORT}/tcp +EXPOSE ${HEALTH_CHECK_API_PORT}/tcp + +RUN mkdir -p /var/lib/torrust/tracker /var/log/torrust/tracker /etc/torrust/tracker + +ENV ENV=/etc/profile +COPY --chmod=0555 ./share/container/entry_script_sh /usr/local/bin/entry.sh + +VOLUME ["/var/lib/torrust/tracker","/var/log/torrust/tracker","/etc/torrust/tracker"] + +ENV RUNTIME="runtime" +ENTRYPOINT ["/usr/local/bin/entry.sh"] + + +## Torrust-Tracker (debug) +FROM runtime AS debug +ENV RUNTIME="debug" +COPY --from=test_debug /app/ /usr/ +RUN env +CMD ["sh"] + +## Torrust-Tracker (release) (default) +FROM runtime AS release +ENV RUNTIME="release" +COPY --from=test /app/ /usr/ +HEALTHCHECK --interval=5s --timeout=5s --start-period=3s --retries=3 \ + CMD /usr/bin/http_health_check http://localhost:${HEALTH_CHECK_API_PORT}/health_check \ + || exit 1 +CMD ["/usr/bin/torrust-tracker"] diff --git a/NOTICE b/NOTICE new file mode 100644 index 000000000..e69de29bb diff --git a/README.md b/README.md index 9e09a7739..6d611d9a5 100644 --- a/README.md +++ b/README.md @@ -1,67 +1,268 @@ # Torrust Tracker -![Test](https://github.com/torrust/torrust-tracker/actions/workflows/test_build_release.yml/badge.svg) -## Project Description -Torrust Tracker is a lightweight but incredibly powerful and feature-rich BitTorrent tracker made using Rust. 
+[![container_wf_b]][container_wf] [![coverage_wf_b]][coverage_wf] [![deployment_wf_b]][deployment_wf] [![testing_wf_b]][testing_wf] +**Torrust Tracker** is a [BitTorrent][bittorrent] Tracker that matchmakes peers and collects statistics. Written in [Rust Language][rust] with the [Axum] web framework. **This tracker aims to be respectful to established standards, (both [formal][BEP 00] and [otherwise][torrent_source_felid]).** -### Features -* [X] UDP server -* [X] HTTP (optional SSL) server -* [X] Private & Whitelisted mode -* [X] Built-in API -* [X] Torrent whitelisting -* [X] Peer authentication using time-bound keys +> This is a [Torrust][torrust] project and is in active development. It is community supported as well as sponsored by [Nautilus Cyberneering][nautilus]. -### Implemented BEPs -* [BEP 15](http://www.bittorrent.org/beps/bep_0015.html): UDP Tracker Protocol for BitTorrent -* [BEP 23](http://bittorrent.org/beps/bep_0023.html): Tracker Returns Compact Peer Lists -* [BEP 27](http://bittorrent.org/beps/bep_0027.html): Private Torrents -* [BEP 41](http://bittorrent.org/beps/bep_0041.html): UDP Tracker Protocol Extensions -* [BEP 48](http://bittorrent.org/beps/bep_0048.html): Tracker Protocol Extension: Scrape +## Key Features + +- [x] High Quality and Modern Rust Codebase. +- [x] [Documentation][docs] Generated from Code Comments. +- [x] [Comprehensive Suit][coverage] of Unit and Functional Tests. +- [x] Good Performance in Busy Conditions. +- [x] Support for `UDP`, `HTTP`, and `TLS` Sockets. +- [x] Native `IPv4` and `IPv6` support. +- [x] Private & Whitelisted mode. +- [x] Tracker Management API. +- [x] Support [newTrackon][newtrackon] checks. +- [x] Persistent `SQLite3` or `MySQL` Databases. + +## Roadmap + +Core: + +- [ ] New option `want_ip_from_query_string`. See . +- [ ] Peer and torrents specific statistics. See . + +Persistence: + +- [ ] Support other databases like PostgreSQL. + +Performance: + +- [ ] More optimizations. See . 
+ +Protocols: + +- [ ] WebTorrent. + +Integrations: + +- [ ] Monitoring (Prometheus). + +Utils: + +- [ ] Tracker client. +- [ ] Tracker checker. + +Others: + +- [ ] Support for Windows. +- [ ] Docker images for other architectures. + + + +## Implemented BitTorrent Enhancement Proposals (BEPs) +> +> _[Learn more about BitTorrent Enhancement Proposals][BEP 00]_ + +- [BEP 03]: The BitTorrent Protocol. +- [BEP 07]: IPv6 Support. +- [BEP 15]: UDP Tracker Protocol for BitTorrent. +- [BEP 23]: Tracker Returns Compact Peer Lists. +- [BEP 27]: Private Torrents. +- [BEP 48]: Tracker Protocol Extension: Scrape. ## Getting Started -You can get the latest binaries from [releases](https://github.com/torrust/torrust-tracker/releases) or follow the install from scratch instructions below. -### Install From Scratch -1. Clone the repo. -```bash +### Container Version + +The Torrust Tracker is [deployed to DockerHub][dockerhub], you can run a demo immediately with the following commands: + +#### Docker + +```sh +docker run -it torrust/tracker:develop +``` + +> Please read our [container guide][containers.md] for more information. + +#### Podman + +```sh +podman run -it docker.io/torrust/tracker:develop +``` + +> Please read our [container guide][containers.md] for more information. + +### Development Version + +- Please ensure you have the _**[latest stable (or nightly) version of rust][rust]___. +- Please ensure that your computer has enough RAM. _**Recommended 16GB.___ + +#### Checkout, Test and Run + +```sh +# Checkout repository into a new folder: git clone https://github.com/torrust/torrust-tracker.git + +# Change into directory and create a empty database file: cd torrust-tracker -``` +mkdir -p ./storage/tracker/lib/database/ +touch ./storage/tracker/lib/database/sqlite3.db -2. Build the source code. 
-```bash -cargo build --release +# Check all tests in application: +cargo test --tests --benches --examples --workspace --all-targets --all-features + +# Run the tracker: +cargo run ``` -3. Copy binaries: `torrust-tracker/target/torrust-tracker` to a new folder. +#### Customization -### Usage -1. Navigate to the folder you put the torrust-tracker binaries in. +```sh +# Copy the default configuration into the standard location: +mkdir -p ./storage/tracker/etc/ +cp ./share/default/config/tracker.development.sqlite3.toml ./storage/tracker/etc/tracker.toml +# Customize the tracker configuration (for example): +vim ./storage/tracker/etc/tracker.toml -2. Run the torrust-tracker once to create the `config.toml` file: -```bash -./torrust-tracker +# Run the tracker with the updated configuration: +TORRUST_TRACKER_CONFIG_TOML_PATH="./storage/tracker/etc/tracker.toml" cargo run ``` +_Optionally, you may choose to supply the entire configuration as an environmental variable:_ + +```sh +# Use a configuration supplied on an environmental variable: +TORRUST_TRACKER_CONFIG_TOML=$(cat "./storage/tracker/etc/tracker.toml") cargo run +``` -3. Edit the newly created config.toml file in the same folder as your torrust-tracker binaries according to your liking. See [configuration documentation](https://torrust.github.io/torrust-documentation/torrust-tracker/config/). +_For deployment, you **should** override the `api_admin_token` by using an environmental variable:_ +```sh +# Generate a Secret Token: +gpg --armor --gen-random 1 10 | tee ./storage/tracker/lib/tracker_api_admin_token.secret +chmod go-rwx ./storage/tracker/lib/tracker_api_admin_token.secret -4. 
Run the torrust-tracker again: -```bash -./torrust-tracker +# Override secret in configuration using an environmental variable: +TORRUST_TRACKER_CONFIG_TOML=$(cat "./storage/tracker/etc/tracker.toml") \ + TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN=$(cat "./storage/tracker/lib/tracker_api_admin_token.secret") \ + cargo run ``` -### Tracker URL -Your tracker will be `udp://tracker-ip:port/announce` or `https://tracker-ip:port/announce` depending on your tracker mode. -In private mode, tracker keys are added after the tracker URL like: `https://tracker-ip:port/announce/tracker-key`. +> Please view our [crate documentation][docs] for more detailed instructions. + +### Services + +The following services are provided by the default configuration: + +- UDP _(tracker)_ + - `udp://127.0.0.1:6969/announce`. +- HTTP _(tracker)_ + - `http://127.0.0.1:7070/announce`. +- API _(management)_ + - `http://127.0.0.1:1212/api/v1/stats?token=MyAccessToken`. + +## Documentation + +You can read the [latest documentation][docs] from . + +Some specific sections: + +- [Management API (Version 1)][API] +- [Tracker (HTTP/TLS)][HTTP] +- [Tracker (UDP)][UDP] + +## Benchmarking + +- [Benchmarking](./docs/benchmarking.md) + +## Contributing + +We are happy to support and welcome new people to our project. Please consider our [contributor guide][guide.md].
+This is an open-source community-supported project. We welcome contributions from the community! + +**How can you contribute?** + +- Bug reports and feature requests. +- Code contributions. You can start by looking at the issues labeled "[good first issues]". +- Documentation improvements. Check the [documentation][docs] and [API documentation][API] for typos, errors, or missing information. +- Participation in the community. You can help by answering questions in the [discussions]. + +## License + +**Copyright (c) 2023 The Torrust Developers.** + +This program is free software: you can redistribute it and/or modify it under the terms of the [GNU Affero General Public License][AGPL_3_0] as published by the [Free Software Foundation][FSF], version 3. + +This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the [GNU Affero General Public License][AGPL_3_0] for more details. + +You should have received a copy of the *GNU Affero General Public License* along with this program. If not, see . + +Some files include explicit copyright notices and/or license notices. + +### Legacy Exception + +For prosperity, versions of Torrust Tracker that are older than five years are automatically granted the [MIT-0][MIT_0] license in addition to the existing [AGPL-3.0-only][AGPL_3_0] license. + +## Contributor Agreement + +The copyright of the Torrust Tracker is retained by the respective authors. + +**Contributors agree:** + +- That all their contributions be granted a license(s) **compatible** with the [Torrust Trackers License](#license). +- That all contributors signal **clearly** and **explicitly** any other compilable licenses if they are not: _[AGPL-3.0-only with the legacy MIT-0 exception](#license)_. 
+ +**The Torrust-Tracker project has no copyright assignment agreement.** + +_We kindly ask you to take time and consider The Torrust Project [Contributor Agreement][agreement.md] in full._ + +## Acknowledgments + +This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [Dutch Bits]. Also thanks to [Naim A.] and [greatest-ape] for some parts of the code. Further added features and functions thanks to [Power2All]. + +[container_wf]: ../../actions/workflows/container.yaml +[container_wf_b]: ../../actions/workflows/container.yaml/badge.svg +[coverage_wf]: ../../actions/workflows/coverage.yaml +[coverage_wf_b]: ../../actions/workflows/coverage.yaml/badge.svg +[deployment_wf]: ../../actions/workflows/deployment.yaml +[deployment_wf_b]: ../../actions/workflows/deployment.yaml/badge.svg +[testing_wf]: ../../actions/workflows/testing.yaml +[testing_wf_b]: ../../actions/workflows/testing.yaml/badge.svg + +[bittorrent]: http://bittorrent.org/ +[rust]: https://www.rust-lang.org/ +[axum]: https://github.com/tokio-rs/axum +[newtrackon]: https://newtrackon.com/ +[coverage]: https://app.codecov.io/gh/torrust/torrust-tracker +[torrust]: https://torrust.com/ + +[dockerhub]: https://hub.docker.com/r/torrust/tracker/tags + +[torrent_source_felid]: https://github.com/qbittorrent/qBittorrent/discussions/19406 + +[BEP 00]: https://www.bittorrent.org/beps/bep_0000.html +[BEP 03]: https://www.bittorrent.org/beps/bep_0003.html +[BEP 07]: https://www.bittorrent.org/beps/bep_0007.html +[BEP 15]: https://www.bittorrent.org/beps/bep_0015.html +[BEP 23]: https://www.bittorrent.org/beps/bep_0023.html +[BEP 27]: https://www.bittorrent.org/beps/bep_0027.html +[BEP 48]: https://www.bittorrent.org/beps/bep_0048.html + +[containers.md]: ./docs/containers.md + +[docs]: https://docs.rs/torrust-tracker/latest/ +[api]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/apis/v1 +[http]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/http +[udp]: 
https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/udp + +[good first issues]: https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 +[discussions]: https://github.com/torrust/torrust-tracker/discussions + +[guide.md]: https://github.com/torrust/.github/blob/main/info/contributing.md +[agreement.md]: https://github.com/torrust/.github/blob/main/info/licensing/contributor_agreement_v01.md -### Built-in API -Read the API documentation [here](https://torrust.github.io/torrust-documentation/torrust-tracker/api/). +[AGPL_3_0]: ./docs/licenses/LICENSE-AGPL_3_0 +[MIT_0]: ./docs/licenses/LICENSE-MIT_0 +[FSF]: https://www.fsf.org/ -### Credits -This project was a joint effort by [Nautilus Cyberneering GmbH](https://nautilus-cyberneering.de/) and [Dutch Bits](https://dutchbits.nl). -Also thanks to [Naim A.](https://github.com/naim94a/udpt) and [greatest-ape](https://github.com/greatest-ape/aquatic) for some parts of the code. +[nautilus]: https://github.com/orgs/Nautilus-Cyberneering/ +[Dutch Bits]: https://dutchbits.nl +[Naim A.]: https://github.com/naim94a/udpt +[greatest-ape]: https://github.com/greatest-ape/aquatic +[Power2All]: https://github.com/power2all diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000..b36d27978 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,27 @@ +# Security Policy + +Thanks for helping make Torrust Tracker safe for everyone. + +## Security + +[Torrust](https://github.com/torrust) takes the security of our software products and services seriously. + +## Reporting Security Issues + +If you believe you have found a security vulnerability in any of our repositories, please report it to us through coordinated disclosure. + +**Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.** + +Instead, please send an email to info[@]nautilus-cyberneering.de. 
+ +Please include as much of the information listed below as you can to help us better understand and resolve the issue: + +- The type of issue (e.g., buffer overflow, SQL injection, or cross-site scripting) +- Full paths of source file(s) related to the manifestation of the issue +- The location of the affected source code (tag/branch/commit or direct URL) +- Any special configuration required to reproduce the issue +- Step-by-step instructions to reproduce the issue +- Proof-of-concept or exploit code (if possible) +- Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. diff --git a/cSpell.json b/cSpell.json new file mode 100644 index 000000000..6a9da0324 --- /dev/null +++ b/cSpell.json @@ -0,0 +1,189 @@ +{ + "words": [ + "Addrs", + "adduser", + "alekitto", + "appuser", + "Arvid", + "asyn", + "autoclean", + "AUTOINCREMENT", + "automock", + "Avicora", + "Azureus", + "bdecode", + "bencode", + "bencoded", + "beps", + "binascii", + "binstall", + "Bitflu", + "bools", + "Bragilevsky", + "bufs", + "buildid", + "Buildx", + "byteorder", + "callgrind", + "camino", + "canonicalize", + "canonicalized", + "certbot", + "chrono", + "clippy", + "codecov", + "codegen", + "completei", + "Condvar", + "connectionless", + "Containerfile", + "conv", + "curr", + "cvar", + "Cyberneering", + "dashmap", + "datagram", + "datetime", + "debuginfo", + "Deque", + "Dijke", + "distroless", + "dockerhub", + "downloadedi", + "dtolnay", + "elif", + "Eray", + "filesd", + "flamegraph", + "Freebox", + "FrostegÃ¥rd", + "gecos", + "Grcov", + "hasher", + "heaptrack", + "hexlify", + "hlocalhost", + "Hydranode", + "hyperthread", + "Icelake", + "iiiiiiiiiiiiiiiiiiiid", + "imdl", + "impls", + "incompletei", + "infohash", + "infohashes", + "infoschema", + "Intermodal", + "intervali", + "Joakim", + "kallsyms", + "Karatay", + "kcachegrind", + "kexec", + "keyout", + "kptr", + "lcov", + "leecher", + "leechers", + "libsqlite", + 
"libtorrent", + "libz", + "LOGNAME", + "Lphant", + "matchmakes", + "metainfo", + "middlewares", + "misresolved", + "mockall", + "multimap", + "myacicontext", + "Naim", + "nanos", + "newkey", + "nextest", + "nocapture", + "nologin", + "nonroot", + "Norberg", + "numwant", + "nvCFlJCq7fz7Qx6KoKTDiMZvns8l5Kw7", + "oneshot", + "ostr", + "Pando", + "peekable", + "peerlist", + "programatik", + "proot", + "proto", + "Quickstart", + "Radeon", + "Rasterbar", + "realpath", + "reannounce", + "Registar", + "repr", + "reqs", + "reqwest", + "rerequests", + "ringbuf", + "ringsize", + "rngs", + "rosegment", + "routable", + "rstest", + "rusqlite", + "RUSTDOCFLAGS", + "RUSTFLAGS", + "rustfmt", + "Rustls", + "Ryzen", + "Seedable", + "serde", + "Shareaza", + "sharktorrent", + "SHLVL", + "skiplist", + "slowloris", + "socketaddr", + "sqllite", + "subsec", + "Swatinem", + "Swiftbit", + "taiki", + "tdyne", + "tempfile", + "thiserror", + "tlsv", + "Torrentstorm", + "torrust", + "torrustracker", + "trackerid", + "Trackon", + "typenum", + "Unamed", + "untuple", + "uroot", + "Vagaa", + "valgrind", + "Vitaly", + "vmlinux", + "Vuze", + "Weidendorfer", + "Werror", + "whitespaces", + "Xacrimon", + "XBTT", + "Xdebug", + "Xeon", + "Xtorrent", + "Xunlei", + "xxxxxxxxxxxxxxxxxxxxd", + "yyyyyyyyyyyyyyyyyyyyd", + "zerocopy" + ], + "enableFiletypes": [ + "dockerfile", + "shellscript", + "toml" + ] +} diff --git a/codecov.yaml b/codecov.yaml new file mode 100644 index 000000000..aaa25bf74 --- /dev/null +++ b/codecov.yaml @@ -0,0 +1,10 @@ +coverage: + status: + project: + default: + target: auto + threshold: 0.5% + patch: + default: + target: auto + threshold: 0.5% diff --git a/compose.yaml b/compose.yaml new file mode 100644 index 000000000..c2e7c63bd --- /dev/null +++ b/compose.yaml @@ -0,0 +1,51 @@ +name: torrust +services: + tracker: + image: torrust-tracker:release + tty: true + environment: + - 
TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER=${TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER:-mysql} + - TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN=${TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN:-MyAccessToken} + networks: + - server_side + ports: + - 6969:6969/udp + - 7070:7070 + - 1212:1212 + volumes: + - ./storage/tracker/lib:/var/lib/torrust/tracker:Z + - ./storage/tracker/log:/var/log/torrust/tracker:Z + - ./storage/tracker/etc:/etc/torrust/tracker:Z + depends_on: + - mysql + + mysql: + image: mysql:8.0 + command: "--default-authentication-plugin=mysql_native_password" + healthcheck: + test: + [ + "CMD-SHELL", + 'mysqladmin ping -h 127.0.0.1 --password="$$(cat /run/secrets/db-password)" --silent', + ] + interval: 3s + retries: 5 + start_period: 30s + environment: + - MYSQL_ROOT_HOST=% + - MYSQL_ROOT_PASSWORD=root_secret_password + - MYSQL_DATABASE=torrust_tracker + - MYSQL_USER=db_user + - MYSQL_PASSWORD=db_user_secret_password + networks: + - server_side + ports: + - 3306:3306 + volumes: + - mysql_data:/var/lib/mysql + +networks: + server_side: {} + +volumes: + mysql_data: {} diff --git a/contrib/bencode/Cargo.toml b/contrib/bencode/Cargo.toml new file mode 100644 index 000000000..e25a9b64f --- /dev/null +++ b/contrib/bencode/Cargo.toml @@ -0,0 +1,26 @@ +[package] +description = "(contrib) Efficient decoding and encoding for bencode." 
+keywords = ["bencode", "contrib", "library"] +name = "torrust-tracker-contrib-bencode" +readme = "README.md" + +authors = ["Nautilus Cyberneering , Andrew "] +license = "Apache-2.0" +repository = "https://github.com/torrust/bittorrent-infrastructure-project" + +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +publish.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +thiserror = "1" + +[dev-dependencies] +criterion = "0" + +[[bench]] +harness = false +name = "bencode_benchmark" diff --git a/contrib/bencode/README.md b/contrib/bencode/README.md new file mode 100644 index 000000000..7a203082b --- /dev/null +++ b/contrib/bencode/README.md @@ -0,0 +1,4 @@ +# Bencode +This library allows for the creation and parsing of bencode encodings. + +Bencode is the binary encoding used throughout bittorrent technologies from metainfo files to DHT messages. Bencode types include integers, byte arrays, lists, and dictionaries, of which the last two can hold any bencode type (they could be recursively constructed). 
\ No newline at end of file diff --git a/contrib/bencode/benches/bencode_benchmark.rs b/contrib/bencode/benches/bencode_benchmark.rs new file mode 100644 index 000000000..b79bb0999 --- /dev/null +++ b/contrib/bencode/benches/bencode_benchmark.rs @@ -0,0 +1,27 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use torrust_tracker_contrib_bencode::{BDecodeOpt, BencodeRef}; + +const B_NESTED_LISTS: &[u8; 100] = + b"lllllllllllllllllllllllllllllllllllllllllllllllllleeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"; // cspell:disable-line +const MULTI_KB_BENCODE: &[u8; 30004] = include_bytes!("multi_kb.bencode"); + +fn bench_nested_lists(bencode: &[u8]) { + BencodeRef::decode(bencode, BDecodeOpt::new(50, true, true)).unwrap(); +} + +fn bench_multi_kb_bencode(bencode: &[u8]) { + BencodeRef::decode(bencode, BDecodeOpt::default()).unwrap(); +} + +fn criterion_benchmark(c: &mut Criterion) { + c.bench_function("bencode nested lists", |b| { + b.iter(|| bench_nested_lists(black_box(B_NESTED_LISTS))); + }); + + c.bench_function("bencode multi kb", |b| { + b.iter(|| bench_multi_kb_bencode(black_box(MULTI_KB_BENCODE))); + }); +} + +criterion_group!(benches, criterion_benchmark); +criterion_main!(benches); diff --git a/contrib/bencode/benches/multi_kb.bencode b/contrib/bencode/benches/multi_kb.bencode new file mode 100644 index 000000000..b86f2846e --- /dev/null +++ b/contrib/bencode/benches/multi_kb.bencode @@ -0,0 +1 @@ +d7:comment17:Just Some Comment10:created by12:bip_metainfo13:creation 
datei1496618058e4:infod5:filesld6:lengthi1024e4:pathl1:b11:small_1.txteed6:lengthi1024e4:pathl1:b12:small_10.txteed6:lengthi1024e4:pathl1:b13:small_100.txteed6:lengthi1024e4:pathl1:b12:small_11.txteed6:lengthi1024e4:pathl1:b12:small_12.txteed6:lengthi1024e4:pathl1:b12:small_13.txteed6:lengthi1024e4:pathl1:b12:small_14.txteed6:lengthi1024e4:pathl1:b12:small_15.txteed6:lengthi1024e4:pathl1:b12:small_16.txteed6:lengthi1024e4:pathl1:b12:small_17.txteed6:lengthi1024e4:pathl1:b12:small_18.txteed6:lengthi1024e4:pathl1:b12:small_19.txteed6:lengthi1024e4:pathl1:b11:small_2.txteed6:lengthi1024e4:pathl1:b12:small_20.txteed6:lengthi1024e4:pathl1:b12:small_21.txteed6:lengthi1024e4:pathl1:b12:small_22.txteed6:lengthi1024e4:pathl1:b12:small_23.txteed6:lengthi1024e4:pathl1:b12:small_24.txteed6:lengthi1024e4:pathl1:b12:small_25.txteed6:lengthi1024e4:pathl1:b12:small_26.txteed6:lengthi1024e4:pathl1:b12:small_27.txteed6:lengthi1024e4:pathl1:b12:small_28.txteed6:lengthi1024e4:pathl1:b12:small_29.txteed6:lengthi1024e4:pathl1:b11:small_3.txteed6:lengthi1024e4:pathl1:b12:small_30.txteed6:lengthi1024e4:pathl1:b12:small_31.txteed6:lengthi1024e4:pathl1:b12:small_32.txteed6:lengthi1024e4:pathl1:b12:small_33.txteed6:lengthi1024e4:pathl1:b12:small_34.txteed6:lengthi1024e4:pathl1:b12:small_35.txteed6:lengthi1024e4:pathl1:b12:small_36.txteed6:lengthi1024e4:pathl1:b12:small_37.txteed6:lengthi1024e4:pathl1:b12:small_38.txteed6:lengthi1024e4:pathl1:b12:small_39.txteed6:lengthi1024e4:pathl1:b11:small_4.txteed6:lengthi1024e4:pathl1:b12:small_40.txteed6:lengthi1024e4:pathl1:b12:small_41.txteed6:lengthi1024e4:pathl1:b12:small_42.txteed6:lengthi1024e4:pathl1:b12:small_43.txteed6:lengthi1024e4:pathl1:b12:small_44.txteed6:lengthi1024e4:pathl1:b12:small_45.txteed6:lengthi1024e4:pathl1:b12:small_46.txteed6:lengthi1024e4:pathl1:b12:small_47.txteed6:lengthi1024e4:pathl1:b12:small_48.txteed6:lengthi1024e4:pathl1:b12:small_49.txteed6:lengthi1024e4:pathl1:b11:small_5.txteed6:lengthi1024e4:pathl1:b12:small_50.txte
ed6:lengthi1024e4:pathl1:b12:small_51.txteed6:lengthi1024e4:pathl1:b12:small_52.txteed6:lengthi1024e4:pathl1:b12:small_53.txteed6:lengthi1024e4:pathl1:b12:small_54.txteed6:lengthi1024e4:pathl1:b12:small_55.txteed6:lengthi1024e4:pathl1:b12:small_56.txteed6:lengthi1024e4:pathl1:b12:small_57.txteed6:lengthi1024e4:pathl1:b12:small_58.txteed6:lengthi1024e4:pathl1:b12:small_59.txteed6:lengthi1024e4:pathl1:b11:small_6.txteed6:lengthi1024e4:pathl1:b12:small_60.txteed6:lengthi1024e4:pathl1:b12:small_61.txteed6:lengthi1024e4:pathl1:b12:small_62.txteed6:lengthi1024e4:pathl1:b12:small_63.txteed6:lengthi1024e4:pathl1:b12:small_64.txteed6:lengthi1024e4:pathl1:b12:small_65.txteed6:lengthi1024e4:pathl1:b12:small_66.txteed6:lengthi1024e4:pathl1:b12:small_67.txteed6:lengthi1024e4:pathl1:b12:small_68.txteed6:lengthi1024e4:pathl1:b12:small_69.txteed6:lengthi1024e4:pathl1:b11:small_7.txteed6:lengthi1024e4:pathl1:b12:small_70.txteed6:lengthi1024e4:pathl1:b12:small_71.txteed6:lengthi1024e4:pathl1:b12:small_72.txteed6:lengthi1024e4:pathl1:b12:small_73.txteed6:lengthi1024e4:pathl1:b12:small_74.txteed6:lengthi1024e4:pathl1:b12:small_75.txteed6:lengthi1024e4:pathl1:b12:small_76.txteed6:lengthi1024e4:pathl1:b12:small_77.txteed6:lengthi1024e4:pathl1:b12:small_78.txteed6:lengthi1024e4:pathl1:b12:small_79.txteed6:lengthi1024e4:pathl1:b11:small_8.txteed6:lengthi1024e4:pathl1:b12:small_80.txteed6:lengthi1024e4:pathl1:b12:small_81.txteed6:lengthi1024e4:pathl1:b12:small_82.txteed6:lengthi1024e4:pathl1:b12:small_83.txteed6:lengthi1024e4:pathl1:b12:small_84.txteed6:lengthi1024e4:pathl1:b12:small_85.txteed6:lengthi1024e4:pathl1:b12:small_86.txteed6:lengthi1024e4:pathl1:b12:small_87.txteed6:lengthi1024e4:pathl1:b12:small_88.txteed6:lengthi1024e4:pathl1:b12:small_89.txteed6:lengthi1024e4:pathl1:b11:small_9.txteed6:lengthi1024e4:pathl1:b12:small_90.txteed6:lengthi1024e4:pathl1:b12:small_91.txteed6:lengthi1024e4:pathl1:b12:small_92.txteed6:lengthi1024e4:pathl1:b12:small_93.txteed6:lengthi1024e4:pathl1:b12:s
mall_94.txteed6:lengthi1024e4:pathl1:b12:small_95.txteed6:lengthi1024e4:pathl1:b12:small_96.txteed6:lengthi1024e4:pathl1:b12:small_97.txteed6:lengthi1024e4:pathl1:b12:small_98.txteed6:lengthi1024e4:pathl1:b12:small_99.txteed6:lengthi5368709120e4:pathl9:large.txteee4:name1:a12:piece lengthi4194304e6:pieces25620:+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ
\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;
Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰
ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8
ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â
;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z
‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/
8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•
â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}
Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽
/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ
•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;Ã+̽/8ñ\ë}Z‰ý…õ•â;ÃZiî½c^c.>óN7’±µìee \ No newline at end of file diff --git a/contrib/bencode/src/access/bencode.rs b/contrib/bencode/src/access/bencode.rs new file mode 100644 index 000000000..ee90296e2 --- /dev/null +++ b/contrib/bencode/src/access/bencode.rs @@ -0,0 +1,120 @@ +use crate::access::dict::BDictAccess; +use crate::access::list::BListAccess; + +/// Abstract representation of a `BencodeRef` object. +pub enum RefKind<'a, K, V> { + /// Bencode Integer. + Int(i64), + /// Bencode Bytes. + Bytes(&'a [u8]), + /// Bencode List. + List(&'a dyn BListAccess), + /// Bencode Dictionary. + Dict(&'a dyn BDictAccess), +} + +/// Trait for read access to some bencode type. +pub trait BRefAccess: Sized { + type BKey; + type BType: BRefAccess; + + /// Access the bencode as a `BencodeRefKind`. + fn kind(&self) -> RefKind<'_, Self::BKey, Self::BType>; + + /// Attempt to access the bencode as a `str`. + fn str(&self) -> Option<&str>; + + /// Attempt to access the bencode as an `i64`. + fn int(&self) -> Option; + + /// Attempt to access the bencode as an `[u8]`. + fn bytes(&self) -> Option<&[u8]>; + + /// Attempt to access the bencode as an `BListAccess`. + fn list(&self) -> Option<&dyn BListAccess>; + + /// Attempt to access the bencode as an `BDictAccess`. + fn dict(&self) -> Option<&dyn BDictAccess>; +} + +/// Trait for extended read access to some bencode type. +/// +/// Use this trait when you want to make sure that the lifetime of +/// the underlying buffers is tied to the lifetime of the backing +/// bencode buffer. +pub trait BRefAccessExt<'a>: BRefAccess { + /// Attempt to access the bencode as a `str`. + fn str_ext(&self) -> Option<&'a str>; + + /// Attempt to access the bencode as an `[u8]`. 
+ fn bytes_ext(&self) -> Option<&'a [u8]>; +} + +impl<'a, T> BRefAccess for &'a T +where + T: BRefAccess, +{ + type BKey = T::BKey; + type BType = T::BType; + + fn kind(&self) -> RefKind<'_, Self::BKey, Self::BType> { + (*self).kind() + } + + fn str(&self) -> Option<&str> { + (*self).str() + } + + fn int(&self) -> Option { + (*self).int() + } + + fn bytes(&self) -> Option<&[u8]> { + (*self).bytes() + } + + fn list(&self) -> Option<&dyn BListAccess> { + (*self).list() + } + + fn dict(&self) -> Option<&dyn BDictAccess> { + (*self).dict() + } +} + +impl<'a: 'b, 'b, T> BRefAccessExt<'a> for &'b T +where + T: BRefAccessExt<'a>, +{ + fn str_ext(&self) -> Option<&'a str> { + (*self).str_ext() + } + + fn bytes_ext(&self) -> Option<&'a [u8]> { + (*self).bytes_ext() + } +} + +/// Abstract representation of a `BencodeMut` object. +pub enum MutKind<'a, K, V> { + /// Bencode Integer. + Int(i64), + /// Bencode Bytes. + Bytes(&'a [u8]), + /// Bencode List. + List(&'a mut dyn BListAccess), + /// Bencode Dictionary. + Dict(&'a mut dyn BDictAccess), +} + +/// Trait for write access to some bencode type. +pub trait BMutAccess: Sized + BRefAccess { + /// Access the bencode as a `BencodeMutKind`. + fn kind_mut(&mut self) -> MutKind<'_, Self::BKey, Self::BType>; + + /// Attempt to access the bencode as a mutable `BListAccess`. + fn list_mut(&mut self) -> Option<&mut dyn BListAccess>; + + /// Attempt to access the bencode as a mutable `BDictAccess`. 
+ fn dict_mut(&mut self) -> Option<&mut dyn BDictAccess>; +} diff --git a/contrib/bencode/src/access/convert.rs b/contrib/bencode/src/access/convert.rs new file mode 100644 index 000000000..b2eb41d15 --- /dev/null +++ b/contrib/bencode/src/access/convert.rs @@ -0,0 +1,212 @@ +#![allow(clippy::missing_errors_doc)] +use crate::access::bencode::{BRefAccess, BRefAccessExt}; +use crate::access::dict::BDictAccess; +use crate::access::list::BListAccess; +use crate::BencodeConvertError; + +/// Trait for extended casting of bencode objects and converting conversion errors into application specific errors. +pub trait BConvertExt: BConvert { + /// See `BConvert::convert_bytes`. + fn convert_bytes_ext<'a, B, E>(&self, bencode: B, error_key: E) -> Result<&'a [u8], Self::Error> + where + B: BRefAccessExt<'a>, + E: AsRef<[u8]>, + { + bencode.bytes_ext().ok_or(self.handle_error(BencodeConvertError::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "Bytes".to_owned(), + })) + } + + /// See `BConvert::convert_str`. + fn convert_str_ext<'a, B, E>(&self, bencode: &B, error_key: E) -> Result<&'a str, Self::Error> + where + B: BRefAccessExt<'a>, + E: AsRef<[u8]>, + { + bencode.str_ext().ok_or(self.handle_error(BencodeConvertError::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "UTF-8 Bytes".to_owned(), + })) + } + + /// See `BConvert::lookup_and_convert_bytes`. + fn lookup_and_convert_bytes_ext<'a, B, K1, K2>( + &self, + dictionary: &dyn BDictAccess, + key: K2, + ) -> Result<&'a [u8], Self::Error> + where + B: BRefAccessExt<'a>, + K2: AsRef<[u8]>, + { + self.convert_bytes_ext(self.lookup(dictionary, &key)?, &key) + } + + /// See `BConvert::lookup_and_convert_str`. 
+ fn lookup_and_convert_str_ext<'a, B, K1, K2>( + &self, + dictionary: &dyn BDictAccess, + key: K2, + ) -> Result<&'a str, Self::Error> + where + B: BRefAccessExt<'a>, + K2: AsRef<[u8]>, + { + self.convert_str_ext(self.lookup(dictionary, &key)?, &key) + } +} + +/// Trait for casting bencode objects and converting conversion errors into application specific errors. +#[allow(clippy::module_name_repetitions)] +pub trait BConvert { + type Error; + + /// Convert the given conversion error into the appropriate error type. + fn handle_error(&self, error: BencodeConvertError) -> Self::Error; + + /// Attempt to convert the given bencode value into an integer. + /// + /// Error key is used to generate an appropriate error message should the operation return an error. + fn convert_int(&self, bencode: B, error_key: E) -> Result + where + B: BRefAccess, + E: AsRef<[u8]>, + { + bencode.int().ok_or(self.handle_error(BencodeConvertError::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "Integer".to_owned(), + })) + } + + /// Attempt to convert the given bencode value into bytes. + /// + /// Error key is used to generate an appropriate error message should the operation return an error. + fn convert_bytes<'a, B, E>(&self, bencode: &'a B, error_key: E) -> Result<&'a [u8], Self::Error> + where + B: BRefAccess, + E: AsRef<[u8]>, + { + bencode.bytes().ok_or(self.handle_error(BencodeConvertError::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "Bytes".to_owned(), + })) + } + + /// Attempt to convert the given bencode value into a UTF-8 string. + /// + /// Error key is used to generate an appropriate error message should the operation return an error. 
+ fn convert_str<'a, B, E>(&self, bencode: &'a B, error_key: E) -> Result<&'a str, Self::Error> + where + B: BRefAccess, + E: AsRef<[u8]>, + { + bencode.str().ok_or(self.handle_error(BencodeConvertError::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "UTF-8 Bytes".to_owned(), + })) + } + + /// Attempt to convert the given bencode value into a list. + /// + /// Error key is used to generate an appropriate error message should the operation return an error. + fn convert_list<'a, B, E>(&self, bencode: &'a B, error_key: E) -> Result<&'a dyn BListAccess, Self::Error> + where + B: BRefAccess, + E: AsRef<[u8]>, + { + bencode.list().ok_or(self.handle_error(BencodeConvertError::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "List".to_owned(), + })) + } + + /// Attempt to convert the given bencode value into a dictionary. + /// + /// Error key is used to generate an appropriate error message should the operation return an error. + fn convert_dict<'a, B, E>(&self, bencode: &'a B, error_key: E) -> Result<&'a dyn BDictAccess, Self::Error> + where + B: BRefAccess, + E: AsRef<[u8]>, + { + bencode.dict().ok_or(self.handle_error(BencodeConvertError::WrongType { + key: error_key.as_ref().to_owned(), + expected_type: "Dictionary".to_owned(), + })) + } + + /// Look up a value in a dictionary of bencoded values using the given key. + fn lookup<'a, B, K1, K2>(&self, dictionary: &'a dyn BDictAccess, key: K2) -> Result<&'a B, Self::Error> + where + B: BRefAccess, + K2: AsRef<[u8]>, + { + let key_ref = key.as_ref(); + + match dictionary.lookup(key_ref) { + Some(n) => Ok(n), + None => Err(self.handle_error(BencodeConvertError::MissingKey { key: key_ref.to_owned() })), + } + } + + /// Combines a lookup operation on the given key with a conversion of the value, if found, to an integer. 
+ fn lookup_and_convert_int(&self, dictionary: &dyn BDictAccess, key: K2) -> Result + where + B: BRefAccess, + K2: AsRef<[u8]>, + { + self.convert_int(self.lookup(dictionary, &key)?, &key) + } + + /// Combines a lookup operation on the given key with a conversion of the value, if found, to a series of bytes. + fn lookup_and_convert_bytes<'a, B, K1, K2>( + &self, + dictionary: &'a dyn BDictAccess, + key: K2, + ) -> Result<&'a [u8], Self::Error> + where + B: BRefAccess, + K2: AsRef<[u8]>, + { + self.convert_bytes(self.lookup(dictionary, &key)?, &key) + } + + /// Combines a lookup operation on the given key with a conversion of the value, if found, to a UTF-8 string. + fn lookup_and_convert_str<'a, B, K1, K2>( + &self, + dictionary: &'a dyn BDictAccess, + key: K2, + ) -> Result<&'a str, Self::Error> + where + B: BRefAccess, + K2: AsRef<[u8]>, + { + self.convert_str(self.lookup(dictionary, &key)?, &key) + } + + /// Combines a lookup operation on the given key with a conversion of the value, if found, to a list. + fn lookup_and_convert_list<'a, B, K1, K2>( + &self, + dictionary: &'a dyn BDictAccess, + key: K2, + ) -> Result<&'a dyn BListAccess, Self::Error> + where + B: BRefAccess, + K2: AsRef<[u8]>, + { + self.convert_list(self.lookup(dictionary, &key)?, &key) + } + + /// Combines a lookup operation on the given key with a conversion of the value, if found, to a dictionary. + fn lookup_and_convert_dict<'a, B, K1, K2>( + &self, + dictionary: &'a dyn BDictAccess, + key: K2, + ) -> Result<&'a dyn BDictAccess, Self::Error> + where + B: BRefAccess, + K2: AsRef<[u8]>, + { + self.convert_dict(self.lookup(dictionary, &key)?, &key) + } +} diff --git a/contrib/bencode/src/access/dict.rs b/contrib/bencode/src/access/dict.rs new file mode 100644 index 000000000..a3e56d1bb --- /dev/null +++ b/contrib/bencode/src/access/dict.rs @@ -0,0 +1,64 @@ +use std::borrow::Cow; +use std::collections::BTreeMap; + +/// Trait for working with generic map data structures. 
+pub trait BDictAccess { + /// Convert the dictionary to an unordered list of key/value pairs. + fn to_list(&self) -> Vec<(&K, &V)>; + + /// Lookup a value in the dictionary. + fn lookup(&self, key: &[u8]) -> Option<&V>; + + /// Lookup a mutable value in the dictionary. + fn lookup_mut(&mut self, key: &[u8]) -> Option<&mut V>; + + /// Insert a key/value pair into the dictionary. + fn insert(&mut self, key: K, value: V) -> Option; + + /// Remove a value from the dictionary and return it. + fn remove(&mut self, key: &[u8]) -> Option; +} + +impl<'a, V> BDictAccess<&'a [u8], V> for BTreeMap<&'a [u8], V> { + fn to_list(&self) -> Vec<(&&'a [u8], &V)> { + self.iter().collect() + } + + fn lookup(&self, key: &[u8]) -> Option<&V> { + self.get(key) + } + + fn lookup_mut(&mut self, key: &[u8]) -> Option<&mut V> { + self.get_mut(key) + } + + fn insert(&mut self, key: &'a [u8], value: V) -> Option { + self.insert(key, value) + } + + fn remove(&mut self, key: &[u8]) -> Option { + self.remove(key) + } +} + +impl<'a, V> BDictAccess, V> for BTreeMap, V> { + fn to_list(&self) -> Vec<(&Cow<'a, [u8]>, &V)> { + self.iter().collect() + } + + fn lookup(&self, key: &[u8]) -> Option<&V> { + self.get(key) + } + + fn lookup_mut(&mut self, key: &[u8]) -> Option<&mut V> { + self.get_mut(key) + } + + fn insert(&mut self, key: Cow<'a, [u8]>, value: V) -> Option { + self.insert(key, value) + } + + fn remove(&mut self, key: &[u8]) -> Option { + self.remove(key) + } +} diff --git a/contrib/bencode/src/access/list.rs b/contrib/bencode/src/access/list.rs new file mode 100644 index 000000000..840bffa1e --- /dev/null +++ b/contrib/bencode/src/access/list.rs @@ -0,0 +1,108 @@ +use std::ops::{Index, IndexMut}; + +/// Trait for working with generic list data structures. +pub trait BListAccess { + /// Get a list element at the given index. + fn get(&self, index: usize) -> Option<&V>; + + /// Get a mutable list element at the given index. 
+ fn get_mut(&mut self, index: usize) -> Option<&mut V>; + + /// Remove a list element at the given index. + fn remove(&mut self, index: usize) -> Option; + + /// Insert a list element at the given index. + fn insert(&mut self, index: usize, item: V); + + /// Push an element to the back of the list. + fn push(&mut self, item: V); + + /// Get the length of the list. + fn len(&self) -> usize; + + fn is_empty(&self) -> bool; +} + +impl<'a, V: 'a> Index for &'a dyn BListAccess { + type Output = V; + + fn index(&self, index: usize) -> &V { + self.get(index).unwrap() + } +} + +impl<'a, V: 'a> Index for &'a mut dyn BListAccess { + type Output = V; + + fn index(&self, index: usize) -> &V { + self.get(index).unwrap() + } +} + +impl<'a, V: 'a> IndexMut for &'a mut dyn BListAccess { + fn index_mut(&mut self, index: usize) -> &mut V { + self.get_mut(index).unwrap() + } +} + +impl<'a, V: 'a> IntoIterator for &'a dyn BListAccess { + type Item = &'a V; + type IntoIter = BListIter<'a, V>; + + fn into_iter(self) -> BListIter<'a, V> { + BListIter { index: 0, access: self } + } +} + +pub struct BListIter<'a, V> { + index: usize, + access: &'a dyn BListAccess, +} + +impl<'a, V> Iterator for BListIter<'a, V> { + type Item = &'a V; + + fn next(&mut self) -> Option<&'a V> { + let opt_next = self.access.get(self.index); + + if opt_next.is_some() { + self.index += 1; + } + + opt_next + } +} + +impl BListAccess for Vec { + fn get(&self, index: usize) -> Option<&V> { + self[..].get(index) + } + + fn get_mut(&mut self, index: usize) -> Option<&mut V> { + self[..].get_mut(index) + } + + fn remove(&mut self, index: usize) -> Option { + if index >= self[..].len() { + None + } else { + Some(Vec::remove(self, index)) + } + } + + fn insert(&mut self, index: usize, item: V) { + Vec::insert(self, index, item); + } + + fn push(&mut self, item: V) { + Vec::push(self, item); + } + + fn len(&self) -> usize { + Vec::len(self) + } + + fn is_empty(&self) -> bool { + Vec::is_empty(self) + } +} diff --git 
a/contrib/bencode/src/access/mod.rs b/contrib/bencode/src/access/mod.rs new file mode 100644 index 000000000..f14b032d4 --- /dev/null +++ b/contrib/bencode/src/access/mod.rs @@ -0,0 +1,4 @@ +pub mod bencode; +pub mod convert; +pub mod dict; +pub mod list; diff --git a/contrib/bencode/src/cow.rs b/contrib/bencode/src/cow.rs new file mode 100644 index 000000000..0d38c751b --- /dev/null +++ b/contrib/bencode/src/cow.rs @@ -0,0 +1,44 @@ +use std::borrow::Cow; + +/// Trait for macros to convert owned/borrowed types to `Cow`. +/// +/// This is needed because `&str` and `String` do not have `From` +/// implements into `Cow<_, [u8]>`. One solution is to just call `AsRef<[u8]>` +/// before converting. However, then when a user specifies an owned type, +/// we will implicitly borrow that; this trait prevents that so that macro +/// behavior is intuitive, so that owned types stay owned. +pub trait BCowConvert<'a> { + fn convert(self) -> Cow<'a, [u8]>; +} + +// TODO: Enable when specialization lands. 
+/* +impl<'a, T> BCowConvert<'a> for T where T: AsRef<[u8]> + 'a { + fn convert(self) -> Cow<'a, [u8]> { + self.into() + } +}*/ + +impl<'a> BCowConvert<'a> for &'a [u8] { + fn convert(self) -> Cow<'a, [u8]> { + self.into() + } +} + +impl<'a> BCowConvert<'a> for &'a str { + fn convert(self) -> Cow<'a, [u8]> { + self.as_bytes().into() + } +} + +impl BCowConvert<'static> for String { + fn convert(self) -> Cow<'static, [u8]> { + self.into_bytes().into() + } +} + +impl BCowConvert<'static> for Vec { + fn convert(self) -> Cow<'static, [u8]> { + self.into() + } +} diff --git a/contrib/bencode/src/error.rs b/contrib/bencode/src/error.rs new file mode 100644 index 000000000..6e661a068 --- /dev/null +++ b/contrib/bencode/src/error.rs @@ -0,0 +1,52 @@ +use thiserror::Error; + +#[allow(clippy::module_name_repetitions)] +#[derive(Error, Debug)] +pub enum BencodeParseError { + #[error("Incomplete Number Of Bytes At {pos}")] + BytesEmpty { pos: usize }, + + #[error("Invalid Byte Found At {pos}")] + InvalidByte { pos: usize }, + + #[error("Invalid Integer Found With No Delimiter At {pos}")] + InvalidIntNoDelimiter { pos: usize }, + + #[error("Invalid Integer Found As Negative Zero At {pos}")] + InvalidIntNegativeZero { pos: usize }, + + #[error("Invalid Integer Found With Zero Padding At {pos}")] + InvalidIntZeroPadding { pos: usize }, + + #[error("Invalid Integer Found To Fail Parsing At {pos}")] + InvalidIntParseError { pos: usize }, + + #[error("Invalid Dictionary Key Ordering Found At {pos} For Key {key:?}")] + InvalidKeyOrdering { pos: usize, key: Vec }, + + #[error("Invalid Dictionary Key Found At {pos} For Key {key:?}")] + InvalidKeyDuplicates { pos: usize, key: Vec }, + + #[error("Invalid Byte Length Found As Negative At {pos}")] + InvalidLengthNegative { pos: usize }, + + #[error("Invalid Byte Length Found To Overflow Buffer Length At {pos}")] + InvalidLengthOverflow { pos: usize }, + + #[error("Invalid Recursion Limit Exceeded At {pos} For Limit {max}")] + 
InvalidRecursionExceeded { pos: usize, max: usize }, +} + +pub type BencodeParseResult = Result; + +#[allow(clippy::module_name_repetitions)] +#[derive(Error, Debug)] +pub enum BencodeConvertError { + #[error("Missing Key In Bencode For {key:?}")] + MissingKey { key: Vec }, + + #[error("Wrong Type In Bencode For {key:?} Expected Type {expected_type}")] + WrongType { key: Vec, expected_type: String }, +} + +pub type BencodeConvertResult = Result; diff --git a/contrib/bencode/src/lib.rs b/contrib/bencode/src/lib.rs new file mode 100644 index 000000000..09aaa6867 --- /dev/null +++ b/contrib/bencode/src/lib.rs @@ -0,0 +1,139 @@ +//! Library for parsing and converting bencoded data. +//! +//! # Examples +//! +//! Decoding bencoded data: +//! +//! ```rust +//! extern crate bencode; +//! +//! use bencode::{BencodeRef, BRefAccess, BDecodeOpt}; +//! +//! fn main() { +//! let data = b"d12:lucky_numberi7ee"; // cspell:disable-line +//! let bencode = BencodeRef::decode(data, BDecodeOpt::default()).unwrap(); +//! +//! assert_eq!(7, bencode.dict().unwrap().lookup("lucky_number".as_bytes()) +//! .unwrap().int().unwrap()); +//! } +//! ``` +//! +//! Encoding bencoded data: +//! +//! ```rust +//! #[macro_use] +//! extern crate bencode; +//! +//! fn main() { +//! let message = (ben_map!{ +//! "lucky_number" => ben_int!(7), +//! "lucky_string" => ben_bytes!("7") +//! }).encode(); +//! +//! let data = b"d12:lucky_numberi7e12:lucky_string1:7e"; // cspell:disable-line +//! assert_eq!(&data[..], &message[..]); +//! } +//! ``` + +mod access; +mod cow; +mod error; +mod mutable; +mod reference; + +/// Traits for implementation functionality. +pub mod inner { + pub use crate::cow::BCowConvert; +} + +/// Traits for extended functionality. 
+pub mod ext { + #[allow(clippy::module_name_repetitions)] + pub use crate::access::bencode::BRefAccessExt; + #[allow(clippy::module_name_repetitions)] + pub use crate::access::convert::BConvertExt; +} + +#[deprecated(since = "1.0.0", note = "use `MutKind` instead.")] +pub use crate::access::bencode::MutKind as BencodeMutKind; +#[deprecated(since = "1.0.0", note = "use `RefKind` instead.")] +pub use crate::access::bencode::RefKind as BencodeRefKind; +pub use crate::access::bencode::{BMutAccess, BRefAccess, MutKind, RefKind}; +pub use crate::access::convert::BConvert; +pub use crate::access::dict::BDictAccess; +pub use crate::access::list::BListAccess; +pub use crate::error::{BencodeConvertError, BencodeConvertResult, BencodeParseError, BencodeParseResult}; +pub use crate::mutable::bencode_mut::BencodeMut; +pub use crate::reference::bencode_ref::BencodeRef; +pub use crate::reference::decode_opt::BDecodeOpt; + +const BEN_END: u8 = b'e'; +const DICT_START: u8 = b'd'; +const LIST_START: u8 = b'l'; +const INT_START: u8 = b'i'; + +const BYTE_LEN_LOW: u8 = b'0'; +const BYTE_LEN_HIGH: u8 = b'9'; +const BYTE_LEN_END: u8 = b':'; + +/// Construct a `BencodeMut` map by supplying string references as keys and `BencodeMut` as values. +#[macro_export] +macro_rules! ben_map { +( $($key:expr => $val:expr),* ) => { + { + use $crate::{BMutAccess, BencodeMut}; + use $crate::inner::BCowConvert; + + let mut bencode_map = BencodeMut::new_dict(); + { + let map = bencode_map.dict_mut().unwrap(); + $( + map.insert(BCowConvert::convert($key), $val); + )* + } + + bencode_map + } + } +} + +/// Construct a `BencodeMut` list by supplying a list of `BencodeMut` values. +#[macro_export] +macro_rules! 
ben_list { + ( $($ben:expr),* ) => { + { + use $crate::{BencodeMut, BMutAccess}; + + let mut bencode_list = BencodeMut::new_list(); + { + let list = bencode_list.list_mut().unwrap(); + $( + list.push($ben); + )* + } + + bencode_list + } + } +} + +/// Construct `BencodeMut` bytes by supplying a type convertible to `Vec`. +#[macro_export] +macro_rules! ben_bytes { + ( $ben:expr ) => {{ + use $crate::inner::BCowConvert; + use $crate::BencodeMut; + + BencodeMut::new_bytes(BCowConvert::convert($ben)) + }}; +} + +/// Construct a `BencodeMut` integer by supplying an `i64`. +#[macro_export] +macro_rules! ben_int { + ( $ben:expr ) => {{ + use $crate::BencodeMut; + + BencodeMut::new_int($ben) + }}; +} diff --git a/contrib/bencode/src/mutable/bencode_mut.rs b/contrib/bencode/src/mutable/bencode_mut.rs new file mode 100644 index 000000000..a3f95dbbf --- /dev/null +++ b/contrib/bencode/src/mutable/bencode_mut.rs @@ -0,0 +1,229 @@ +use std::borrow::Cow; +use std::collections::BTreeMap; +use std::str; + +use crate::access::bencode::{BMutAccess, BRefAccess, MutKind, RefKind}; +use crate::access::dict::BDictAccess; +use crate::access::list::BListAccess; +use crate::mutable::encode; + +/// Bencode object that holds references to the underlying data. +#[derive(Debug, Eq, PartialEq, Clone, Hash)] +pub enum Inner<'a> { + /// Bencode Integer. + Int(i64), + /// Bencode Bytes. + Bytes(Cow<'a, [u8]>), + /// Bencode List. + List(Vec>), + /// Bencode Dictionary. + Dict(BTreeMap, BencodeMut<'a>>), +} + +/// `BencodeMut` object that stores references to some data. +#[derive(Debug, Eq, PartialEq, Clone, Hash)] +pub struct BencodeMut<'a> { + inner: Inner<'a>, +} + +impl<'a> BencodeMut<'a> { + fn new(inner: Inner<'a>) -> BencodeMut<'a> { + BencodeMut { inner } + } + + /// Create a new `BencodeMut` representing an `i64`. + #[must_use] + pub fn new_int(value: i64) -> BencodeMut<'a> { + BencodeMut::new(Inner::Int(value)) + } + + /// Create a new `BencodeMut` representing a `[u8]`. 
+ #[must_use] + pub fn new_bytes(value: Cow<'a, [u8]>) -> BencodeMut<'a> { + BencodeMut::new(Inner::Bytes(value)) + } + + /// Create a new `BencodeMut` representing a `BListAccess`. + #[must_use] + pub fn new_list() -> BencodeMut<'a> { + BencodeMut::new(Inner::List(Vec::new())) + } + + /// Create a new `BencodeMut` representing a `BDictAccess`. + #[must_use] + pub fn new_dict() -> BencodeMut<'a> { + BencodeMut::new(Inner::Dict(BTreeMap::new())) + } + + /// Encode the `BencodeMut` into a buffer representing the bencode. + #[must_use] + pub fn encode(&self) -> Vec { + let mut buffer = Vec::new(); + + encode::encode(self, &mut buffer); + + buffer + } +} + +impl<'a> BRefAccess for BencodeMut<'a> { + type BKey = Cow<'a, [u8]>; + type BType = BencodeMut<'a>; + + fn kind<'b>(&'b self) -> RefKind<'b, Cow<'a, [u8]>, BencodeMut<'a>> { + match self.inner { + Inner::Int(n) => RefKind::Int(n), + Inner::Bytes(ref n) => RefKind::Bytes(n), + Inner::List(ref n) => RefKind::List(n), + Inner::Dict(ref n) => RefKind::Dict(n), + } + } + + fn str(&self) -> Option<&str> { + let bytes = self.bytes()?; + + match str::from_utf8(bytes) { + Ok(n) => Some(n), + Err(_) => None, + } + } + + fn int(&self) -> Option { + match self.inner { + Inner::Int(n) => Some(n), + _ => None, + } + } + + fn bytes(&self) -> Option<&[u8]> { + match self.inner { + Inner::Bytes(ref n) => Some(n.as_ref()), + _ => None, + } + } + + fn list(&self) -> Option<&dyn BListAccess>> { + match self.inner { + Inner::List(ref n) => Some(n), + _ => None, + } + } + + fn dict(&self) -> Option<&dyn BDictAccess, BencodeMut<'a>>> { + match self.inner { + Inner::Dict(ref n) => Some(n), + _ => None, + } + } +} + +impl<'a> BMutAccess for BencodeMut<'a> { + fn kind_mut<'b>(&'b mut self) -> MutKind<'b, Cow<'a, [u8]>, BencodeMut<'a>> { + match self.inner { + Inner::Int(n) => MutKind::Int(n), + Inner::Bytes(ref mut n) => MutKind::Bytes((*n).as_ref()), + Inner::List(ref mut n) => MutKind::List(n), + Inner::Dict(ref mut n) => 
MutKind::Dict(n), + } + } + + fn list_mut(&mut self) -> Option<&mut dyn BListAccess>> { + match self.inner { + Inner::List(ref mut n) => Some(n), + _ => None, + } + } + + fn dict_mut(&mut self) -> Option<&mut dyn BDictAccess, BencodeMut<'a>>> { + match self.inner { + Inner::Dict(ref mut n) => Some(n), + _ => None, + } + } +} + +// impl<'a> From> for BencodeMut<'a> { +// fn from(value: BencodeRef<'a>) -> Self { +// let inner = match value.kind() { +// BencodeRefKind::Int(value) => InnerBencodeMut::Int(value), +// BencodeRefKind::Bytes(value) => InnerBencodeMut::Bytes(Cow::Owned(Vec::from(value))), +// BencodeRefKind::List(value) => { +// InnerBencodeMut::List(value.clone().into_iter().map(|b| BencodeMut::from(b.clone())).collect()) +// } +// BencodeRefKind::Dict(value) => InnerBencodeMut::Dict( +// value +// .to_list() +// .into_iter() +// .map(|(key, value)| (Cow::Owned(Vec::from(*key)), BencodeMut::from(value.clone()))) +// .collect(), +// ), +// }; +// BencodeMut { inner } +// } +// } + +#[cfg(test)] +mod test { + use crate::access::bencode::BMutAccess; + use crate::mutable::bencode_mut::BencodeMut; + + #[test] + fn positive_int_encode() { + let bencode_int = BencodeMut::new_int(-560); + + let int_bytes = b"i-560e"; // cspell:disable-line + assert_eq!(&int_bytes[..], &bencode_int.encode()[..]); + } + + #[test] + fn positive_bytes_encode() { + /* cspell:disable-next-line */ + let bencode_bytes = BencodeMut::new_bytes((&b"asdasd"[..]).into()); + + let bytes_bytes = b"6:asdasd"; // cspell:disable-line + assert_eq!(&bytes_bytes[..], &bencode_bytes.encode()[..]); + } + + #[test] + fn positive_empty_list_encode() { + let bencode_list = BencodeMut::new_list(); + + let list_bytes = b"le"; // cspell:disable-line + assert_eq!(&list_bytes[..], &bencode_list.encode()[..]); + } + + #[test] + fn positive_nonempty_list_encode() { + let mut bencode_list = BencodeMut::new_list(); + + { + let list_mut = bencode_list.list_mut().unwrap(); + list_mut.push(BencodeMut::new_int(56)); + 
} + + let list_bytes = b"li56ee"; // cspell:disable-line + assert_eq!(&list_bytes[..], &bencode_list.encode()[..]); + } + + #[test] + fn positive_empty_dict_encode() { + let bencode_dict = BencodeMut::new_dict(); + + let dict_bytes = b"de"; // cspell:disable-line + assert_eq!(&dict_bytes[..], &bencode_dict.encode()[..]); + } + + #[test] + fn positive_nonempty_dict_encode() { + let mut bencode_dict = BencodeMut::new_dict(); + + { + let dict_mut = bencode_dict.dict_mut().unwrap(); + /* cspell:disable-next-line */ + dict_mut.insert((&b"asd"[..]).into(), BencodeMut::new_bytes((&b"asdasd"[..]).into())); + } + + let dict_bytes = b"d3:asd6:asdasde"; // cspell:disable-line + assert_eq!(&dict_bytes[..], &bencode_dict.encode()[..]); + } +} diff --git a/contrib/bencode/src/mutable/encode.rs b/contrib/bencode/src/mutable/encode.rs new file mode 100644 index 000000000..811c35816 --- /dev/null +++ b/contrib/bencode/src/mutable/encode.rs @@ -0,0 +1,67 @@ +use std::iter::Extend; + +use crate::access::bencode::{BRefAccess, RefKind}; +use crate::access::dict::BDictAccess; +use crate::access::list::BListAccess; + +pub fn encode(val: T, bytes: &mut Vec) +where + T: BRefAccess, + T::BKey: AsRef<[u8]>, +{ + match val.kind() { + RefKind::Int(n) => encode_int(n, bytes), + RefKind::Bytes(n) => encode_bytes(n, bytes), + RefKind::List(n) => encode_list(n, bytes), + RefKind::Dict(n) => encode_dict(n, bytes), + } +} + +fn encode_int(val: i64, bytes: &mut Vec) { + bytes.push(crate::INT_START); + + bytes.extend(val.to_string().into_bytes()); + + bytes.push(crate::BEN_END); +} + +fn encode_bytes(list: &[u8], bytes: &mut Vec) { + bytes.extend(list.len().to_string().into_bytes()); + + bytes.push(crate::BYTE_LEN_END); + + bytes.extend(list.iter().copied()); +} + +fn encode_list(list: &dyn BListAccess, bytes: &mut Vec) +where + T: BRefAccess, + T::BKey: AsRef<[u8]>, +{ + bytes.push(crate::LIST_START); + + for i in list { + encode(i, bytes); + } + + bytes.push(crate::BEN_END); +} + +fn 
encode_dict(dict: &dyn BDictAccess, bytes: &mut Vec) +where + K: AsRef<[u8]>, + V: BRefAccess, + V::BKey: AsRef<[u8]>, +{ + // Need To Sort The Keys In The Map Before Encoding + let mut sort_dict = dict.to_list(); + sort_dict.sort_by(|&(a, _), &(b, _)| a.as_ref().cmp(b.as_ref())); + + bytes.push(crate::DICT_START); + // Iterate And Dictionary Encode The (String, Bencode) Pairs + for (key, value) in &sort_dict { + encode_bytes(key.as_ref(), bytes); + encode(value, bytes); + } + bytes.push(crate::BEN_END); +} diff --git a/contrib/bencode/src/mutable/mod.rs b/contrib/bencode/src/mutable/mod.rs new file mode 100644 index 000000000..329ee9f7a --- /dev/null +++ b/contrib/bencode/src/mutable/mod.rs @@ -0,0 +1,2 @@ +pub mod bencode_mut; +mod encode; diff --git a/contrib/bencode/src/reference/bencode_ref.rs b/contrib/bencode/src/reference/bencode_ref.rs new file mode 100644 index 000000000..73aaad039 --- /dev/null +++ b/contrib/bencode/src/reference/bencode_ref.rs @@ -0,0 +1,262 @@ +use std::collections::BTreeMap; +use std::str; + +use crate::access::bencode::{BRefAccess, BRefAccessExt, RefKind}; +use crate::access::dict::BDictAccess; +use crate::access::list::BListAccess; +use crate::error::{BencodeParseError, BencodeParseResult}; +use crate::reference::decode; +use crate::reference::decode_opt::BDecodeOpt; + +/// Bencode object that holds references to the underlying data. +#[derive(Debug, Eq, PartialEq, Clone, Hash)] +pub enum Inner<'a> { + /// Bencode Integer. + Int(i64, &'a [u8]), + /// Bencode Bytes. + Bytes(&'a [u8], &'a [u8]), + /// Bencode List. + List(Vec>, &'a [u8]), + /// Bencode Dictionary. + Dict(BTreeMap<&'a [u8], BencodeRef<'a>>, &'a [u8]), +} + +impl<'a> From> for BencodeRef<'a> { + fn from(val: Inner<'a>) -> Self { + BencodeRef { inner: val } + } +} + +/// `BencodeRef` object that stores references to some buffer. 
+#[derive(Debug, Eq, PartialEq, Clone, Hash)] +pub struct BencodeRef<'a> { + inner: Inner<'a>, +} + +impl<'a> BencodeRef<'a> { + /// Decode the given bytes into a `BencodeRef` using the given decode options. + #[allow(clippy::missing_errors_doc)] + pub fn decode(bytes: &'a [u8], opts: BDecodeOpt) -> BencodeParseResult> { + // Apply try so any errors return before the eof check + let (bencode, end_pos) = decode::decode(bytes, 0, opts, 0)?; + + if end_pos != bytes.len() && opts.enforce_full_decode() { + return Err(BencodeParseError::BytesEmpty { pos: end_pos }); + } + + Ok(bencode) + } + + /// Get a byte slice of the current bencode byte representation. + #[must_use] + pub fn buffer(&self) -> &'a [u8] { + #[allow(clippy::match_same_arms)] + match self.inner { + Inner::Int(_, buffer) => buffer, + Inner::Bytes(_, buffer) => buffer, + Inner::List(_, buffer) => buffer, + Inner::Dict(_, buffer) => buffer, + } + } +} + +impl<'a> BRefAccess for BencodeRef<'a> { + type BKey = &'a [u8]; + type BType = BencodeRef<'a>; + + fn kind<'b>(&'b self) -> RefKind<'b, &'a [u8], BencodeRef<'a>> { + match self.inner { + Inner::Int(n, _) => RefKind::Int(n), + Inner::Bytes(n, _) => RefKind::Bytes(n), + Inner::List(ref n, _) => RefKind::List(n), + Inner::Dict(ref n, _) => RefKind::Dict(n), + } + } + + fn str(&self) -> Option<&str> { + self.str_ext() + } + + fn int(&self) -> Option { + match self.inner { + Inner::Int(n, _) => Some(n), + _ => None, + } + } + + fn bytes(&self) -> Option<&[u8]> { + self.bytes_ext() + } + + fn list(&self) -> Option<&dyn BListAccess>> { + match self.inner { + Inner::List(ref n, _) => Some(n), + _ => None, + } + } + + fn dict(&self) -> Option<&dyn BDictAccess<&'a [u8], BencodeRef<'a>>> { + match self.inner { + Inner::Dict(ref n, _) => Some(n), + _ => None, + } + } +} + +impl<'a> BRefAccessExt<'a> for BencodeRef<'a> { + fn str_ext(&self) -> Option<&'a str> { + let bytes = self.bytes_ext()?; + + match str::from_utf8(bytes) { + Ok(n) => Some(n), + Err(_) => None, + } 
+ } + + fn bytes_ext(&self) -> Option<&'a [u8]> { + match self.inner { + Inner::Bytes(n, _) => Some(&n[0..]), + _ => None, + } + } +} + +#[cfg(test)] +mod tests { + + use crate::access::bencode::BRefAccess; + use crate::reference::bencode_ref::BencodeRef; + use crate::reference::decode_opt::BDecodeOpt; + + #[test] + fn positive_int_buffer() { + let int_bytes = b"i-500e"; // cspell:disable-line + let bencode = BencodeRef::decode(&int_bytes[..], BDecodeOpt::default()).unwrap(); + + assert_eq!(int_bytes, bencode.buffer()); + } + + #[test] + fn positive_bytes_buffer() { + let bytes_bytes = b"3:asd"; // cspell:disable-line + let bencode = BencodeRef::decode(&bytes_bytes[..], BDecodeOpt::default()).unwrap(); + + assert_eq!(bytes_bytes, bencode.buffer()); + } + + #[test] + fn positive_list_buffer() { + let list_bytes = b"l3:asde"; // cspell:disable-line + let bencode = BencodeRef::decode(&list_bytes[..], BDecodeOpt::default()).unwrap(); + + assert_eq!(list_bytes, bencode.buffer()); + } + + #[test] + fn positive_dict_buffer() { + let dict_bytes = b"d3:asd3:asde"; // cspell:disable-line + let bencode = BencodeRef::decode(&dict_bytes[..], BDecodeOpt::default()).unwrap(); + + assert_eq!(dict_bytes, bencode.buffer()); + } + + #[test] + fn positive_list_nested_int_buffer() { + let nested_int_bytes = b"li-500ee"; // cspell:disable-line + let bencode = BencodeRef::decode(&nested_int_bytes[..], BDecodeOpt::default()).unwrap(); + + let bencode_list = bencode.list().unwrap(); + let bencode_int = bencode_list.get(0).unwrap(); + + let int_bytes = b"i-500e"; // cspell:disable-line + assert_eq!(int_bytes, bencode_int.buffer()); + } + + #[test] + fn positive_dict_nested_int_buffer() { + let nested_int_bytes = b"d3:asdi-500ee"; // cspell:disable-line + let bencode = BencodeRef::decode(&nested_int_bytes[..], BDecodeOpt::default()).unwrap(); + + let bencode_dict = bencode.dict().unwrap(); + /* cspell:disable-next-line */ + let bencode_int = bencode_dict.lookup(&b"asd"[..]).unwrap(); + + let 
int_bytes = b"i-500e"; // cspell:disable-line + assert_eq!(int_bytes, bencode_int.buffer()); + } + + #[test] + fn positive_list_nested_bytes_buffer() { + let nested_bytes_bytes = b"l3:asde"; // cspell:disable-line + let bencode = BencodeRef::decode(&nested_bytes_bytes[..], BDecodeOpt::default()).unwrap(); + + let bencode_list = bencode.list().unwrap(); + let bencode_bytes = bencode_list.get(0).unwrap(); + + let bytes_bytes = b"3:asd"; // cspell:disable-line + assert_eq!(bytes_bytes, bencode_bytes.buffer()); + } + + #[test] + fn positive_dict_nested_bytes_buffer() { + let nested_bytes_bytes = b"d3:asd3:asde"; // cspell:disable-line + let bencode = BencodeRef::decode(&nested_bytes_bytes[..], BDecodeOpt::default()).unwrap(); + + let bencode_dict = bencode.dict().unwrap(); + /* cspell:disable-next-line */ + let bencode_bytes = bencode_dict.lookup(&b"asd"[..]).unwrap(); + + let bytes_bytes = b"3:asd"; // cspell:disable-line + assert_eq!(bytes_bytes, bencode_bytes.buffer()); + } + + #[test] + fn positive_list_nested_list_buffer() { + let nested_list_bytes = b"ll3:asdee"; // cspell:disable-line + let bencode = BencodeRef::decode(&nested_list_bytes[..], BDecodeOpt::default()).unwrap(); + + let bencode_list = bencode.list().unwrap(); + let bencode_list = bencode_list.get(0).unwrap(); + + let list_bytes = b"l3:asde"; // cspell:disable-line + assert_eq!(list_bytes, bencode_list.buffer()); + } + + #[test] + fn positive_dict_nested_list_buffer() { + let nested_list_bytes = b"d3:asdl3:asdee"; // cspell:disable-line + let bencode = BencodeRef::decode(&nested_list_bytes[..], BDecodeOpt::default()).unwrap(); + + let bencode_dict = bencode.dict().unwrap(); + /* cspell:disable-next-line */ + let bencode_list = bencode_dict.lookup(&b"asd"[..]).unwrap(); + + let list_bytes = b"l3:asde"; // cspell:disable-line + assert_eq!(list_bytes, bencode_list.buffer()); + } + + #[test] + fn positive_list_nested_dict_buffer() { + let nested_dict_bytes = b"ld3:asd3:asdee"; // cspell:disable-line + 
let bencode = BencodeRef::decode(&nested_dict_bytes[..], BDecodeOpt::default()).unwrap(); + + let bencode_list = bencode.list().unwrap(); + let bencode_dict = bencode_list.get(0).unwrap(); + + let dict_bytes = b"d3:asd3:asde"; // cspell:disable-line + assert_eq!(dict_bytes, bencode_dict.buffer()); + } + + #[test] + fn positive_dict_nested_dict_buffer() { + let nested_dict_bytes = b"d3:asdd3:asd3:asdee"; // cspell:disable-line + let bencode = BencodeRef::decode(&nested_dict_bytes[..], BDecodeOpt::default()).unwrap(); + + let bencode_dict = bencode.dict().unwrap(); + /* cspell:disable-next-line */ + let bencode_dict = bencode_dict.lookup(&b"asd"[..]).unwrap(); + + let dict_bytes = b"d3:asd3:asde"; // cspell:disable-line + assert_eq!(dict_bytes, bencode_dict.buffer()); + } +} diff --git a/contrib/bencode/src/reference/decode.rs b/contrib/bencode/src/reference/decode.rs new file mode 100644 index 000000000..97c5cf1ff --- /dev/null +++ b/contrib/bencode/src/reference/decode.rs @@ -0,0 +1,376 @@ +use std::collections::btree_map::Entry; +use std::collections::BTreeMap; +use std::str; + +use crate::error::{BencodeParseError, BencodeParseResult}; +use crate::reference::bencode_ref::{BencodeRef, Inner}; +use crate::reference::decode_opt::BDecodeOpt; + +pub fn decode(bytes: &[u8], pos: usize, opts: BDecodeOpt, depth: usize) -> BencodeParseResult<(BencodeRef<'_>, usize)> { + if depth >= opts.max_recursion() { + return Err(BencodeParseError::InvalidRecursionExceeded { pos, max: depth }); + } + let curr_byte = peek_byte(bytes, pos)?; + + match curr_byte { + crate::INT_START => { + let (bencode, next_pos) = decode_int(bytes, pos + 1, crate::BEN_END)?; + Ok((Inner::Int(bencode, &bytes[pos..next_pos]).into(), next_pos)) + } + crate::LIST_START => { + let (bencode, next_pos) = decode_list(bytes, pos + 1, opts, depth)?; + Ok((Inner::List(bencode, &bytes[pos..next_pos]).into(), next_pos)) + } + crate::DICT_START => { + let (bencode, next_pos) = decode_dict(bytes, pos + 1, opts, 
depth)?; + Ok((Inner::Dict(bencode, &bytes[pos..next_pos]).into(), next_pos)) + } + crate::BYTE_LEN_LOW..=crate::BYTE_LEN_HIGH => { + let (bencode, next_pos) = decode_bytes(bytes, pos)?; + // Include the length digit, don't increment position + Ok((Inner::Bytes(bencode, &bytes[pos..next_pos]).into(), next_pos)) + } + _ => Err(BencodeParseError::InvalidByte { pos }), + } +} + +fn decode_int(bytes: &[u8], pos: usize, delim: u8) -> BencodeParseResult<(i64, usize)> { + let (_, begin_decode) = bytes.split_at(pos); + + let Some(relative_end_pos) = begin_decode.iter().position(|n| *n == delim) else { + return Err(BencodeParseError::InvalidIntNoDelimiter { pos }); + }; + let int_byte_slice = &begin_decode[..relative_end_pos]; + + if int_byte_slice.len() > 1 { + // Negative zero is not allowed (this would not be caught when converting) + if int_byte_slice[0] == b'-' && int_byte_slice[1] == b'0' { + return Err(BencodeParseError::InvalidIntNegativeZero { pos }); + } + + // Zero padding is illegal, and unspecified for key lengths (we disallow both) + if int_byte_slice[0] == b'0' { + return Err(BencodeParseError::InvalidIntZeroPadding { pos }); + } + } + + let Ok(int_str) = str::from_utf8(int_byte_slice) else { + return Err(BencodeParseError::InvalidIntParseError { pos }); + }; + + // Position of end of integer type, next byte is the start of the next value + let absolute_end_pos = pos + relative_end_pos; + let next_pos = absolute_end_pos + 1; + match int_str.parse::() { + Ok(n) => Ok((n, next_pos)), + Err(_) => Err(BencodeParseError::InvalidIntParseError { pos }), + } +} + +use std::convert::TryFrom; + +fn decode_bytes(bytes: &[u8], pos: usize) -> BencodeParseResult<(&[u8], usize)> { + let (num_bytes, start_pos) = decode_int(bytes, pos, crate::BYTE_LEN_END)?; + + if num_bytes < 0 { + return Err(BencodeParseError::InvalidLengthNegative { pos }); + } + + // Use usize::try_from to handle potential overflow + let num_bytes = usize::try_from(num_bytes).map_err(|_| 
BencodeParseError::InvalidLengthOverflow { pos })?; + + if num_bytes > bytes[start_pos..].len() { + return Err(BencodeParseError::InvalidLengthOverflow { pos }); + } + + let next_pos = start_pos + num_bytes; + Ok((&bytes[start_pos..next_pos], next_pos)) +} + +fn decode_list(bytes: &[u8], pos: usize, opts: BDecodeOpt, depth: usize) -> BencodeParseResult<(Vec>, usize)> { + let mut bencode_list = Vec::new(); + + let mut curr_pos = pos; + let mut curr_byte = peek_byte(bytes, curr_pos)?; + + while curr_byte != crate::BEN_END { + let (bencode, next_pos) = decode(bytes, curr_pos, opts, depth + 1)?; + + bencode_list.push(bencode); + + curr_pos = next_pos; + curr_byte = peek_byte(bytes, curr_pos)?; + } + + let next_pos = curr_pos + 1; + Ok((bencode_list, next_pos)) +} + +fn decode_dict( + bytes: &[u8], + pos: usize, + opts: BDecodeOpt, + depth: usize, +) -> BencodeParseResult<(BTreeMap<&[u8], BencodeRef<'_>>, usize)> { + let mut bencode_dict = BTreeMap::new(); + + let mut curr_pos = pos; + let mut curr_byte = peek_byte(bytes, curr_pos)?; + + while curr_byte != crate::BEN_END { + let (key_bytes, next_pos) = decode_bytes(bytes, curr_pos)?; + + // Spec says that the keys must be in alphabetical order + match (bencode_dict.keys().last(), opts.check_key_sort()) { + (Some(last_key), true) if key_bytes < *last_key => { + return Err(BencodeParseError::InvalidKeyOrdering { + pos: curr_pos, + key: key_bytes.to_vec(), + }) + } + _ => (), + }; + curr_pos = next_pos; + + let (value, next_pos) = decode(bytes, curr_pos, opts, depth + 1)?; + match bencode_dict.entry(key_bytes) { + Entry::Vacant(n) => n.insert(value), + Entry::Occupied(_) => { + return Err(BencodeParseError::InvalidKeyDuplicates { + pos: curr_pos, + key: key_bytes.to_vec(), + }) + } + }; + + curr_pos = next_pos; + curr_byte = peek_byte(bytes, curr_pos)?; + } + + let next_pos = curr_pos + 1; + Ok((bencode_dict, next_pos)) +} + +fn peek_byte(bytes: &[u8], pos: usize) -> BencodeParseResult { + 
bytes.get(pos).copied().ok_or(BencodeParseError::BytesEmpty { pos }) +} + +#[cfg(test)] +mod tests { + + use crate::access::bencode::BRefAccess; + use crate::reference::bencode_ref::BencodeRef; + use crate::reference::decode_opt::BDecodeOpt; + + /* cSpell:disable */ + // Positive Cases + const GENERAL: &[u8] = b"d0:12:zero_len_key8:location17:udp://test.com:8011:nested dictd4:listli-500500eee6:numberi500500ee"; + const RECURSION: &[u8] = b"lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllleeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee
eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"; + const BYTES_UTF8: &[u8] = b"16:valid_utf8_bytes"; + const DICTIONARY: &[u8] = b"d9:test_dictd10:nested_key12:nested_value11:nested_listli500ei-500ei0eee8:test_key10:test_valuee"; + const LIST: &[u8] = b"l10:test_bytesi500ei0ei-500el12:nested_bytesed8:test_key10:test_valueee"; + const BYTES: &[u8] = b"5:\xC5\xE6\xBE\xE6\xF2"; + const BYTES_ZERO_LEN: &[u8] = b"0:"; + const INT: &[u8] = b"i500e"; + const INT_NEGATIVE: &[u8] = b"i-500e"; + const INT_ZERO: &[u8] = b"i0e"; + const PARTIAL: &[u8] = b"i0e_asd"; + + // Negative Cases + const BYTES_NEG_LEN: &[u8] = b"-4:test"; + const BYTES_EXTRA: &[u8] = b"l15:processed_bytese17:unprocessed_bytes"; + const BYTES_NOT_UTF8: &[u8] = b"5:\xC5\xE6\xBE\xE6\xF2"; + const INT_NAN: &[u8] = b"i500a500e"; + const INT_LEADING_ZERO: &[u8] = b"i0500e"; + const INT_DOUBLE_ZERO: &[u8] = b"i00e"; + const INT_NEGATIVE_ZERO: &[u8] = b"i-0e"; + const INT_DOUBLE_NEGATIVE: &[u8] = b"i--5e"; + const DICT_UNORDERED_KEYS: &[u8] = b"d5:z_key5:value5:a_key5:valuee"; + const DICT_DUP_KEYS_SAME_DATA: &[u8] = b"d5:a_keyi0e5:a_keyi0ee"; + const DICT_DUP_KEYS_DIFF_DATA: &[u8] = b"d5:a_keyi0e5:a_key7:a_valuee"; + /* cSpell:enable */ + + #[test] + fn positive_decode_general() { + let bencode = BencodeRef::decode(GENERAL, BDecodeOpt::default()).unwrap(); + + let ben_dict = bencode.dict().unwrap(); + assert_eq!(ben_dict.lookup("".as_bytes()).unwrap().str().unwrap(), "zero_len_key"); + assert_eq!( + ben_dict.lookup("location".as_bytes()).unwrap().str().unwrap(), + "udp://test.com:80" + ); + 
assert_eq!(ben_dict.lookup("number".as_bytes()).unwrap().int().unwrap(), 500_500_i64); + + let nested_dict = ben_dict.lookup("nested dict".as_bytes()).unwrap().dict().unwrap(); + let nested_list = nested_dict.lookup("list".as_bytes()).unwrap().list().unwrap(); + assert_eq!(nested_list[0].int().unwrap(), -500_500_i64); + } + + #[test] + fn positive_decode_recursion() { + BencodeRef::decode(RECURSION, BDecodeOpt::new(50, true, true)).unwrap_err(); + + // As long as we didn't overflow our call stack, we are good! + } + + #[test] + fn positive_decode_bytes_utf8() { + let bencode = BencodeRef::decode(BYTES_UTF8, BDecodeOpt::default()).unwrap(); + + assert_eq!(bencode.str().unwrap(), "valid_utf8_bytes"); + } + + #[test] + fn positive_decode_dict() { + let bencode = BencodeRef::decode(DICTIONARY, BDecodeOpt::default()).unwrap(); + let dict = bencode.dict().unwrap(); + assert_eq!(dict.lookup("test_key".as_bytes()).unwrap().str().unwrap(), "test_value"); + + let nested_dict = dict.lookup("test_dict".as_bytes()).unwrap().dict().unwrap(); + assert_eq!( + nested_dict.lookup("nested_key".as_bytes()).unwrap().str().unwrap(), + "nested_value" + ); + + let nested_list = nested_dict.lookup("nested_list".as_bytes()).unwrap().list().unwrap(); + assert_eq!(nested_list[0].int().unwrap(), 500i64); + assert_eq!(nested_list[1].int().unwrap(), -500i64); + assert_eq!(nested_list[2].int().unwrap(), 0i64); + } + + #[test] + fn positive_decode_list() { + let bencode = BencodeRef::decode(LIST, BDecodeOpt::default()).unwrap(); + let list = bencode.list().unwrap(); + + assert_eq!(list[0].str().unwrap(), "test_bytes"); + assert_eq!(list[1].int().unwrap(), 500i64); + assert_eq!(list[2].int().unwrap(), 0i64); + assert_eq!(list[3].int().unwrap(), -500i64); + + let nested_list = list[4].list().unwrap(); + assert_eq!(nested_list[0].str().unwrap(), "nested_bytes"); + + let nested_dict = list[5].dict().unwrap(); + assert_eq!( + nested_dict.lookup("test_key".as_bytes()).unwrap().str().unwrap(), + 
"test_value" + ); + } + + #[test] + fn positive_decode_bytes() { + let bytes = super::decode_bytes(BYTES, 0).unwrap().0; + assert_eq!(bytes.len(), 5); + assert_eq!(bytes[0] as char, 'Ã…'); + assert_eq!(bytes[1] as char, 'æ'); + assert_eq!(bytes[2] as char, '¾'); + assert_eq!(bytes[3] as char, 'æ'); + assert_eq!(bytes[4] as char, 'ò'); + } + + #[test] + fn positive_decode_bytes_zero_len() { + let bytes = super::decode_bytes(BYTES_ZERO_LEN, 0).unwrap().0; + assert_eq!(bytes.len(), 0); + } + + #[test] + fn positive_decode_int() { + let int_value = super::decode_int(INT, 1, crate::BEN_END).unwrap().0; + assert_eq!(int_value, 500i64); + } + + #[test] + fn positive_decode_int_negative() { + let int_value = super::decode_int(INT_NEGATIVE, 1, crate::BEN_END).unwrap().0; + assert_eq!(int_value, -500i64); + } + + #[test] + fn positive_decode_int_zero() { + let int_value = super::decode_int(INT_ZERO, 1, crate::BEN_END).unwrap().0; + assert_eq!(int_value, 0i64); + } + + #[test] + fn positive_decode_partial() { + let bencode = BencodeRef::decode(PARTIAL, BDecodeOpt::new(2, true, false)).unwrap(); + + assert_ne!(PARTIAL.len(), bencode.buffer().len()); + assert_eq!(3, bencode.buffer().len()); + } + + #[test] + fn positive_decode_dict_unordered_keys() { + BencodeRef::decode(DICT_UNORDERED_KEYS, BDecodeOpt::default()).unwrap(); + } + + #[test] + #[should_panic = "InvalidByte { pos: 0 }"] + fn negative_decode_bytes_neg_len() { + BencodeRef::decode(BYTES_NEG_LEN, BDecodeOpt::default()).unwrap(); + } + + #[test] + #[should_panic = "BytesEmpty { pos: 20 }"] + fn negative_decode_bytes_extra() { + BencodeRef::decode(BYTES_EXTRA, BDecodeOpt::default()).unwrap(); + } + + #[test] + fn negative_decode_bytes_not_utf8() { + let bencode = BencodeRef::decode(BYTES_NOT_UTF8, BDecodeOpt::default()).unwrap(); + + assert!(bencode.str().is_none()); + } + + #[test] + #[should_panic = "InvalidIntParseError { pos: 1 }"] + fn negative_decode_int_nan() { + super::decode_int(INT_NAN, 1, 
crate::BEN_END).unwrap(); + } + + #[test] + #[should_panic = "InvalidIntZeroPadding { pos: 1 }"] + fn negative_decode_int_leading_zero() { + super::decode_int(INT_LEADING_ZERO, 1, crate::BEN_END).unwrap(); + } + + #[test] + #[should_panic = "InvalidIntZeroPadding { pos: 1 }"] + fn negative_decode_int_double_zero() { + super::decode_int(INT_DOUBLE_ZERO, 1, crate::BEN_END).unwrap(); + } + + #[test] + #[should_panic = "InvalidIntNegativeZero { pos: 1 }"] + fn negative_decode_int_negative_zero() { + super::decode_int(INT_NEGATIVE_ZERO, 1, crate::BEN_END).unwrap(); + } + + #[test] + #[should_panic = " InvalidIntParseError { pos: 1 }"] + fn negative_decode_int_double_negative() { + super::decode_int(INT_DOUBLE_NEGATIVE, 1, crate::BEN_END).unwrap(); + } + + #[test] + #[should_panic = "InvalidKeyOrdering { pos: 15, key: [97, 95, 107, 101, 121] }"] + fn negative_decode_dict_unordered_keys() { + BencodeRef::decode(DICT_UNORDERED_KEYS, BDecodeOpt::new(5, true, true)).unwrap(); + } + + #[test] + #[should_panic = "InvalidKeyDuplicates { pos: 18, key: [97, 95, 107, 101, 121] }"] + fn negative_decode_dict_dup_keys_same_data() { + BencodeRef::decode(DICT_DUP_KEYS_SAME_DATA, BDecodeOpt::default()).unwrap(); + } + + #[test] + #[should_panic = "InvalidKeyDuplicates { pos: 18, key: [97, 95, 107, 101, 121] }"] + fn negative_decode_dict_dup_keys_diff_data() { + BencodeRef::decode(DICT_DUP_KEYS_DIFF_DATA, BDecodeOpt::default()).unwrap(); + } +} diff --git a/contrib/bencode/src/reference/decode_opt.rs b/contrib/bencode/src/reference/decode_opt.rs new file mode 100644 index 000000000..8409cc72c --- /dev/null +++ b/contrib/bencode/src/reference/decode_opt.rs @@ -0,0 +1,53 @@ +const DEFAULT_MAX_RECURSION: usize = 50; +const DEFAULT_CHECK_KEY_SORT: bool = false; +const DEFAULT_ENFORCE_FULL_DECODE: bool = true; + +/// Stores decoding options for modifying decode behavior. 
+#[derive(Copy, Clone)] +#[allow(clippy::module_name_repetitions)] +pub struct BDecodeOpt { + max_recursion: usize, + check_key_sort: bool, + enforce_full_decode: bool, +} + +impl BDecodeOpt { + /// Create a new `BDecodeOpt` object. + #[must_use] + pub fn new(max_recursion: usize, check_key_sort: bool, enforce_full_decode: bool) -> BDecodeOpt { + BDecodeOpt { + max_recursion, + check_key_sort, + enforce_full_decode, + } + } + + /// Maximum limit allowed when decoding bencode. + #[must_use] + pub fn max_recursion(&self) -> usize { + self.max_recursion + } + + /// Whether or not an error should be thrown for out of order dictionary keys. + #[must_use] + pub fn check_key_sort(&self) -> bool { + self.check_key_sort + } + + /// Whether or not we enforce that the decoded bencode must make up all of the input + /// bytes or not. + /// + /// It may be useful to disable this if for example, the input bencode is prepended to + /// some payload and you would like to disassociate it. In this case, to find where the + /// rest of the payload starts that wasn't decoded, get the bencode buffer, and call `len()`. + #[must_use] + pub fn enforce_full_decode(&self) -> bool { + self.enforce_full_decode + } +} + +impl Default for BDecodeOpt { + fn default() -> BDecodeOpt { + BDecodeOpt::new(DEFAULT_MAX_RECURSION, DEFAULT_CHECK_KEY_SORT, DEFAULT_ENFORCE_FULL_DECODE) + } +} diff --git a/contrib/bencode/src/reference/mod.rs b/contrib/bencode/src/reference/mod.rs new file mode 100644 index 000000000..6a0ae6e40 --- /dev/null +++ b/contrib/bencode/src/reference/mod.rs @@ -0,0 +1,3 @@ +pub mod bencode_ref; +pub mod decode; +pub mod decode_opt; diff --git a/contrib/bencode/tests/mod.rs b/contrib/bencode/tests/mod.rs new file mode 100644 index 000000000..14606c175 --- /dev/null +++ b/contrib/bencode/tests/mod.rs @@ -0,0 +1,18 @@ +use torrust_tracker_contrib_bencode::{ben_bytes, ben_int, ben_list, ben_map}; + +#[test] +fn positive_ben_map_macro() { + let result = (ben_map! 
{ + "key" => ben_bytes!("value") + }) + .encode(); + + assert_eq!("d3:key5:valuee".as_bytes(), &result[..]); // cspell:disable-line +} + +#[test] +fn positive_ben_list_macro() { + let result = (ben_list!(ben_int!(5))).encode(); + + assert_eq!("li5ee".as_bytes(), &result[..]); // cspell:disable-line +} diff --git a/contrib/dev-tools/containers/docker-build.sh b/contrib/dev-tools/containers/docker-build.sh new file mode 100755 index 000000000..39143910f --- /dev/null +++ b/contrib/dev-tools/containers/docker-build.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +echo "Building docker image ..." + +docker build --target release --tag torrust-tracker:release --file Containerfile . diff --git a/contrib/dev-tools/containers/docker-install.sh b/contrib/dev-tools/containers/docker-install.sh new file mode 100755 index 000000000..6034e8233 --- /dev/null +++ b/contrib/dev-tools/containers/docker-install.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +./contrib/dev-tools/containers/docker-build.sh diff --git a/contrib/dev-tools/containers/docker-run-local.sh b/contrib/dev-tools/containers/docker-run-local.sh new file mode 100755 index 000000000..05e23f4a0 --- /dev/null +++ b/contrib/dev-tools/containers/docker-run-local.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/ + +docker run -it \ + --env USER_ID"$(id -u)" \ + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \ + --volume ./storage/tracker/lib:/var/lib/torrust/tracker:rw \ + --volume ./storage/tracker/log:/var/log/torrust/tracker:rw \ + --volume ./storage/tracker/etc:/etc/torrust/tracker:rw \ + torrust-tracker:release diff --git a/contrib/dev-tools/containers/docker-run-public.sh b/contrib/dev-tools/containers/docker-run-public.sh new file mode 100755 index 000000000..73bcf600a --- /dev/null +++ b/contrib/dev-tools/containers/docker-run-public.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ 
./storage/tracker/etc/ + +docker run -it \ + --env USER_ID"$(id -u)" \ + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \ + --volume ./storage/tracker/lib:/var/lib/torrust/tracker:rw \ + --volume ./storage/tracker/log:/var/log/torrust/tracker:rw \ + --volume ./storage/tracker/etc:/etc/torrust/tracker:rw \ + torrust/tracker:latest diff --git a/contrib/dev-tools/init/install-local.sh b/contrib/dev-tools/init/install-local.sh new file mode 100755 index 000000000..747c357bc --- /dev/null +++ b/contrib/dev-tools/init/install-local.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# This script is only intended to be used for local development or testing environments. + +# Generate storage directory if it does not exist +mkdir -p ./storage/tracker/lib/database + +# Generate the sqlite database if it does not exist +if ! [ -f "./storage/tracker/lib/database/sqlite3.db" ]; then + sqlite3 ./storage/tracker/lib/database/sqlite3.db "VACUUM;" +fi diff --git a/contrib/dev-tools/su-exec/LICENSE b/contrib/dev-tools/su-exec/LICENSE new file mode 100644 index 000000000..f623b904e --- /dev/null +++ b/contrib/dev-tools/su-exec/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 ncopa + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/contrib/dev-tools/su-exec/Makefile b/contrib/dev-tools/su-exec/Makefile new file mode 100644 index 000000000..bda768957 --- /dev/null +++ b/contrib/dev-tools/su-exec/Makefile @@ -0,0 +1,17 @@ + +CFLAGS ?= -Wall -Werror -g +LDFLAGS ?= + +PROG := su-exec +SRCS := $(PROG).c + +all: $(PROG) + +$(PROG): $(SRCS) + $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS) + +$(PROG)-static: $(SRCS) + $(CC) $(CFLAGS) -o $@ $^ -static $(LDFLAGS) + +clean: + rm -f $(PROG) $(PROG)-static diff --git a/contrib/dev-tools/su-exec/README.md b/contrib/dev-tools/su-exec/README.md new file mode 100644 index 000000000..2b0517377 --- /dev/null +++ b/contrib/dev-tools/su-exec/README.md @@ -0,0 +1,46 @@ +# su-exec +switch user and group id, setgroups and exec + +## Purpose + +This is a simple tool that will simply execute a program with different +privileges. The program will be executed directly and not run as a child, +like su and sudo does, which avoids TTY and signal issues (see below). + +Notice that su-exec depends on being run by the root user, non-root +users do not have permission to change uid/gid. + +## Usage + +```shell +su-exec user-spec command [ arguments... ] +``` + +`user-spec` is either a user name (e.g. `nobody`) or user name and group +name separated with colon (e.g. `nobody:ftp`). Numeric uid/gid values +can be used instead of names. Example: + +```shell +$ su-exec apache:1000 /usr/sbin/httpd -f /opt/www/httpd.conf +``` + +## TTY & parent/child handling + +Notice how `su` will make `ps` be a child of a shell while `su-exec` +just executes `ps` directly. 
+ +```shell +$ docker run -it --rm alpine:edge su postgres -c 'ps aux' +PID USER TIME COMMAND + 1 postgres 0:00 ash -c ps aux + 12 postgres 0:00 ps aux +$ docker run -it --rm -v $PWD/su-exec:/sbin/su-exec:ro alpine:edge su-exec postgres ps aux +PID USER TIME COMMAND + 1 postgres 0:00 ps aux +``` + +## Why reinvent gosu? + +This does more or less exactly the same thing as [gosu](https://github.com/tianon/gosu) +but it is only 10kb instead of 1.8MB. + diff --git a/contrib/dev-tools/su-exec/su-exec.c b/contrib/dev-tools/su-exec/su-exec.c new file mode 100644 index 000000000..499071c6e --- /dev/null +++ b/contrib/dev-tools/su-exec/su-exec.c @@ -0,0 +1,109 @@ +/* set user and group id and exec */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +static char *argv0; + +static void usage(int exitcode) +{ + printf("Usage: %s user-spec command [args]\n", argv0); + exit(exitcode); +} + +int main(int argc, char *argv[]) +{ + char *user, *group, **cmdargv; + char *end; + + uid_t uid = getuid(); + gid_t gid = getgid(); + + argv0 = argv[0]; + if (argc < 3) + usage(0); + + user = argv[1]; + group = strchr(user, ':'); + if (group) + *group++ = '\0'; + + cmdargv = &argv[2]; + + struct passwd *pw = NULL; + if (user[0] != '\0') { + uid_t nuid = strtol(user, &end, 10); + if (*end == '\0') + uid = nuid; + else { + pw = getpwnam(user); + if (pw == NULL) + err(1, "getpwnam(%s)", user); + } + } + if (pw == NULL) { + pw = getpwuid(uid); + } + if (pw != NULL) { + uid = pw->pw_uid; + gid = pw->pw_gid; + } + + setenv("HOME", pw != NULL ? 
pw->pw_dir : "/", 1); + + if (group && group[0] != '\0') { + /* group was specified, ignore grouplist for setgroups later */ + pw = NULL; + + gid_t ngid = strtol(group, &end, 10); + if (*end == '\0') + gid = ngid; + else { + struct group *gr = getgrnam(group); + if (gr == NULL) + err(1, "getgrnam(%s)", group); + gid = gr->gr_gid; + } + } + + if (pw == NULL) { + if (setgroups(1, &gid) < 0) + err(1, "setgroups(%i)", gid); + } else { + int ngroups = 0; + gid_t *glist = NULL; + + while (1) { + int r = getgrouplist(pw->pw_name, gid, glist, &ngroups); + + if (r >= 0) { + if (setgroups(ngroups, glist) < 0) + err(1, "setgroups"); + break; + } + + glist = realloc(glist, ngroups * sizeof(gid_t)); + if (glist == NULL) + err(1, "malloc"); + } + } + + if (setgid(gid) < 0) + err(1, "setgid(%i)", gid); + + if (setuid(uid) < 0) + err(1, "setuid(%i)", uid); + + execvp(cmdargv[0], cmdargv); + err(1, "%s", cmdargv[0]); + + return 1; +} diff --git a/docs/adrs/20240227164834_use_plural_for_modules_containing_collections.md b/docs/adrs/20240227164834_use_plural_for_modules_containing_collections.md new file mode 100644 index 000000000..beb3cee00 --- /dev/null +++ b/docs/adrs/20240227164834_use_plural_for_modules_containing_collections.md @@ -0,0 +1,35 @@ +# Use plural for modules containing collections of types + +## Description + +In Rust, the naming conventions for module names (mod names) generally lean +towards using the singular form, rather than plurals. This practice aligns with +Rust's emphasis on clarity and precision in code organization. The idea is that +a module name should represent a single concept or functionality, which often +means using a singular noun to describe what the module contains or does. + +However, it's important to note that conventions can vary depending on the +context or the specific project. Some projects may choose to use plural forms +for module names if they feel it more accurately represents the contents of the +module. 
For example, a module that contains multiple implementations of a +similar concept or utility functions related to a specific theme might be named +in the plural to reflect the diversity of its contents. + +This could have some pros anc cons. For example, for a module containing types of +requests you could refer to a concrete request with `request::Announce` or +`requests::Announce`. If you read a code line `request::Announce` is probably +better. However, if you read the filed or folder name `requests`gives you a +better idea of what the modules contains. + +## Agreement + +We agree on use plural in cases where the modules contain some types with the +same type of responsibility. For example: + +- `src/servers`. +- `src/servers/http/v1/requests`. +- `src/servers/http/v1/responses`. +- `src/servers/http/v1/services`. +- Etcetera. + +We will change them progressively. diff --git a/docs/adrs/README.md b/docs/adrs/README.md new file mode 100644 index 000000000..85986fc36 --- /dev/null +++ b/docs/adrs/README.md @@ -0,0 +1,23 @@ +# Architectural Decision Records (ADRs) + +This directory contains the architectural decision records (ADRs) for the +project. ADRs are a way to document the architectural decisions made in the +project. + +More info: . + +## How to add a new record + +For the prefix: + +```s +date -u +"%Y%m%d%H%M%S" +``` + +Then you can create a new markdown file with the following format: + +```s +20230510152112_title.md +``` + +For the time being, we are not following any specific template. diff --git a/docs/benchmarking.md b/docs/benchmarking.md new file mode 100644 index 000000000..7d0228737 --- /dev/null +++ b/docs/benchmarking.md @@ -0,0 +1,303 @@ +# Benchmarking + +We have two types of benchmarking: + +- E2E benchmarking running the UDP tracker. +- Internal torrents repository benchmarking. + +## E2E benchmarking + +We are using the scripts provided by [aquatic](https://github.com/greatest-ape/aquatic). 
+ +How to install both commands: + +```console +cargo install aquatic_udp_load_test && cargo install aquatic_http_load_test +``` + +You can also clone and build the repos. It's the way used for the results shown +in this documentation. + +```console +git clone git@github.com:greatest-ape/aquatic.git +cd aquatic +cargo build --release -p aquatic_udp_load_test +``` + +### Run UDP load test + +Run the tracker with UDP service enabled and other services disabled and set log threshold to `error`. + +```toml +[logging] +threshold = "error" + +[[udp_trackers]] +bind_address = "0.0.0.0:6969" +``` + +Build and run the tracker: + +```console +cargo build --release +TORRUST_TRACKER_CONFIG_TOML_PATH="./share/default/config/tracker.udp.benchmarking.toml" ./target/release/torrust-tracker +``` + +Run the load test with: + +```console +./target/release/aquatic_udp_load_test +``` + +> NOTICE: You need to modify the port in the `udp_load_test` crate to use `6969` and rebuild. + +Output: + +```output +Starting client with config: Config { + server_address: 127.0.0.1:6969, + log_level: Error, + workers: 1, + duration: 0, + summarize_last: 0, + extra_statistics: true, + network: NetworkConfig { + multiple_client_ipv4s: true, + sockets_per_worker: 4, + recv_buffer: 8000000, + }, + requests: RequestConfig { + number_of_torrents: 1000000, + number_of_peers: 2000000, + scrape_max_torrents: 10, + announce_peers_wanted: 30, + weight_connect: 50, + weight_announce: 50, + weight_scrape: 1, + peer_seeder_probability: 0.75, + }, +} + +Requests out: 398367.11/second +Responses in: 358530.40/second + - Connect responses: 177567.60 + - Announce responses: 177508.08 + - Scrape responses: 3454.72 + - Error responses: 0.00 +Peers per announce response: 0.00 +Announce responses per info hash: + - p10: 1 + - p25: 1 + - p50: 1 + - p75: 1 + - p90: 2 + - p95: 3 + - p99: 105 + - p99.9: 289 + - p100: 361 +``` + +> IMPORTANT: The performance of the Torrust UDP Tracker is drastically decreased with these log 
threshold: `info`, `debug`, `trace`. + +```output +Requests out: 40719.21/second +Responses in: 33762.72/second + - Connect responses: 16732.76 + - Announce responses: 16692.98 + - Scrape responses: 336.98 + - Error responses: 0.00 +Peers per announce response: 0.00 +Announce responses per info hash: + - p10: 1 + - p25: 1 + - p50: 1 + - p75: 1 + - p90: 7 + - p95: 14 + - p99: 27 + - p99.9: 35 + - p100: 45 +``` + +### Comparing UDP tracker with other Rust implementations + +#### Aquatic UDP Tracker + +Running the tracker: + +```console +git clone git@github.com:greatest-ape/aquatic.git +cd aquatic +cargo build --release -p aquatic_udp +./target/release/aquatic_udp -p > "aquatic-udp-config.toml" +./target/release/aquatic_udp -c "aquatic-udp-config.toml" +``` + +Run the load test with: + +```console +./target/release/aquatic_udp_load_test +``` + +```output +Requests out: 432896.42/second +Responses in: 389577.70/second + - Connect responses: 192864.02 + - Announce responses: 192817.55 + - Scrape responses: 3896.13 + - Error responses: 0.00 +Peers per announce response: 21.55 +Announce responses per info hash: + - p10: 1 + - p25: 1 + - p50: 1 + - p75: 1 + - p90: 2 + - p95: 3 + - p99: 105 + - p99.9: 311 + - p100: 395 +``` + +#### Torrust-Actix UDP Tracker + +Run the tracker with UDP service enabled and other services disabled and set log threshold to `error`. + +```toml +[logging] +threshold = "error" + +[[udp_trackers]] +bind_address = "0.0.0.0:6969" +``` + +```console +git clone https://github.com/Power2All/torrust-actix.git +cd torrust-actix +cargo build --release +./target/release/torrust-actix --create-config +./target/release/torrust-actix +``` + +Run the load test with: + +```console +./target/release/aquatic_udp_load_test +``` + +> NOTICE: You need to modify the port in the `udp_load_test` crate to use `6969` and rebuild. 
+ +```output +Requests out: 200953.97/second +Responses in: 180858.14/second + - Connect responses: 89517.13 + - Announce responses: 89539.67 + - Scrape responses: 1801.34 + - Error responses: 0.00 +Peers per announce response: 1.00 +Announce responses per info hash: + - p10: 1 + - p25: 1 + - p50: 1 + - p75: 1 + - p90: 2 + - p95: 7 + - p99: 87 + - p99.9: 155 + - p100: 188 +``` + +### Results + +Announce request per second: + +| Tracker | Announce | +|---------------|-----------| +| Aquatic | 192,817 | +| Torrust | 177,508 | +| Torrust-Actix | 89,539 | + +Using a PC with: + +- RAM: 64GiB +- Processor: AMD Ryzen 9 7950X x 32 +- Graphics: AMD Radeon Graphics / Intel Arc A770 Graphics (DG2) +- OS: Ubuntu 23.04 +- OS Type: 64-bit +- Kernel Version: Linux 6.2.0-20-generic + +## Repository benchmarking + +### Requirements + +You need to install the `gnuplot` package. + +```console +sudo apt install gnuplot +``` + +### Run + +You can run it with: + +```console +cargo bench -p torrust-tracker-torrent-repository +``` + +It tests the different implementations for the internal torrent storage. 
The output should be something like this: + +```output + Running benches/repository_benchmark.rs (target/release/deps/repository_benchmark-2f7830898bbdfba4) +add_one_torrent/RwLockStd + time: [60.936 ns 61.383 ns 61.764 ns] +Found 24 outliers among 100 measurements (24.00%) + 15 (15.00%) high mild + 9 (9.00%) high severe +add_one_torrent/RwLockStdMutexStd + time: [60.829 ns 60.937 ns 61.053 ns] +Found 1 outliers among 100 measurements (1.00%) + 1 (1.00%) high severe +add_one_torrent/RwLockStdMutexTokio + time: [96.034 ns 96.243 ns 96.545 ns] +Found 6 outliers among 100 measurements (6.00%) + 4 (4.00%) high mild + 2 (2.00%) high severe +add_one_torrent/RwLockTokio + time: [108.25 ns 108.66 ns 109.06 ns] +Found 2 outliers among 100 measurements (2.00%) + 2 (2.00%) low mild +add_one_torrent/RwLockTokioMutexStd + time: [109.03 ns 109.11 ns 109.19 ns] +Found 4 outliers among 100 measurements (4.00%) + 1 (1.00%) low mild + 1 (1.00%) high mild + 2 (2.00%) high severe +Benchmarking add_one_torrent/RwLockTokioMutexTokio: Collecting 100 samples in estimated 1.0003 s (7.1M iterationsadd_one_torrent/RwLockTokioMutexTokio + time: [139.64 ns 140.11 ns 140.62 ns] +``` + +After running it you should have a new directory containing the criterion reports: + +```console +target/criterion/ +├── add_multiple_torrents_in_parallel +├── add_one_torrent +├── report +├── update_multiple_torrents_in_parallel +└── update_one_torrent_in_parallel +``` + +You can see one report for each of the operations we are considering for benchmarking: + +- Add multiple torrents in parallel. +- Add one torrent. +- Update multiple torrents in parallel. +- Update one torrent in parallel. 
+
+
+Each report looks like the following:
+
+![Torrent repository implementations benchmarking report](./media/torrent-repository-implementations-benchmarking-report.png)
+
+## Other considerations
+
+If you are interested in knowing more about the tracker performance or contribute to improve its performance you can join the [performance optimizations discussion](https://github.com/torrust/torrust-tracker/discussions/774). diff --git a/docs/containers.md b/docs/containers.md new file mode 100644 index 000000000..cddd2ba98 --- /dev/null +++ b/docs/containers.md @@ -0,0 +1,440 @@ +# Containers (Docker or Podman)
+
+## Demo environment
+
+It is simple to set up the tracker with the default
+configuration and run it using the pre-built public docker image:
+
+With Docker:
+
+```sh
+docker run -it torrust/tracker:latest
+```
+
+or with Podman:
+
+```sh
+podman run -it docker.io/torrust/tracker:latest
+```
+
+## Requirements
+
+- Tested with recent versions of Docker or Podman.
+
+## Volumes
+
+The [Containerfile](../Containerfile) (i.e. the Dockerfile) Defines Three Volumes:
+
+```Dockerfile
+VOLUME ["/var/lib/torrust/tracker","/var/log/torrust/tracker","/etc/torrust/tracker"]
+```
+
+When instantiating the container image with the `docker run` or `podman run` command, we map these volumes to the local storage:
+
+```s
+./storage/tracker/lib -> /var/lib/torrust/tracker
+./storage/tracker/log -> /var/log/torrust/tracker
+./storage/tracker/etc -> /etc/torrust/tracker
+```
+
+> NOTE: You can adjust this mapping for your preference, however this mapping is the default in our guides and scripts.
+
+### Pre-Create Host-Mapped Folders
+
+Please run this command where you wish to run the container:
+
+```sh
+mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/
+```
+
+### Matching Ownership ID's of Host Storage and Container Volumes
+
+It is important that the `torrust` user has the same uid `$(id -u)` as the host mapped folders. 
Our [entry script](../share/container/entry_script_sh), installed to `/usr/local/bin/entry.sh` inside the container, switches to the `torrust` user created based upon the `USER_ID` environmental variable.
+
+When running the container, you may use the `--env USER_ID="$(id -u)"` argument that gets the current user-id and passes it to the container.
+
+### Mapped Tree Structure
+
+Using the standard mapping defined above produces the following mapped tree:
+
+```s
+storage/tracker/
+├── lib
+│   ├── database
+│   │   └── sqlite3.db => /var/lib/torrust/tracker/database/sqlite3.db [auto populated]
+│   └── tls
+│       ├── localhost.crt => /var/lib/torrust/tracker/tls/localhost.crt [user supplied]
+│       └── localhost.key => /var/lib/torrust/tracker/tls/localhost.key [user supplied]
+├── log => /var/log/torrust/tracker (future use)
+└── etc
+    └── tracker.toml => /etc/torrust/tracker/tracker.toml [auto populated]
+```
+
+> NOTE: you only need the `tls` directory and certificates in case you have enabled SSL.
+
+## Building the Container
+
+### Clone and Change into Repository
+
+```sh
+# Inside your dev folder
+git clone https://github.com/torrust/torrust-tracker.git; cd torrust-tracker
+```
+
+### (Docker) Setup Context
+
+Before starting, if you are using docker, it is helpful to reset the context to the default:
+
+```sh
+docker context use default
+```
+
+### (Docker) Build
+
+```sh
+# Release Mode
+docker build --target release --tag torrust-tracker:release --file Containerfile .
+
+# Debug Mode
+docker build --target debug --tag torrust-tracker:debug --file Containerfile .
+```
+
+### (Podman) Build
+
+```sh
+# Release Mode
+podman build --target release --tag torrust-tracker:release --file Containerfile .
+
+# Debug Mode
+podman build --target debug --tag torrust-tracker:debug --file Containerfile . 
+
+```
+
+## Running the Container
+
+### Basic Run
+
+No arguments are needed for simply checking the container image works:
+
+#### (Docker) Run Basic
+
+```sh
+# Release Mode
+docker run -it torrust-tracker:release
+
+# Debug Mode
+docker run -it torrust-tracker:debug
+```
+
+#### (Podman) Run Basic
+
+```sh
+# Release Mode
+podman run -it docker.io/torrust-tracker:release
+
+# Debug Mode
+podman run -it docker.io/torrust-tracker:debug
+```
+
+### Arguments
+
+The arguments need to be placed before the image tag. i.e.
+
+`run [arguments] torrust-tracker:release`
+
+#### Environmental Variables
+
+Environmental variables are loaded through the `--env`, in the format `--env VAR="value"`.
+
+The following environmental variables can be set:
+
+- `TORRUST_TRACKER_CONFIG_TOML_PATH` - The in-container path to the tracker configuration file, (default: `"/etc/torrust/tracker/tracker.toml"`).
+- `TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN` - Override of the admin token. If set, this value overrides any value set in the config.
+- `TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER` - The database type used for the container, (options: `sqlite3`, `mysql`, default `sqlite3`). Please Note: This does not override the database configuration within the `.toml` config file.
+- `TORRUST_TRACKER_CONFIG_TOML` - Load config from this environmental variable instead of from a file, (i.e: `TORRUST_TRACKER_CONFIG_TOML=$(cat tracker-tracker.toml)`).
+- `USER_ID` - The user id for the runtime created `torrust` user. Please Note: This user id should match the ownership of the host-mapped volumes, (default `1000`).
+- `UDP_PORT` - The port for the UDP tracker. This should match the port used in the configuration, (default `6969`).
+- `HTTP_PORT` - The port for the HTTP tracker. This should match the port used in the configuration, (default `7070`).
+- `API_PORT` - The port for the tracker API. This should match the port used in the configuration, (default `1212`). 
+
+- `HEALTH_CHECK_API_PORT` - The port for the Health Check API. This should match the port used in the configuration, (default `1313`).
+
+### Sockets
+
+Socket ports used internally within the container can be mapped to with the `--publish` argument.
+
+The format is: `--publish [optional_host_ip]:[host_port]:[container_port]/[optional_protocol]`, for example: `--publish 127.0.0.1:8080:80/tcp`.
+
+The default ports can be mapped with the following:
+
+```s
+--publish 0.0.0.0:7070:7070/tcp \
+--publish 0.0.0.0:6969:6969/udp \
+--publish 0.0.0.0:1212:1212/tcp \
+```
+
+> NOTE: Inside the container it is necessary to expose a socket with the wildcard address `0.0.0.0` so that it may be accessible from the host. Verify in the configuration that the sockets use the wildcard address.
+
+### Host-mapped Volumes
+
+By default the container will use install volumes for `/var/lib/torrust/tracker`, `/var/log/torrust/tracker`, and `/etc/torrust/tracker`, however for better administration it is good to make these volumes host-mapped.
+
+The argument to host-map volumes is `--volume`, with the format: `--volume=[host-src:]container-dest[:]`.
+
+The default mapping can be supplied with the following arguments:
+
+```s
+--volume ./storage/tracker/lib:/var/lib/torrust/tracker:Z \
+--volume ./storage/tracker/log:/var/log/torrust/tracker:Z \
+--volume ./storage/tracker/etc:/etc/torrust/tracker:Z \
+```
+
+Please note the `:Z` at the end of the podman `--volume` mapping arguments, this is to give read-write permission on SELinux enabled systems, if this doesn't work on your system, you can use `:rw` instead.
+
+## Complete Example
+
+### With Docker
+
+```sh
+## Setup Docker Default Context
+docker context use default
+
+## Build Container Image
+docker build --target release --tag torrust-tracker:release --file Containerfile . 
+ +## Setup Mapped Volumes +mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/ + +## Run Torrust Tracker Container Image +docker run -it \ + --env TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN="MySecretToken" \ + --env USER_ID="$(id -u)" \ + --publish 0.0.0.0:7070:7070/tcp \ + --publish 0.0.0.0:6969:6969/udp \ + --publish 0.0.0.0:1212:1212/tcp \ + --volume ./storage/tracker/lib:/var/lib/torrust/tracker:Z \ + --volume ./storage/tracker/log:/var/log/torrust/tracker:Z \ + --volume ./storage/tracker/etc:/etc/torrust/tracker:Z \ + torrust-tracker:release +``` + +### With Podman + +```sh +## Build Container Image +podman build --target release --tag torrust-tracker:release --file Containerfile . + +## Setup Mapped Volumes +mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/ + +## Run Torrust Tracker Container Image +podman run -it \ + --env TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN="MySecretToken" \ + --env USER_ID="$(id -u)" \ + --publish 0.0.0.0:7070:7070/tcp \ + --publish 0.0.0.0:6969:6969/udp \ + --publish 0.0.0.0:1212:1212/tcp \ + --volume ./storage/tracker/lib:/var/lib/torrust/tracker:Z \ + --volume ./storage/tracker/log:/var/log/torrust/tracker:Z \ + --volume ./storage/tracker/etc:/etc/torrust/tracker:Z \ + docker.io/torrust-tracker:release +``` + +## Docker Compose + +The docker-compose configuration includes the MySQL service configuration. If you want to use MySQL instead of SQLite you should verify the `/etc/torrust/tracker/tracker.toml` (i.e `./storage/tracker/etc/tracker.toml`) configuration: + +```toml +[core.database] +driver = "mysql" +path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" +``` + +### Build and Run: + +```sh +docker build --target release --tag torrust-tracker:release --file Containerfile . 
+
+
+mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/
+
+USER_ID=$(id -u) \
+  TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN="MySecretToken" \
+  docker compose up --build
+```
+
+After running the `compose up` command you will have two running containers:
+
+```s
+$ docker ps
+CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+06feacb91a9e torrust-tracker "cargo run" 18 minutes ago Up 4 seconds 0.0.0.0:1212->1212/tcp, :::1212->1212/tcp, 0.0.0.0:7070->7070/tcp, :::7070->7070/tcp, 0.0.0.0:6969->6969/udp, :::6969->6969/udp torrust-tracker-1
+34d29e792ee2 mysql:8.0 "docker-entrypoint.s…" 18 minutes ago Up 5 seconds (healthy) 0.0.0.0:3306->3306/tcp, :::3306->3306/tcp, 33060/tcp torrust-mysql-1
+```
+
+And you should be able to use the application, for example making a request to the API:
+
+<http://localhost:1212/api/v1/stats?token=MyAccessToken>
+
+You can stop the containers with:
+
+```s
+docker compose down
+```
+
+Additionally, you can delete all resources (containers, volumes, networks) with:
+
+```s
+docker compose down -v
+```
+
+### Access Mysql with docker
+
+These are some useful commands for MySQL.
+
+Open a shell in the MySQL container using docker or docker-compose.
+
+```s
+docker exec -it torrust-mysql-1 /bin/bash
+docker compose exec mysql /bin/bash
+```
+
+Connect to MySQL from inside the MySQL container or from the host:
+
+```s
+mysql -h127.0.0.1 -uroot -proot_secret_password
+```
+
+When the MySQL container is started for the first time, it creates the database, user, and permissions needed.
+If you see the error "Host is not allowed to connect to this MySQL server" you can check that users have the right permissions in the database. Make sure the user `root` and `db_user` can connect from any host (`%`). 
+
+
+```s
+mysql> SELECT host, user FROM mysql.user;
++-----------+------------------+
+| host      | user             |
++-----------+------------------+
+| %         | db_user          |
+| %         | root             |
+| localhost | mysql.infoschema |
+| localhost | mysql.session    |
+| localhost | mysql.sys        |
+| localhost | root             |
++-----------+------------------+
+6 rows in set (0.00 sec)
+```
+
+If the database, user, or permissions are not created, the reason could be that the MySQL container volume is corrupted. Delete it and start the containers again.
+
+### SSL Certificates
+
+You can use a certificate for localhost. You can create your [localhost certificate](https://letsencrypt.org/docs/certificates-for-localhost/#making-and-trusting-your-own-certificates) and use it in the `storage` folder and the configuration file (`tracker.toml`). For example:
+
+The storage folder must contain your certificates:
+
+```s
+storage/tracker/lib/tls
+    ├── localhost.crt
+    └── localhost.key
+storage/http_api/lib/tls
+    ├── localhost.crt
+    └── localhost.key
+```
+
+Then you have to enable it in your `tracker.toml` file:
+
+```toml
+[http_trackers.tsl_config]
+ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt"
+ssl_key_path = "./storage/tracker/lib/tls/localhost.key"
+
+[http_api.tsl_config]
+ssl_cert_path = "./storage/http_api/lib/tls/localhost.crt"
+ssl_key_path = "./storage/http_api/lib/tls/localhost.key"
+```
+
+> NOTE: you can enable it independently for each HTTP tracker or the API.
+
+If you enable the SSL certificate for the API, for example, you can load the API with this URL:
+
+<https://localhost:1212/api/v1/stats?token=MyAccessToken>
+
+## Prod environment
+
+In this section, you will learn how to deploy the tracker to a single docker container in Azure Container Instances.
+
+> NOTE: Azure Container Instances is a solution when you want to run an isolated container. If you need full container orchestration, including service discovery across multiple containers, automatic scaling, and coordinated application upgrades, we recommend [Kubernetes](https://kubernetes.io/). 
+
+
+Deploy to Azure Container Instance following [docker documentation](https://docs.docker.com/cloud/aci-integration/).
+
+You have to create the ACI context and the storage:
+
+```s
+docker context create aci myacicontext
+docker context use myacicontext
+docker volume create test-volume --storage-account torrustracker
+```
+
+You need to create all the files needed by the application in the storage dir `storage/lib/database`.
+
+And finally, you can run the container:
+
+```s
+docker run \
+  --env USER_ID="$(id -u)" \
+  --publish 6969:6969/udp \
+  --publish 7070:7070/tcp \
+  --publish 1212:1212/tcp \
+  --volume torrustracker/lib:/var/lib/torrust/tracker:rw \
+  --volume torrustracker/log:/var/log/torrust/tracker:rw \
+  --volume torrustracker/etc:/etc/torrust/tracker:rw \
+  registry.hub.docker.com/torrust/tracker:latest
+```
+
+Detach from container logs when the container starts. By default, the command line stays attached and follows container logs.
+
+```s
+docker run \
+  --detach \
+  --env USER_ID="$(id -u)" \
+  --publish 6969:6969/udp \
+  --publish 7070:7070/tcp \
+  --publish 1212:1212/tcp \
+  --volume torrustracker/lib:/var/lib/torrust/tracker:rw \
+  --volume torrustracker/log:/var/log/torrust/tracker:rw \
+  --volume torrustracker/etc:/etc/torrust/tracker:rw \
+  registry.hub.docker.com/torrust/tracker:latest
+```
+
+You should see something like this:
+
+```s
+[+] Running 2/2
+ ⠿ Group intelligent-hawking Created 5.0s
+ ⠿ intelligent-hawking Created 41.7s
+2022-12-08T18:39:19.697869300+00:00 [torrust_tracker::logging][INFO] logging initialized. 
+2022-12-08T18:39:19.712651100+00:00 [torrust_tracker::jobs::udp_tracker][INFO] Starting UDP server on: 0.0.0.0:6969 +2022-12-08T18:39:19.712792700+00:00 [torrust_tracker::jobs::tracker_api][INFO] Starting Torrust API server on: 0.0.0.0:1212 +2022-12-08T18:39:19.725124+00:00 [torrust_tracker::jobs::tracker_api][INFO] Torrust API server started +``` + +You can see the container with: + +```s +$ docker ps +CONTAINER ID IMAGE COMMAND STATUS PORTS +intelligent-hawking registry.hub.docker.com/torrust/tracker:latest Running 4.236.213.57:6969->6969/udp, 4.236.213.57:1212->1212/tcp +``` + +After a while, you can use the tracker API `http://4.236.213.57:1212/api/v1/stats?token=MyAccessToken` and the UDP tracker with your BitTorrent client using this tracker announce URL `udp://4.236.213.57:6969`. + +> NOTES: +> +> - [There is no support for mounting a single file](https://docs.docker.com/cloud/aci-container-features/#persistent-volumes), or mounting a subfolder from an `Azure File Share`. +> - [ACI does not allow port mapping](https://docs.docker.com/cloud/aci-integration/#exposing-ports). +> - [Azure file share volume mount requires the Linux container run as root](https://learn.microsoft.com/en-us/azure/container-instances/container-instances-volume-azure-files#limitations). +> - It can take some minutes until the public IP for the ACI container is available. +> - You can use the Azure web UI to download files from the storage. For example, the SQLite database. +> - [It seems you can only expose web interfaces on port 80 on Azure Container Instances](https://stackoverflow.com/a/56768087/3012842). Not official documentation! + +## Links + +- [Deploying Docker containers on Azure](https://docs.docker.com/cloud/aci-integration/). +- [Docker run options for ACI containers](https://docs.docker.com/cloud/aci-container-features/). 
+- [Quickstart: Deploy a container instance in Azure using the Docker CLI](https://learn.microsoft.com/en-us/azure/container-instances/quickstart-docker-cli). diff --git a/docs/licenses/LICENSE-AGPL_3_0 b/docs/licenses/LICENSE-AGPL_3_0 new file mode 100644 index 000000000..2beb9e163 --- /dev/null +++ b/docs/licenses/LICENSE-AGPL_3_0 @@ -0,0 +1,662 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. 
+ + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. + diff --git a/docs/licenses/LICENSE-MIT_0 b/docs/licenses/LICENSE-MIT_0 new file mode 100644 index 000000000..fc06cc4fe --- /dev/null +++ b/docs/licenses/LICENSE-MIT_0 @@ -0,0 +1,14 @@ +MIT No Attribution + +Permission is hereby granted, free of charge, to any person obtaining a copy of this +software and associated documentation files (the "Software"), to deal in the Software +without restriction, including without limitation the rights to use, copy, modify, +merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/docs/media/flamegraph.svg b/docs/media/flamegraph.svg new file mode 100644 index 000000000..58387ee06 --- /dev/null +++ b/docs/media/flamegraph.svg @@ -0,0 +1,491 @@ +Flame Graph Reset ZoomSearch merge_sched_in (93 samples, 0.02%)event_sched_in (68 samples, 0.02%)perf_ibs_add (50 samples, 0.01%)perf_ibs_start (41 samples, 0.01%)ctx_sched_in (117 samples, 0.03%)visit_groups_merge.constprop.0.isra.0 (114 samples, 0.03%)finish_task_switch.isra.0 (122 samples, 0.03%)__perf_event_task_sched_in (119 samples, 0.03%)profiling (170 samples, 0.04%)ret_from_fork (126 samples, 0.03%)schedule_tail (126 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (81 samples, 0.02%)[[vdso]] (750 samples, 0.20%)<torrust_tracker::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as core::ops::deref::Deref>::deref::__stability::LAZY (889 samples, 0.23%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (99 samples, 0.03%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (94 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (84 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<BorrowType,K,V>::init_front (62 samples, 0.02%)[[vdso]] (968 samples, 0.26%)__GI___clock_gettime (58 samples, 0.02%)__memcpy_avx512_unaligned_erms (143 samples, 0.04%)_int_free (38 samples, 0.01%)_int_malloc (178 samples, 0.05%)core::ptr::drop_in_place<[core::option::Option<core::task::wake::Waker>: 32]> (39 samples, 0.01%)epoll_wait (676 samples, 0.18%)tokio::runtime::context::with_scheduler (85 samples, 0.02%)core::option::Option<T>::map (65 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (60 samples, 0.02%)mio::poll::Poll::poll (84 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select (84 samples, 0.02%)tokio::runtime::io::driver::Driver::turn (164 samples, 
0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (48 samples, 0.01%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (76 samples, 0.02%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (50 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (113 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (415 samples, 0.11%)core::sync::atomic::AtomicUsize::fetch_add (410 samples, 0.11%)core::sync::atomic::atomic_add (410 samples, 0.11%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (475 samples, 0.13%)tokio::runtime::driver::Handle::unpark (40 samples, 0.01%)tokio::runtime::driver::IoHandle::unpark (40 samples, 0.01%)__entry_text_start (99 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (180 samples, 0.05%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (169 samples, 0.04%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark_condvar (124 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (123 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (60 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::remove (53 samples, 0.01%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (53 samples, 0.01%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (122 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (75 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (287 samples, 0.08%)tokio::runtime::scheduler::multi_thread::park::Parker::park (232 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Inner::park (232 samples, 0.06%)core::cell::RefCell<T>::borrow_mut (67 samples, 0.02%)core::cell::RefCell<T>::try_borrow_mut (67 samples, 
0.02%)core::cell::BorrowRefMut::new (67 samples, 0.02%)tokio::runtime::coop::budget (86 samples, 0.02%)tokio::runtime::coop::with_budget (86 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (80 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (372 samples, 0.10%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_searching (240 samples, 0.06%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::transition_worker_from_searching (133 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (103 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::dealloc (46 samples, 0.01%)core::mem::drop (41 samples, 0.01%)core::ptr::drop_in_place<alloc::boxed::Box<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (41 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::end_processing_scheduled_tasks (77 samples, 0.02%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (210 samples, 0.06%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (210 samples, 0.06%)core::slice::<impl [T]>::contains (521 samples, 0.14%)<T as core::slice::cmp::SliceContains>::slice_contains (521 samples, 0.14%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (521 samples, 0.14%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (125 samples, 0.03%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (125 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (586 samples, 0.15%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (617 samples, 
0.16%)tokio::runtime::scheduler::multi_thread::worker::Context::park (767 samples, 0.20%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (115 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (78 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (56 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (52 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::len (45 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (38 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (105 samples, 0.03%)core::num::<impl u32>::wrapping_add (129 samples, 0.03%)core::sync::atomic::AtomicU64::compare_exchange (138 samples, 0.04%)core::sync::atomic::atomic_compare_exchange (138 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::pack (311 samples, 0.08%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (745 samples, 0.20%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (778 samples, 0.21%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (821 samples, 0.22%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (2,036 samples, 0.54%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (2,036 samples, 0.54%)tokio::runtime::scheduler::multi_thread::worker::run (2,036 samples, 0.54%)tokio::runtime::context::runtime::enter_runtime (2,036 samples, 0.54%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (2,036 samples, 0.54%)tokio::runtime::context::set_scheduler (2,036 samples, 0.54%)std::thread::local::LocalKey<T>::with (2,036 samples, 0.54%)std::thread::local::LocalKey<T>::try_with (2,036 samples, 0.54%)tokio::runtime::context::set_scheduler::{{closure}} (2,036 samples, 
0.54%)tokio::runtime::context::scoped::Scoped<T>::set (2,036 samples, 0.54%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (2,036 samples, 0.54%)tokio::runtime::scheduler::multi_thread::worker::Context::run (2,036 samples, 0.54%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (2,422 samples, 0.64%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (2,422 samples, 0.64%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (58 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (58 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::poll (2,584 samples, 0.68%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (160 samples, 0.04%)tokio::runtime::task::core::Core<T,S>::set_stage (138 samples, 0.04%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (2,724 samples, 0.72%)tokio::runtime::task::harness::poll_future::{{closure}} (2,724 samples, 0.72%)tokio::runtime::task::core::Core<T,S>::store_output (140 samples, 0.04%)tokio::runtime::task::harness::poll_future (2,796 samples, 0.74%)std::panic::catch_unwind (2,788 samples, 0.74%)std::panicking::try (2,788 samples, 0.74%)std::panicking::try::do_call (2,784 samples, 0.74%)core::mem::manually_drop::ManuallyDrop<T>::take (60 samples, 0.02%)core::ptr::read (60 samples, 0.02%)tokio::runtime::task::raw::poll (2,887 samples, 0.76%)tokio::runtime::task::harness::Harness<T,S>::poll (2,876 samples, 0.76%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (2,876 samples, 0.76%)tokio::runtime::task::state::State::transition_to_running (74 samples, 0.02%)tokio::runtime::task::state::State::fetch_update_action (74 samples, 0.02%)core::array::<impl core::default::Default for [T: 32]>::default (83 samples, 0.02%)tokio::runtime::time::wheel::Wheel::poll (58 samples, 0.02%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (325 samples, 
0.09%)tokio::runtime::time::Driver::park_internal (147 samples, 0.04%)tokio::runtime::time::wheel::Wheel::next_expiration (53 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (43 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (112 samples, 0.03%)alloc::vec::from_elem (42 samples, 0.01%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (42 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (42 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (42 samples, 0.01%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (42 samples, 0.01%)alloc::alloc::Global::alloc_impl (42 samples, 0.01%)alloc::alloc::alloc_zeroed (42 samples, 0.01%)__rdl_alloc_zeroed (42 samples, 0.01%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (42 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (318 samples, 0.08%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (73 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (61 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (85 samples, 0.02%)std::sync::rwlock::RwLock<T>::read (80 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read (63 samples, 0.02%)[[heap]] (8,241 samples, 2.18%)[..[[vdso]] (1,241 samples, 
0.33%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (96 samples, 0.03%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (69 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (43 samples, 0.01%)<alloc::string::String as core::fmt::Write>::write_char (41 samples, 0.01%)<alloc::string::String as core::fmt::Write>::write_str (379 samples, 0.10%)alloc::string::String::push_str (45 samples, 0.01%)alloc::vec::Vec<T,A>::extend_from_slice (45 samples, 0.01%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (45 samples, 0.01%)alloc::vec::Vec<T,A>::append_elements (45 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (66 samples, 0.02%)core::num::<impl u64>::rotate_left (48 samples, 0.01%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (148 samples, 0.04%)core::num::<impl u64>::wrapping_add (42 samples, 0.01%)core::hash::sip::u8to64_le (134 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (506 samples, 0.13%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (54 samples, 0.01%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (49 samples, 0.01%)core::ops::function::FnMut::call_mut (41 samples, 0.01%)tokio::runtime::coop::poll_proceed (41 samples, 0.01%)tokio::runtime::context::budget (41 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (41 samples, 0.01%)tokio::io::ready::Ready::intersection (48 samples, 0.01%)tokio::io::ready::Ready::from_interest (46 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (297 samples, 0.08%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (83 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as 
core::ops::drop::Drop>::drop (83 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (67 samples, 0.02%)core::result::Result<T,E>::is_err (375 samples, 0.10%)core::result::Result<T,E>::is_ok (375 samples, 0.10%)tokio::loom::std::mutex::Mutex<T>::lock (493 samples, 0.13%)std::sync::mutex::Mutex<T>::lock (463 samples, 0.12%)std::sys::sync::mutex::futex::Mutex::lock (443 samples, 0.12%)core::sync::atomic::AtomicU32::compare_exchange (51 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (51 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (1,400 samples, 0.37%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (50 samples, 0.01%)[[vdso]] (3,493 samples, 0.92%)[profiling] (68 samples, 0.02%)core::fmt::write (51 samples, 0.01%)__GI___clock_gettime (73 samples, 0.02%)__GI___libc_free (449 samples, 0.12%)arena_for_chunk (85 samples, 0.02%)arena_for_chunk (71 samples, 0.02%)heap_for_ptr (67 samples, 0.02%)heap_max_size (49 samples, 0.01%)__GI___libc_malloc (293 samples, 0.08%)__GI___lll_lock_wait_private (144 samples, 0.04%)futex_wait (95 samples, 0.03%)__GI___lll_lock_wake_private (479 samples, 0.13%)__GI___pthread_disable_asynccancel (90 samples, 0.02%)__GI_getsockname (1,281 samples, 0.34%)__libc_calloc (42 samples, 0.01%)__libc_recvfrom (121 samples, 0.03%)__libc_sendto (602 samples, 0.16%)__memchr_evex (56 samples, 0.01%)__memcmp_evex_movbe (1,539 samples, 0.41%)__memcpy_avx512_unaligned_erms (1,154 samples, 0.30%)__memset_avx512_unaligned_erms (1,515 samples, 0.40%)__posix_memalign (131 samples, 0.03%)__posix_memalign (85 samples, 0.02%)_mid_memalign (85 samples, 0.02%)_int_free (1,524 samples, 0.40%)_int_malloc (1,484 samples, 0.39%)_int_memalign (156 samples, 0.04%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (82 samples, 0.02%)alloc::collections::btree::search::<impl 
alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (64 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (64 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (64 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (161 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::grow_one (44 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (146 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::grow_amortized (101 samples, 0.03%)alloc::raw_vec::finish_grow (199 samples, 0.05%)core::ptr::drop_in_place<aquatic_udp_protocol::response::Response> (166 samples, 0.04%)core::ptr::drop_in_place<tokio::net::udp::UdpSocket::send_to<&core::net::socket_addr::SocketAddr>::{{closure}}> (57 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (373 samples, 0.10%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::send_response::{{closure}}> (61 samples, 0.02%)malloc_consolidate (373 samples, 0.10%)<ppv_lite86::soft::x2<W,G> as core::ops::arith::AddAssign>::add_assign (62 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::AddAssign>::add_assign (62 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::Add>::add (62 samples, 0.02%)core::core_arch::x86::avx2::_mm256_add_epi32 (62 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (46 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (46 samples, 0.01%)core::core_arch::x86::avx2::_mm256_shuffle_epi8 (46 samples, 0.01%)<ppv_lite86::soft::x2<W,G> as 
ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (40 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (40 samples, 0.01%)rand_chacha::guts::round (244 samples, 0.06%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right25 (45 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right25 (45 samples, 0.01%)core::core_arch::x86::avx2::_mm256_or_si256 (45 samples, 0.01%)rand_chacha::guts::refill_wide::impl_avx2 (346 samples, 0.09%)rand_chacha::guts::refill_wide::fn_impl (345 samples, 0.09%)rand_chacha::guts::refill_wide_impl (345 samples, 0.09%)tokio::runtime::context::with_scheduler (45 samples, 0.01%)tokio::runtime::io::driver::Driver::turn (50 samples, 0.01%)__entry_text_start (235 samples, 0.06%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (2,041 samples, 0.54%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (957 samples, 0.25%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (255 samples, 0.07%)core::sync::atomic::AtomicUsize::fetch_add (100 samples, 0.03%)core::sync::atomic::atomic_add (100 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (155 samples, 0.04%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (108 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (84 samples, 0.02%)tokio::runtime::task::core::Header::get_owner_id (38 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with (38 samples, 0.01%)tokio::runtime::task::core::Header::get_owner_id::{{closure}} (38 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (43 samples, 
0.01%)tokio::runtime::task::list::OwnedTasks<S>::remove (39 samples, 0.01%)torrust_tracker::servers::udp::handlers::RequestId::make (173 samples, 0.05%)__entry_text_start (171 samples, 0.05%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (293 samples, 0.08%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (287 samples, 0.08%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (230 samples, 0.06%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (230 samples, 0.06%)tokio::runtime::task::core::Core<T,S>::set_stage (456 samples, 0.12%)core::sync::atomic::AtomicUsize::fetch_xor (54 samples, 0.01%)core::sync::atomic::atomic_xor (54 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::complete (73 samples, 0.02%)tokio::runtime::task::state::State::transition_to_complete (57 samples, 0.02%)std::sync::poison::Flag::done (103 samples, 0.03%)core::ptr::drop_in_place<tokio::util::sharded_list::ShardGuard<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>> (136 samples, 0.04%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>>> (136 samples, 0.04%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (136 samples, 0.04%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (242 samples, 0.06%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (60 samples, 0.02%)core::result::Result<T,E>::is_err (101 samples, 0.03%)core::result::Result<T,E>::is_ok (101 samples, 0.03%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (599 samples, 0.16%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::lock_shard (255 samples, 
0.07%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (255 samples, 0.07%)tokio::loom::std::mutex::Mutex<T>::lock (252 samples, 0.07%)std::sync::mutex::Mutex<T>::lock (252 samples, 0.07%)std::sys::sync::mutex::futex::Mutex::lock (251 samples, 0.07%)core::sync::atomic::AtomicU32::compare_exchange (150 samples, 0.04%)core::sync::atomic::atomic_compare_exchange (150 samples, 0.04%)std::sync::poison::Flag::done (100 samples, 0.03%)std::thread::panicking (55 samples, 0.01%)std::panicking::panicking (55 samples, 0.01%)std::panicking::panic_count::count_is_zero (55 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (173 samples, 0.05%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (173 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::unlock (73 samples, 0.02%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (293 samples, 0.08%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (293 samples, 0.08%)core::slice::<impl [T]>::contains (631 samples, 0.17%)<T as core::slice::cmp::SliceContains>::slice_contains (631 samples, 0.17%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (631 samples, 0.17%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (134 samples, 0.04%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (134 samples, 0.04%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (844 samples, 0.22%)tokio::loom::std::mutex::Mutex<T>::lock (38 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (858 samples, 0.23%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (189 samples, 0.05%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (189 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::unlock (181 samples, 
0.05%)core::sync::atomic::AtomicU32::swap (65 samples, 0.02%)core::sync::atomic::atomic_swap (65 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (44 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (43 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (38 samples, 0.01%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (253 samples, 0.07%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (42 samples, 0.01%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (42 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (48 samples, 0.01%)alloc::sync::Arc<T,A>::inner (48 samples, 0.01%)core::ptr::non_null::NonNull<T>::as_ref (48 samples, 0.01%)core::sync::atomic::AtomicU32::load (44 samples, 0.01%)core::sync::atomic::atomic_load (44 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (356 samples, 0.09%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (216 samples, 0.06%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (168 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::len (98 samples, 0.03%)core::sync::atomic::AtomicU64::load (54 samples, 0.01%)core::sync::atomic::atomic_load (54 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (1,635 samples, 0.43%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (667 samples, 0.18%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (66 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (61 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (96 samples, 0.03%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (1,751 samples, 0.46%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} 
(1,751 samples, 0.46%)tokio::runtime::scheduler::multi_thread::worker::run (1,751 samples, 0.46%)tokio::runtime::context::runtime::enter_runtime (1,751 samples, 0.46%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (1,751 samples, 0.46%)tokio::runtime::context::set_scheduler (1,751 samples, 0.46%)std::thread::local::LocalKey<T>::with (1,751 samples, 0.46%)std::thread::local::LocalKey<T>::try_with (1,751 samples, 0.46%)tokio::runtime::context::set_scheduler::{{closure}} (1,751 samples, 0.46%)tokio::runtime::context::scoped::Scoped<T>::set (1,751 samples, 0.46%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (1,751 samples, 0.46%)tokio::runtime::scheduler::multi_thread::worker::Context::run (1,751 samples, 0.46%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (1,772 samples, 0.47%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (1,772 samples, 0.47%)tokio::runtime::task::raw::poll (1,805 samples, 0.48%)tokio::runtime::task::harness::Harness<T,S>::poll (1,787 samples, 0.47%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (1,787 samples, 0.47%)tokio::runtime::task::harness::poll_future (1,787 samples, 0.47%)std::panic::catch_unwind (1,787 samples, 0.47%)std::panicking::try (1,787 samples, 0.47%)std::panicking::try::do_call (1,787 samples, 0.47%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (1,787 samples, 0.47%)tokio::runtime::task::harness::poll_future::{{closure}} (1,787 samples, 0.47%)tokio::runtime::task::core::Core<T,S>::poll (1,787 samples, 0.47%)tokio::runtime::time::wheel::level::Level::next_expiration (54 samples, 0.01%)tokio::runtime::time::wheel::Wheel::next_expiration (72 samples, 0.02%)torrust_tracker::core::Tracker::send_stats_event::{{closure}} (50 samples, 0.01%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (38 samples, 0.01%)<std::hash::random::DefaultHasher as core::hash::Hasher>::finish (157 samples, 
0.04%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::finish (157 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::finish (157 samples, 0.04%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::d_rounds (89 samples, 0.02%)<torrust_tracker_clock::time_extent::TimeExtent as core::hash::Hash>::hash (47 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (265 samples, 0.07%)torrust_tracker::servers::udp::peer_builder::from_request (70 samples, 0.02%)torrust_tracker::servers::udp::request::AnnounceWrapper::new (101 samples, 0.03%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (74 samples, 0.02%)core::sync::atomic::AtomicUsize::fetch_add (48 samples, 0.01%)core::sync::atomic::atomic_add (48 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<tokio::net::udp::UdpSocket>> (88 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (88 samples, 0.02%)core::result::Result<T,E>::map_err (52 samples, 0.01%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (269 samples, 0.07%)torrust_tracker::core::Tracker::announce::{{closure}} (308 samples, 0.08%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (372 samples, 0.10%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (448 samples, 0.12%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (73 samples, 0.02%)core::fmt::num::imp::fmt_u64 (69 samples, 0.02%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (41 samples, 0.01%)<T as alloc::string::ToString>::to_string (182 samples, 0.05%)core::option::Option<T>::expect (91 samples, 0.02%)torrust_tracker_primitives::info_hash::InfoHash::to_hex_string (41 samples, 0.01%)<T as alloc::string::ToString>::to_string (41 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (450 samples, 0.12%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (1,319 samples, 
0.35%)torrust_tracker::servers::udp::logging::log_response (83 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (1,808 samples, 0.48%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (293 samples, 0.08%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (283 samples, 0.07%)tokio::net::udp::UdpSocket::send_to::{{closure}} (265 samples, 0.07%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (227 samples, 0.06%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (124 samples, 0.03%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (61 samples, 0.02%)mio::net::udp::UdpSocket::send_to (61 samples, 0.02%)mio::io_source::IoSource<T>::do_io (61 samples, 0.02%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (61 samples, 0.02%)mio::net::udp::UdpSocket::send_to::{{closure}} (61 samples, 0.02%)std::net::udp::UdpSocket::send_to (61 samples, 0.02%)std::sys_common::net::UdpSocket::send_to (61 samples, 0.02%)std::sys::pal::unix::cvt (61 samples, 0.02%)<isize as std::sys::pal::unix::IsMinusOne>::is_minus_one (61 samples, 0.02%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::count::to_usize::{{closure}} (84 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats::{{closure}} (84 samples, 0.02%)torrust_tracker_primitives::peer::Peer::is_seeder (84 samples, 0.02%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::count (173 samples, 0.05%)core::iter::traits::iterator::Iterator::sum (173 samples, 0.05%)<usize as core::iter::traits::accum::Sum>::sum (173 samples, 0.05%)<core::iter::adapters::map::Map<I,F> as core::iter::traits::iterator::Iterator>::fold (173 samples, 0.05%)core::iter::traits::iterator::Iterator::fold (173 samples, 
0.05%)core::iter::adapters::map::map_fold::{{closure}} (85 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (337 samples, 0.09%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (156 samples, 0.04%)core::mem::drop (39 samples, 0.01%)core::ptr::drop_in_place<core::option::Option<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (39 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (39 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (39 samples, 0.01%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (750 samples, 0.20%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (691 samples, 0.18%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (349 samples, 0.09%)core::option::Option<T>::is_some_and (106 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (105 samples, 0.03%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (101 samples, 0.03%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (101 samples, 0.03%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (61 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as 
core::iter::traits::iterator::Iterator>::next (84 samples, 0.02%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (84 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (181 samples, 0.05%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (181 samples, 0.05%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (181 samples, 0.05%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (181 samples, 0.05%)<u8 as core::slice::cmp::SliceOrd>::compare (181 samples, 0.05%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (76 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (551 samples, 0.15%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (506 samples, 0.13%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (506 samples, 0.13%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (572 samples, 0.15%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (751 samples, 0.20%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (64 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (83 samples, 0.02%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (83 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (216 samples, 0.06%)core::array::<impl core::cmp::Ord for [T: N]>::cmp 
(216 samples, 0.06%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (216 samples, 0.06%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (216 samples, 0.06%)<u8 as core::slice::cmp::SliceOrd>::compare (216 samples, 0.06%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (87 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (539 samples, 0.14%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (527 samples, 0.14%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (501 samples, 0.13%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (501 samples, 0.13%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (640 samples, 0.17%)core::sync::atomic::AtomicU32::load (117 samples, 0.03%)core::sync::atomic::atomic_load (117 samples, 0.03%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (167 samples, 0.04%)std::sync::rwlock::RwLock<T>::read (162 samples, 0.04%)std::sys::sync::rwlock::futex::RwLock::read (158 samples, 0.04%)tracing::span::Span::log (82 samples, 0.02%)tracing::span::Span::record_all (143 samples, 0.04%)unlink_chunk (679 samples, 0.18%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 
(71 samples, 0.02%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (71 samples, 0.02%)rand_core::block::BlockRng<R>::generate_and_set (59 samples, 0.02%)rand::rng::Rng::gen (72 samples, 0.02%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (72 samples, 0.02%)rand::rng::Rng::gen (72 samples, 0.02%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (72 samples, 0.02%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (72 samples, 0.02%)[anon] (31,375 samples, 8.29%)[anon]uuid::v4::<impl uuid::Uuid>::new_v4 (90 samples, 0.02%)uuid::rng::bytes (87 samples, 0.02%)rand::random (87 samples, 0.02%)_int_free (938 samples, 0.25%)tcache_put (62 samples, 0.02%)hashbrown::raw::h2 (40 samples, 0.01%)hashbrown::raw::RawTable<T,A>::find_or_find_insert_slot (63 samples, 0.02%)hashbrown::raw::RawTableInner::find_or_find_insert_slot_inner (53 samples, 0.01%)hashbrown::map::HashMap<K,V,S,A>::insert (65 samples, 0.02%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (128 samples, 0.03%)[profiling] (1,452 samples, 0.38%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (104 samples, 0.03%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (89 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (70 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<BorrowType,K,V>::init_front (42 samples, 0.01%)<alloc::string::String as core::fmt::Write>::write_char (41 samples, 0.01%)alloc::vec::Vec<T,A>::reserve (49 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve (45 samples, 0.01%)<alloc::string::String as core::fmt::Write>::write_str (157 samples, 0.04%)alloc::string::String::push_str (107 samples, 
0.03%)alloc::vec::Vec<T,A>::extend_from_slice (107 samples, 0.03%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (107 samples, 0.03%)alloc::vec::Vec<T,A>::append_elements (107 samples, 0.03%)core::num::<impl u64>::rotate_left (45 samples, 0.01%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (114 samples, 0.03%)core::hash::sip::u8to64_le (102 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (388 samples, 0.10%)<core::net::ip_addr::Ipv4Addr as core::hash::Hash>::hash (110 samples, 0.03%)core::array::<impl core::hash::Hash for [T: N]>::hash (109 samples, 0.03%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (109 samples, 0.03%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (93 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (93 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (93 samples, 0.02%)<core::net::socket_addr::SocketAddrV4 as core::hash::Hash>::hash (135 samples, 0.04%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (202 samples, 0.05%)tokio::runtime::context::CONTEXT::__getit (59 samples, 0.02%)core::cell::Cell<T>::get (59 samples, 0.02%)__entry_text_start (42 samples, 0.01%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (164 samples, 0.04%)core::ops::function::FnMut::call_mut (149 samples, 0.04%)tokio::runtime::coop::poll_proceed (149 samples, 0.04%)tokio::runtime::context::budget (149 samples, 0.04%)std::thread::local::LocalKey<T>::try_with (149 samples, 0.04%)tokio::runtime::context::budget::{{closure}} (73 samples, 0.02%)tokio::runtime::coop::poll_proceed::{{closure}} (73 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (218 samples, 0.06%)__entry_text_start (55 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (89 samples, 
0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (89 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (65 samples, 0.02%)core::sync::atomic::AtomicU32::swap (41 samples, 0.01%)core::sync::atomic::atomic_swap (41 samples, 0.01%)std::sync::mutex::MutexGuard<T>::new (41 samples, 0.01%)std::sync::poison::Flag::guard (41 samples, 0.01%)std::thread::panicking (39 samples, 0.01%)std::panicking::panicking (39 samples, 0.01%)std::panicking::panic_count::count_is_zero (39 samples, 0.01%)core::result::Result<T,E>::is_err (337 samples, 0.09%)core::result::Result<T,E>::is_ok (337 samples, 0.09%)core::sync::atomic::AtomicU32::compare_exchange (60 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (60 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (483 samples, 0.13%)std::sync::mutex::Mutex<T>::lock (456 samples, 0.12%)std::sys::sync::mutex::futex::Mutex::lock (415 samples, 0.11%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (864 samples, 0.23%)__memcpy_avx512_unaligned_erms (223 samples, 0.06%)[profiling] (233 samples, 0.06%)binascii::bin2hex (128 samples, 0.03%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (404 samples, 0.11%)__entry_text_start (811 samples, 0.21%)[[vdso]] (12,846 samples, 3.40%)[[v..__GI___clock_gettime (141 samples, 0.04%)arena_for_chunk (178 samples, 0.05%)arena_for_chunk (151 samples, 0.04%)heap_for_ptr (116 samples, 0.03%)heap_max_size (50 samples, 0.01%)__GI___libc_free (827 samples, 0.22%)arena_for_chunk (86 samples, 0.02%)arena_for_chunk (67 samples, 0.02%)heap_for_ptr (39 samples, 0.01%)__GI___libc_malloc (618 samples, 0.16%)tcache_get (96 samples, 0.03%)__GI___libc_write (225 samples, 0.06%)__GI___libc_write (234 samples, 0.06%)__GI___lll_lock_wait_private (110 samples, 0.03%)futex_wait (66 samples, 0.02%)__GI___lll_lock_wake_private (138 samples, 0.04%)__GI___pthread_disable_asynccancel (209 samples, 0.06%)__GI___pthread_enable_asynccancel 
(46 samples, 0.01%)__entry_text_start (46 samples, 0.01%)compiler_builtins::float::conv::int_to_float::u128_to_f64_bits (138 samples, 0.04%)__floattidf (183 samples, 0.05%)compiler_builtins::float::conv::__floattidf (172 samples, 0.05%)exp_inline (152 samples, 0.04%)log_inline (148 samples, 0.04%)__ieee754_pow_fma (333 samples, 0.09%)__libc_calloc (299 samples, 0.08%)__libc_recvfrom (1,422 samples, 0.38%)__libc_sendto (881 samples, 0.23%)__memcmp_evex_movbe (277 samples, 0.07%)__memcpy_avx512_unaligned_erms (4,073 samples, 1.08%)__posix_memalign (367 samples, 0.10%)__posix_memalign (216 samples, 0.06%)_mid_memalign (206 samples, 0.05%)arena_for_chunk (38 samples, 0.01%)__pow (75 samples, 0.02%)__entry_text_start (468 samples, 0.12%)_int_free (2,282 samples, 0.60%)tcache_put (139 samples, 0.04%)_int_malloc (2,521 samples, 0.67%)_int_memalign (241 samples, 0.06%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (125 samples, 0.03%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (165 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::set_ptr_and_cap (69 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (362 samples, 0.10%)alloc::raw_vec::RawVec<T,A>::grow_amortized (318 samples, 0.08%)alloc::raw_vec::finish_grow (233 samples, 0.06%)core::result::Result<T,E>::map_err (88 samples, 0.02%)core::mem::drop (70 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (70 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (70 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (61 samples, 0.02%)core::sync::atomic::AtomicU32::swap (59 samples, 0.02%)core::sync::atomic::atomic_swap (59 samples, 0.02%)alloc_new_heap (197 samples, 0.05%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (197 samples, 0.05%)core::fmt::Formatter::pad (45 samples, 0.01%)core::fmt::Formatter::pad_integral (93 samples, 
0.02%)core::fmt::Formatter::pad_integral::write_prefix (47 samples, 0.01%)core::ptr::drop_in_place<[core::option::Option<core::task::wake::Waker>: 32]> (662 samples, 0.17%)core::ptr::drop_in_place<core::option::Option<core::task::wake::Waker>> (346 samples, 0.09%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (578 samples, 0.15%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}}> (54 samples, 0.01%)core::str::converts::from_utf8 (81 samples, 0.02%)core::str::validations::run_utf8_validation (70 samples, 0.02%)epoll_wait (87 samples, 0.02%)hashbrown::map::HashMap<K,V,S,A>::insert (52 samples, 0.01%)malloc_consolidate (109 samples, 0.03%)std::sys::pal::unix::time::Timespec::new (76 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (357 samples, 0.09%)core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::ge (42 samples, 0.01%)core::cmp::PartialOrd::ge (42 samples, 0.01%)std::sys::pal::unix::time::Timespec::sub_timespec (202 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock_contended (233 samples, 0.06%)std::sys::sync::mutex::futex::Mutex::spin (67 samples, 0.02%)std::sys_common::net::TcpListener::socket_addr (85 samples, 0.02%)std::sys_common::net::sockname (80 samples, 0.02%)syscall (511 samples, 0.14%)core::ptr::drop_in_place<core::cell::RefMut<core::option::Option<alloc::boxed::Box<tokio::runtime::scheduler::multi_thread::worker::Core>>>> (95 samples, 0.03%)core::ptr::drop_in_place<core::cell::BorrowRefMut> (95 samples, 0.03%)<core::cell::BorrowRefMut as core::ops::drop::Drop>::drop (95 samples, 0.03%)core::cell::Cell<T>::set (95 samples, 0.03%)core::cell::Cell<T>::replace (95 samples, 0.03%)core::mem::replace (95 samples, 0.03%)core::ptr::write (95 samples, 0.03%)tokio::runtime::context::with_scheduler (369 samples, 0.10%)std::thread::local::LocalKey<T>::try_with (256 samples, 
0.07%)tokio::runtime::context::with_scheduler::{{closure}} (255 samples, 0.07%)tokio::runtime::context::scoped::Scoped<T>::with (255 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (255 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (255 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (156 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (52 samples, 0.01%)tokio::io::ready::Ready::from_mio (40 samples, 0.01%)core::sync::atomic::AtomicUsize::compare_exchange (39 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (39 samples, 0.01%)tokio::runtime::io::driver::Driver::turn (609 samples, 0.16%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (439 samples, 0.12%)__entry_text_start (200 samples, 0.05%)__entry_text_start (331 samples, 0.09%)__entry_text_start (74 samples, 0.02%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (1,520 samples, 0.40%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (828 samples, 0.22%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (279 samples, 0.07%)core::mem::drop (88 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (88 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (88 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (72 samples, 0.02%)core::sync::atomic::AtomicU32::swap (65 samples, 0.02%)core::sync::atomic::atomic_swap (65 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (69 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (66 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (48 samples, 
0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (566 samples, 0.15%)alloc::vec::Vec<T,A>::pop (77 samples, 0.02%)core::ptr::read (48 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (50 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (50 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (46 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (46 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (4,232 samples, 1.12%)core::sync::atomic::atomic_add (4,232 samples, 1.12%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (4,298 samples, 1.14%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (4,674 samples, 1.24%)__entry_text_start (67 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (210 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (107 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark_condvar (84 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (82 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::next_remote_task (120 samples, 0.03%)tokio::runtime::scheduler::inject::shared::Shared<T>::is_empty (48 samples, 0.01%)tokio::runtime::scheduler::inject::shared::Shared<T>::len (48 samples, 0.01%)core::sync::atomic::AtomicUsize::load (48 samples, 0.01%)core::sync::atomic::atomic_load (48 samples, 0.01%)tokio::runtime::task::core::Header::get_owner_id (61 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with (61 samples, 0.02%)tokio::runtime::task::core::Header::get_owner_id::{{closure}} (61 samples, 0.02%)std::sync::poison::Flag::done (462 samples, 
0.12%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>,tokio::runtime::task::core::Header>>> (540 samples, 0.14%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (540 samples, 0.14%)std::sys::sync::mutex::futex::Mutex::unlock (76 samples, 0.02%)core::sync::atomic::AtomicUsize::fetch_sub (91 samples, 0.02%)core::sync::atomic::atomic_sub (91 samples, 0.02%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (61 samples, 0.02%)core::result::Result<T,E>::is_err (88 samples, 0.02%)core::result::Result<T,E>::is_ok (88 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (884 samples, 0.23%)tokio::runtime::task::list::OwnedTasks<S>::remove (873 samples, 0.23%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (796 samples, 0.21%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (102 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (102 samples, 0.03%)std::sync::mutex::Mutex<T>::lock (102 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::lock (96 samples, 0.03%)core::cell::RefCell<T>::borrow_mut (38 samples, 0.01%)core::cell::RefCell<T>::try_borrow_mut (38 samples, 0.01%)core::cell::BorrowRefMut::new (38 samples, 0.01%)tokio::runtime::scheduler::defer::Defer::wake (86 samples, 0.02%)std::sys::pal::unix::futex::futex_wait (101 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (186 samples, 0.05%)std::sync::condvar::Condvar::wait (132 samples, 0.03%)std::sys::sync::condvar::futex::Condvar::wait (130 samples, 0.03%)std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (130 samples, 
0.03%)core::sync::atomic::AtomicUsize::compare_exchange (69 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (69 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (281 samples, 0.07%)tokio::runtime::driver::Driver::park (96 samples, 0.03%)tokio::runtime::driver::TimeDriver::park (96 samples, 0.03%)tokio::runtime::time::Driver::park (91 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Parker::park (627 samples, 0.17%)tokio::runtime::scheduler::multi_thread::park::Inner::park (627 samples, 0.17%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (1,130 samples, 0.30%)tokio::runtime::scheduler::multi_thread::worker::Core::should_notify_others (62 samples, 0.02%)core::cell::RefCell<T>::borrow_mut (84 samples, 0.02%)core::cell::RefCell<T>::try_borrow_mut (84 samples, 0.02%)core::cell::BorrowRefMut::new (84 samples, 0.02%)core::cell::RefCell<T>::borrow_mut (250 samples, 0.07%)core::cell::RefCell<T>::try_borrow_mut (250 samples, 0.07%)core::cell::BorrowRefMut::new (250 samples, 0.07%)tokio::runtime::coop::budget (368 samples, 0.10%)tokio::runtime::coop::with_budget (368 samples, 0.10%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (318 samples, 0.08%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (82 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (600 samples, 0.16%)tokio::runtime::signal::Driver::process (79 samples, 0.02%)tokio::runtime::io::driver::signal::<impl tokio::runtime::io::driver::Driver>::consume_signal_ready (49 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<tokio::net::udp::UdpSocket>> (62 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (62 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker::core::Tracker>> (140 samples, 0.04%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (140 samples, 0.04%)core::sync::atomic::AtomicUsize::fetch_sub (91 
samples, 0.02%)core::sync::atomic::atomic_sub (91 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (261 samples, 0.07%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (260 samples, 0.07%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (233 samples, 0.06%)tokio::runtime::task::core::Core<T,S>::set_stage (353 samples, 0.09%)core::sync::atomic::AtomicUsize::fetch_xor (127 samples, 0.03%)core::sync::atomic::atomic_xor (127 samples, 0.03%)tokio::runtime::task::state::State::transition_to_complete (135 samples, 0.04%)core::sync::atomic::AtomicUsize::fetch_sub (44 samples, 0.01%)core::sync::atomic::atomic_sub (44 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::complete (242 samples, 0.06%)tokio::runtime::task::state::State::transition_to_terminal (67 samples, 0.02%)tokio::runtime::task::harness::Harness<T,S>::dealloc (53 samples, 0.01%)std::sync::poison::Flag::done (203 samples, 0.05%)core::ptr::drop_in_place<tokio::util::sharded_list::ShardGuard<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>> (272 samples, 0.07%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>>> (272 samples, 0.07%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (272 samples, 0.07%)std::sys::sync::mutex::futex::Mutex::unlock (65 samples, 0.02%)core::sync::atomic::AtomicU32::swap (48 samples, 0.01%)core::sync::atomic::atomic_swap (48 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (78 samples, 0.02%)core::sync::atomic::atomic_add (78 samples, 0.02%)<tokio::runtime::task::Task<S> as tokio::util::linked_list::Link>::pointers (39 samples, 0.01%)tokio::runtime::task::core::Header::get_trailer (39 samples, 
0.01%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (531 samples, 0.14%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (144 samples, 0.04%)core::result::Result<T,E>::is_err (40 samples, 0.01%)core::result::Result<T,E>::is_ok (40 samples, 0.01%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (1,157 samples, 0.31%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::lock_shard (431 samples, 0.11%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (430 samples, 0.11%)tokio::loom::std::mutex::Mutex<T>::lock (429 samples, 0.11%)std::sync::mutex::Mutex<T>::lock (429 samples, 0.11%)std::sys::sync::mutex::futex::Mutex::lock (425 samples, 0.11%)core::sync::atomic::AtomicU32::compare_exchange (385 samples, 0.10%)core::sync::atomic::atomic_compare_exchange (385 samples, 0.10%)tokio::runtime::task::raw::drop_abort_handle (184 samples, 0.05%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (167 samples, 0.04%)tokio::runtime::task::state::State::ref_dec (167 samples, 0.04%)core::sync::atomic::AtomicUsize::compare_exchange (44 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (44 samples, 0.01%)tokio::runtime::task::raw::drop_join_handle_slow (126 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::drop_join_handle_slow (117 samples, 0.03%)tokio::runtime::task::state::State::unset_join_interested (76 samples, 0.02%)tokio::runtime::task::state::State::fetch_update (76 samples, 0.02%)core::result::Result<T,E>::is_err (53 samples, 0.01%)core::result::Result<T,E>::is_ok (53 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (87 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (82 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (82 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (82 samples, 
0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (56 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (60 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (78 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (74 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (74 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (74 samples, 0.02%)core::result::Result<T,E>::is_err (60 samples, 0.02%)core::result::Result<T,E>::is_ok (60 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::park (266 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (91 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (44 samples, 0.01%)core::sync::atomic::AtomicU64::compare_exchange (98 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (98 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (272 samples, 0.07%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (339 samples, 0.09%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (549 samples, 0.15%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (980 samples, 0.26%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (980 samples, 0.26%)tokio::runtime::scheduler::multi_thread::worker::run (980 samples, 0.26%)tokio::runtime::context::runtime::enter_runtime (980 samples, 0.26%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (980 samples, 0.26%)tokio::runtime::context::set_scheduler (980 samples, 0.26%)std::thread::local::LocalKey<T>::with (980 samples, 0.26%)std::thread::local::LocalKey<T>::try_with (980 samples, 0.26%)tokio::runtime::context::set_scheduler::{{closure}} (980 samples, 0.26%)tokio::runtime::context::scoped::Scoped<T>::set (980 samples, 
0.26%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (980 samples, 0.26%)tokio::runtime::scheduler::multi_thread::worker::Context::run (980 samples, 0.26%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (1,050 samples, 0.28%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (1,047 samples, 0.28%)tokio::runtime::task::core::Core<T,S>::poll (1,075 samples, 0.28%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (1,099 samples, 0.29%)tokio::runtime::task::harness::poll_future::{{closure}} (1,099 samples, 0.29%)tokio::runtime::task::harness::poll_future (1,117 samples, 0.30%)std::panic::catch_unwind (1,115 samples, 0.29%)std::panicking::try (1,115 samples, 0.29%)std::panicking::try::do_call (1,114 samples, 0.29%)tokio::runtime::task::state::State::transition_to_running (377 samples, 0.10%)tokio::runtime::task::state::State::fetch_update_action (377 samples, 0.10%)tokio::runtime::task::raw::poll (1,566 samples, 0.41%)tokio::runtime::task::harness::Harness<T,S>::poll (1,533 samples, 0.41%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (1,525 samples, 0.40%)core::array::<impl core::default::Default for [T: 32]>::default (42 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (190 samples, 0.05%)tokio::loom::std::mutex::Mutex<T>::lock (47 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (47 samples, 0.01%)tokio::runtime::time::source::TimeSource::instant_to_tick (43 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process (73 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (50 samples, 0.01%)tokio::runtime::time::Driver::park_internal (346 samples, 0.09%)core::num::<impl u64>::rotate_right (51 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (441 samples, 0.12%)tokio::runtime::time::wheel::level::slot_range (99 samples, 0.03%)core::num::<impl usize>::pow (99 
samples, 0.03%)tokio::runtime::time::wheel::level::level_range (154 samples, 0.04%)tokio::runtime::time::wheel::level::slot_range (146 samples, 0.04%)core::num::<impl usize>::pow (146 samples, 0.04%)tokio::runtime::time::wheel::level::Level::next_expiration (833 samples, 0.22%)tokio::runtime::time::wheel::level::slot_range (161 samples, 0.04%)core::num::<impl usize>::pow (161 samples, 0.04%)tokio::runtime::time::wheel::Wheel::next_expiration (1,138 samples, 0.30%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::is_empty (55 samples, 0.01%)core::option::Option<T>::is_some (55 samples, 0.01%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (112 samples, 0.03%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_peers_for_client (79 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_peers_for_client (48 samples, 0.01%)core::iter::traits::iterator::Iterator::collect (41 samples, 0.01%)<alloc::vec::Vec<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (41 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (41 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter_nested::SpecFromIterNested<T,I>>::from_iter (41 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (234 samples, 0.06%)std::hash::random::DefaultHasher::new (108 samples, 0.03%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (98 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (75 samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (415 samples, 0.11%)core::sync::atomic::AtomicUsize::fetch_add (129 samples, 
0.03%)core::sync::atomic::atomic_add (129 samples, 0.03%)core::ptr::drop_in_place<alloc::sync::Arc<tokio::net::udp::UdpSocket>> (112 samples, 0.03%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (112 samples, 0.03%)core::sync::atomic::AtomicUsize::fetch_sub (49 samples, 0.01%)core::sync::atomic::atomic_sub (49 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (1,399 samples, 0.37%)<F as core::future::into_future::IntoFuture>::into_future (40 samples, 0.01%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (330 samples, 0.09%)core::sync::atomic::AtomicUsize::fetch_add (210 samples, 0.06%)core::sync::atomic::atomic_add (210 samples, 0.06%)torrust_tracker::servers::udp::handlers::handle_packet (80 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<tokio::net::udp::UdpSocket>> (212 samples, 0.06%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (212 samples, 0.06%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (321 samples, 0.08%)torrust_tracker::core::Tracker::announce::{{closure}} (434 samples, 0.11%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (635 samples, 0.17%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (56 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (986 samples, 0.26%)core::fmt::Formatter::new (50 samples, 0.01%)core::intrinsics::copy_nonoverlapping (39 samples, 0.01%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (208 samples, 0.05%)core::fmt::num::imp::fmt_u64 (188 samples, 0.05%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (131 samples, 0.03%)core::fmt::num::imp::fmt_u64 (119 samples, 0.03%)<T as alloc::string::ToString>::to_string (426 samples, 0.11%)core::option::Option<T>::expect (74 samples, 0.02%)torrust_tracker::servers::udp::logging::map_action_name (51 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (679 samples, 
0.18%)<T as alloc::string::ToString>::to_string (106 samples, 0.03%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (72 samples, 0.02%)core::fmt::num::imp::fmt_u64 (58 samples, 0.02%)core::option::Option<T>::expect (38 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (2,539 samples, 0.67%)torrust_tracker::servers::udp::logging::log_response (198 samples, 0.05%)alloc::vec::from_elem (583 samples, 0.15%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (583 samples, 0.15%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (583 samples, 0.15%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (583 samples, 0.15%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (583 samples, 0.15%)alloc::alloc::Global::alloc_impl (583 samples, 0.15%)alloc::alloc::alloc_zeroed (583 samples, 0.15%)__rdl_alloc_zeroed (583 samples, 0.15%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (583 samples, 0.15%)__entry_text_start (110 samples, 0.03%)__entry_text_start (278 samples, 0.07%)std::sys::pal::unix::cvt (338 samples, 0.09%)<isize as std::sys::pal::unix::IsMinusOne>::is_minus_one (338 samples, 0.09%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (6,133 samples, 1.62%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (2,253 samples, 0.60%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (1,179 samples, 0.31%)tokio::net::udp::UdpSocket::send_to::{{closure}} (1,082 samples, 0.29%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (1,028 samples, 0.27%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (922 samples, 0.24%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (716 samples, 0.19%)mio::net::udp::UdpSocket::send_to (681 samples, 0.18%)mio::io_source::IoSource<T>::do_io (681 samples, 0.18%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (681 
samples, 0.18%)mio::net::udp::UdpSocket::send_to::{{closure}} (681 samples, 0.18%)std::net::udp::UdpSocket::send_to (681 samples, 0.18%)std::sys_common::net::UdpSocket::send_to (661 samples, 0.17%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (38 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (38 samples, 0.01%)core::sync::atomic::atomic_add (38 samples, 0.01%)alloc::vec::Vec<T>::with_capacity (46 samples, 0.01%)alloc::vec::Vec<T,A>::with_capacity_in (46 samples, 0.01%)tokio::net::udp::UdpSocket::readable::{{closure}} (474 samples, 0.13%)tokio::net::udp::UdpSocket::ready::{{closure}} (454 samples, 0.12%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (655 samples, 0.17%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (79 samples, 0.02%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (58 samples, 0.02%)__rdl_alloc (49 samples, 0.01%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (49 samples, 0.01%)std::sys::pal::unix::alloc::aligned_malloc (49 samples, 0.01%)core::option::Option<T>::map (299 samples, 0.08%)tokio::task::spawn::spawn_inner::{{closure}} (299 samples, 0.08%)tokio::runtime::scheduler::Handle::spawn (299 samples, 0.08%)tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (299 samples, 0.08%)tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (299 samples, 0.08%)tokio::runtime::task::list::OwnedTasks<S>::bind (287 samples, 0.08%)tokio::runtime::task::new_task (278 samples, 0.07%)tokio::runtime::task::raw::RawTask::new (278 samples, 0.07%)tokio::runtime::task::core::Cell<T,S>::new (278 samples, 0.07%)alloc::boxed::Box<T>::new (56 samples, 0.01%)alloc::alloc::exchange_malloc (56 samples, 0.01%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (56 samples, 0.01%)alloc::alloc::Global::alloc_impl (56 samples, 0.01%)alloc::alloc::alloc (56 samples, 
0.01%)torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (1,073 samples, 0.28%)torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (1,073 samples, 0.28%)torrust_tracker::servers::udp::server::Udp::spawn_request_processor (300 samples, 0.08%)tokio::task::spawn::spawn (300 samples, 0.08%)tokio::task::spawn::spawn_inner (300 samples, 0.08%)tokio::runtime::context::current::with_current (300 samples, 0.08%)std::thread::local::LocalKey<T>::try_with (300 samples, 0.08%)tokio::runtime::context::current::with_current::{{closure}} (300 samples, 0.08%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (81 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (62 samples, 0.02%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (353 samples, 0.09%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (273 samples, 0.07%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (191 samples, 0.05%)core::option::Option<T>::is_some_and (46 samples, 0.01%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (45 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (44 samples, 0.01%)<alloc::sync::Arc<T,A> as 
core::ops::drop::Drop>::drop (44 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (61 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (48 samples, 0.01%)core::sync::atomic::AtomicU32::load (46 samples, 0.01%)core::sync::atomic::atomic_load (46 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (74 samples, 0.02%)std::sync::rwlock::RwLock<T>::read (68 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read (65 samples, 0.02%)tracing::span::Span::log (64 samples, 0.02%)core::fmt::Arguments::new_v1 (39 samples, 0.01%)tracing_core::span::Record::is_empty (67 samples, 0.02%)tracing_core::field::ValueSet::is_empty (67 samples, 0.02%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::all (43 samples, 0.01%)tracing::span::Span::record_all (253 samples, 0.07%)unlink_chunk (517 samples, 0.14%)uuid::builder::Builder::with_variant (112 samples, 0.03%)__entry_text_start (86 samples, 0.02%)uuid::builder::Builder::from_random_bytes (150 samples, 0.04%)uuid::builder::Builder::with_version (38 samples, 
0.01%)__entry_text_start (187 samples, 0.05%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (433 samples, 0.11%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (433 samples, 0.11%)rand::rng::Rng::gen (445 samples, 0.12%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (445 samples, 0.12%)rand::rng::Rng::gen (445 samples, 0.12%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (445 samples, 0.12%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (445 samples, 0.12%)[unknown] (62,585 samples, 16.54%)[unknown]uuid::v4::<impl uuid::Uuid>::new_v4 (699 samples, 0.18%)uuid::rng::bytes (533 samples, 0.14%)rand::random (533 samples, 0.14%)__entry_text_start (59 samples, 0.02%)__GI___libc_malloc (138 samples, 0.04%)__memcpy_avx512_unaligned_erms (107 samples, 0.03%)_int_free (89 samples, 0.02%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (41 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (73 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::grow_amortized (64 samples, 0.02%)__perf_event_task_sched_in (42 samples, 0.01%)ctx_sched_in (42 samples, 0.01%)visit_groups_merge.constprop.0.isra.0 (42 samples, 0.01%)__x64_sys_futex (45 samples, 0.01%)do_futex (45 samples, 0.01%)futex_wait (45 samples, 0.01%)futex_wait_queue (45 samples, 0.01%)schedule (45 samples, 0.01%)__schedule (45 samples, 0.01%)finish_task_switch.isra.0 (45 samples, 0.01%)std::sys::pal::unix::futex::futex_wait (48 samples, 0.01%)syscall (48 samples, 0.01%)entry_SYSCALL_64_after_hwframe (48 samples, 0.01%)do_syscall_64 (48 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (50 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (50 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (50 samples, 
0.01%)std::sys::sync::mutex::futex::Mutex::lock_contended (50 samples, 0.01%)[[vdso]] (49 samples, 0.01%)[[vdso]] (43 samples, 0.01%)[[vdso]] (1,196 samples, 0.32%)__pow (1,270 samples, 0.34%)std::f64::<impl f64>::powf (1,334 samples, 0.35%)std::time::Instant::now (43 samples, 0.01%)std::sys::pal::unix::time::Instant::now (43 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::end_processing_scheduled_tasks (1,559 samples, 0.41%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_processing_scheduled_tasks (47 samples, 0.01%)std::time::Instant::now (45 samples, 0.01%)std::sys::pal::unix::time::Instant::now (45 samples, 0.01%)std::sys::pal::unix::time::Timespec::now (40 samples, 0.01%)ep_item_poll.isra.0 (48 samples, 0.01%)ep_send_events (86 samples, 0.02%)__x64_sys_epoll_wait (135 samples, 0.04%)do_epoll_wait (131 samples, 0.03%)ep_poll (124 samples, 0.03%)mio::poll::Poll::poll (144 samples, 0.04%)mio::sys::unix::selector::epoll::Selector::select (144 samples, 0.04%)epoll_wait (143 samples, 0.04%)entry_SYSCALL_64_after_hwframe (140 samples, 0.04%)do_syscall_64 (137 samples, 0.04%)tokio::runtime::io::driver::Driver::turn (157 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (169 samples, 0.04%)tokio::runtime::scheduler::multi_thread::park::Parker::park_timeout (168 samples, 0.04%)tokio::runtime::driver::Driver::park_timeout (168 samples, 0.04%)tokio::runtime::driver::TimeDriver::park_timeout (168 samples, 0.04%)tokio::runtime::time::Driver::park_timeout (168 samples, 0.04%)tokio::runtime::time::Driver::park_internal (161 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Context::maintenance (244 samples, 0.06%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (149 samples, 0.04%)alloc::sync::Arc<T,A>::inner (149 samples, 0.04%)core::ptr::non_null::NonNull<T>::as_ref (149 samples, 0.04%)core::result::Result<T,E>::is_ok (44 samples, 
0.01%)core::sync::atomic::AtomicUsize::compare_exchange (43 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (43 samples, 0.01%)core::bool::<impl bool>::then (63 samples, 0.02%)__x64_sys_futex (297 samples, 0.08%)futex_setup_timer (44 samples, 0.01%)_raw_spin_unlock (43 samples, 0.01%)futex_unqueue (124 samples, 0.03%)__futex_queue (105 samples, 0.03%)plist_add (143 samples, 0.04%)_raw_spin_lock (582 samples, 0.15%)clear_buddies (87 samples, 0.02%)__update_load_avg_cfs_rq (68 samples, 0.02%)__update_load_avg_se (74 samples, 0.02%)clear_buddies (130 samples, 0.03%)update_cfs_group (489 samples, 0.13%)reweight_entity (204 samples, 0.05%)__calc_delta (373 samples, 0.10%)__cgroup_account_cputime (50 samples, 0.01%)cpuacct_charge (423 samples, 0.11%)update_curr (1,525 samples, 0.40%)update_min_vruntime (71 samples, 0.02%)__update_load_avg_cfs_rq (489 samples, 0.13%)__update_load_avg_se (418 samples, 0.11%)update_load_avg (1,435 samples, 0.38%)dequeue_entity (4,560 samples, 1.21%)update_min_vruntime (146 samples, 0.04%)update_cfs_group (222 samples, 0.06%)update_curr (42 samples, 0.01%)dequeue_task_fair (5,294 samples, 1.40%)update_min_vruntime (65 samples, 0.02%)dequeue_task (5,351 samples, 1.41%)dequeue_task_fair (63 samples, 0.02%)_raw_spin_unlock (82 samples, 0.02%)__rcu_read_unlock (179 samples, 0.05%)perf_ibs_add (421 samples, 0.11%)perf_ibs_start (364 samples, 0.10%)perf_event_update_userpage (129 samples, 0.03%)event_sched_in (736 samples, 0.19%)merge_sched_in (889 samples, 0.23%)perf_pmu_nop_int (88 samples, 0.02%)ctx_sched_in (1,272 samples, 0.34%)visit_groups_merge.constprop.0.isra.0 (1,233 samples, 0.33%)rb_next (161 samples, 0.04%)perf_ctx_enable (43 samples, 0.01%)perf_ctx_sched_task_cb (68 samples, 0.02%)perf_pmu_nop_void (75 samples, 0.02%)__perf_event_task_sched_in (1,616 samples, 0.43%)__rcu_read_unlock (40 samples, 0.01%)_raw_spin_unlock (100 samples, 0.03%)finish_task_switch.isra.0 (2,202 samples, 0.58%)pick_next_task_fair (308 samples, 
0.08%)newidle_balance (193 samples, 0.05%)__rcu_read_lock (39 samples, 0.01%)pick_next_task_idle (192 samples, 0.05%)__update_idle_core (128 samples, 0.03%)put_prev_entity (53 samples, 0.01%)check_cfs_rq_runtime (38 samples, 0.01%)check_spread.isra.0 (133 samples, 0.04%)pick_next_task (1,105 samples, 0.29%)put_prev_task_fair (422 samples, 0.11%)put_prev_entity (169 samples, 0.04%)__rcu_read_unlock (39 samples, 0.01%)_raw_spin_lock (91 samples, 0.02%)perf_ibs_del (737 samples, 0.19%)perf_ibs_stop (677 samples, 0.18%)native_read_msr (236 samples, 0.06%)event_sched_out (785 samples, 0.21%)__pmu_ctx_sched_out (1,001 samples, 0.26%)group_sched_out (928 samples, 0.25%)perf_ibs_del (73 samples, 0.02%)ctx_sched_out (1,285 samples, 0.34%)sched_clock_cpu (225 samples, 0.06%)sched_clock (194 samples, 0.05%)native_sched_clock (194 samples, 0.05%)perf_ctx_disable (116 samples, 0.03%)perf_ctx_sched_task_cb (85 samples, 0.02%)perf_pmu_nop_void (71 samples, 0.02%)__perf_event_task_sched_out (1,815 samples, 0.48%)perf_event_context_sched_out (1,620 samples, 0.43%)prepare_task_switch (1,888 samples, 0.50%)psi_group_change (82 samples, 0.02%)psi_group_change (1,810 samples, 0.48%)record_times (50 samples, 0.01%)record_times (90 samples, 0.02%)psi_task_switch (2,481 samples, 0.66%)sched_clock_cpu (300 samples, 0.08%)sched_clock (256 samples, 0.07%)native_sched_clock (256 samples, 0.07%)put_prev_task_fair (111 samples, 0.03%)rcu_note_context_switch (60 samples, 0.02%)__schedule (14,876 samples, 3.93%)__sc..update_rq_clock (114 samples, 0.03%)sched_clock_cpu (81 samples, 0.02%)sched_clock (66 samples, 0.02%)native_sched_clock (66 samples, 0.02%)futex_wait_queue (15,388 samples, 4.07%)fute..schedule (15,047 samples, 3.98%)sche..__get_user_nocheck_4 (99 samples, 0.03%)_raw_spin_lock (50 samples, 0.01%)futex_hash (258 samples, 0.07%)futex_q_lock (294 samples, 0.08%)futex_q_unlock (47 samples, 0.01%)futex_wait_setup (1,137 samples, 0.30%)get_futex_key (137 samples, 0.04%)get_futex_key (71 
samples, 0.02%)futex_wait (17,298 samples, 4.57%)futex..schedule (42 samples, 0.01%)do_futex (17,499 samples, 4.62%)do_fu..__x64_sys_futex (17,785 samples, 4.70%)__x64..do_futex (50 samples, 0.01%)__put_user_8 (417 samples, 0.11%)__get_user_8 (293 samples, 0.08%)__rseq_handle_notify_resume (2,024 samples, 0.53%)rseq_ip_fixup (472 samples, 0.12%)rseq_get_rseq_cs.isra.0 (88 samples, 0.02%)blkcg_maybe_throttle_current (123 samples, 0.03%)mem_cgroup_handle_over_high (234 samples, 0.06%)exit_to_user_mode_loop (2,785 samples, 0.74%)mem_cgroup_handle_over_high (59 samples, 0.02%)exit_to_user_mode_prepare (3,301 samples, 0.87%)switch_fpu_return (62 samples, 0.02%)fpregs_assert_state_consistent (42 samples, 0.01%)do_syscall_64 (21,396 samples, 5.65%)do_sysc..syscall_exit_to_user_mode (3,407 samples, 0.90%)entry_SYSCALL_64_after_hwframe (21,799 samples, 5.76%)entry_S..std::sys::pal::unix::futex::futex_wait (22,733 samples, 6.01%)std::sys..syscall (22,455 samples, 5.93%)syscallsyscall_return_via_sysret (192 samples, 0.05%)core::result::Result<T,E>::is_err (236 samples, 0.06%)core::result::Result<T,E>::is_ok (236 samples, 0.06%)std::sync::condvar::Condvar::wait (22,990 samples, 6.08%)std::syn..std::sys::sync::condvar::futex::Condvar::wait (22,989 samples, 6.08%)std::sys..std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (22,989 samples, 6.08%)std::sys..std::sys::sync::mutex::futex::Mutex::lock (256 samples, 0.07%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (23,237 samples, 6.14%)tokio::r..tokio::loom::std::mutex::Mutex<T>::lock (175 samples, 0.05%)std::sync::mutex::Mutex<T>::lock (86 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (82 samples, 0.02%)core::sync::atomic::AtomicU32::compare_exchange (82 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (82 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (127 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Parker::park (102 
samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::park (102 samples, 0.03%)core::array::<impl core::default::Default for [T: 32]>::default (58 samples, 0.02%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (104 samples, 0.03%)tokio::runtime::time::wheel::level::level_range (41 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_expiration (204 samples, 0.05%)tokio::runtime::time::wheel::level::slot_range (48 samples, 0.01%)core::num::<impl usize>::pow (48 samples, 0.01%)tokio::runtime::time::wheel::Wheel::next_expiration (389 samples, 0.10%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::is_empty (61 samples, 0.02%)core::option::Option<T>::is_some (61 samples, 0.02%)tokio::runtime::time::wheel::level::Level::next_expiration (60 samples, 0.02%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (597 samples, 0.16%)tokio::runtime::time::wheel::Wheel::poll_at (82 samples, 0.02%)tokio::runtime::time::wheel::Wheel::next_expiration (70 samples, 0.02%)core::option::Option<T>::map (163 samples, 0.04%)<mio::event::events::Iter as core::iter::traits::iterator::Iterator>::next (174 samples, 0.05%)core::result::Result<T,E>::map (82 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (82 samples, 0.02%)alloc::vec::Vec<T,A>::set_len (51 samples, 0.01%)[[vdso]] (152 samples, 0.04%)__GI___pthread_disable_asynccancel (86 samples, 0.02%)__x64_sys_epoll_wait (99 samples, 0.03%)__fget_light (521 samples, 0.14%)__fdget (595 samples, 0.16%)__rcu_read_unlock (60 samples, 0.02%)__put_user_nocheck_4 (916 samples, 0.24%)__put_user_nocheck_8 (1,566 samples, 0.41%)_raw_write_lock_irq (418 samples, 0.11%)queued_write_lock_slowpath (97 samples, 0.03%)_raw_write_unlock_irq (119 samples, 0.03%)ep_done_scan (55 samples, 0.01%)__pm_relax (54 samples, 0.01%)_raw_write_lock_irq (1,420 samples, 0.38%)queued_write_lock_slowpath (237 samples, 
0.06%)_raw_write_unlock_irq (256 samples, 0.07%)_raw_write_lock_irq (1,302 samples, 0.34%)queued_write_lock_slowpath (356 samples, 0.09%)_raw_write_unlock_irq (135 samples, 0.04%)ep_done_scan (1,745 samples, 0.46%)eventfd_poll (107 samples, 0.03%)sock_poll (4,183 samples, 1.11%)udp_poll (3,029 samples, 0.80%)datagram_poll (2,364 samples, 0.62%)ep_item_poll.isra.0 (4,869 samples, 1.29%)udp_poll (92 samples, 0.02%)mutex_lock (697 samples, 0.18%)ep_send_events (9,657 samples, 2.55%)ep..sock_poll (71 samples, 0.02%)mutex_unlock (1,230 samples, 0.33%)_raw_spin_lock_irqsave (40 samples, 0.01%)hrtimer_init_sleeper (55 samples, 0.01%)__hrtimer_init (47 samples, 0.01%)enqueue_hrtimer (76 samples, 0.02%)timerqueue_add (55 samples, 0.01%)__hrtimer_start_range_ns (185 samples, 0.05%)_raw_spin_lock_irqsave (98 samples, 0.03%)__raw_spin_lock_irqsave (98 samples, 0.03%)hrtimer_start_range_ns (349 samples, 0.09%)rb_erase (52 samples, 0.01%)__remove_hrtimer (120 samples, 0.03%)_raw_spin_lock_irqsave (125 samples, 0.03%)__raw_spin_lock_irqsave (123 samples, 0.03%)hrtimer_try_to_cancel (380 samples, 0.10%)_raw_spin_lock (103 samples, 0.03%)clear_buddies (64 samples, 0.02%)__update_load_avg_cfs_rq (53 samples, 0.01%)__update_load_avg_se (58 samples, 0.02%)clear_buddies (87 samples, 0.02%)update_cfs_group (299 samples, 0.08%)reweight_entity (137 samples, 0.04%)__calc_delta (268 samples, 0.07%)__cgroup_account_cputime (46 samples, 0.01%)cpuacct_charge (227 samples, 0.06%)update_curr (885 samples, 0.23%)update_min_vruntime (41 samples, 0.01%)__update_load_avg_cfs_rq (348 samples, 0.09%)__update_load_avg_se (251 samples, 0.07%)update_load_avg (968 samples, 0.26%)dequeue_entity (2,967 samples, 0.78%)update_min_vruntime (92 samples, 0.02%)update_cfs_group (119 samples, 0.03%)dequeue_task_fair (3,442 samples, 0.91%)dequeue_task (3,487 samples, 0.92%)dequeue_task_fair (42 samples, 0.01%)_raw_spin_unlock (55 samples, 0.01%)perf_ibs_add (51 samples, 0.01%)perf_ibs_start (42 samples, 
0.01%)event_sched_in (79 samples, 0.02%)merge_sched_in (116 samples, 0.03%)ctx_sched_in (250 samples, 0.07%)visit_groups_merge.constprop.0.isra.0 (226 samples, 0.06%)rb_next (57 samples, 0.02%)perf_ctx_sched_task_cb (49 samples, 0.01%)perf_pmu_nop_void (45 samples, 0.01%)__perf_event_task_sched_in (505 samples, 0.13%)_raw_spin_unlock (65 samples, 0.02%)finish_task_switch.isra.0 (876 samples, 0.23%)newidle_balance (124 samples, 0.03%)pick_next_task_fair (200 samples, 0.05%)pick_next_task_idle (154 samples, 0.04%)__update_idle_core (95 samples, 0.03%)put_prev_entity (44 samples, 0.01%)check_spread.isra.0 (78 samples, 0.02%)pick_next_task (735 samples, 0.19%)put_prev_task_fair (269 samples, 0.07%)put_prev_entity (106 samples, 0.03%)_raw_spin_lock (72 samples, 0.02%)perf_ibs_del (525 samples, 0.14%)perf_ibs_stop (478 samples, 0.13%)native_read_msr (186 samples, 0.05%)event_sched_out (557 samples, 0.15%)__pmu_ctx_sched_out (682 samples, 0.18%)group_sched_out (639 samples, 0.17%)perf_ibs_del (52 samples, 0.01%)ctx_sched_out (932 samples, 0.25%)sched_clock_cpu (202 samples, 0.05%)sched_clock (184 samples, 0.05%)native_sched_clock (184 samples, 0.05%)perf_ctx_disable (49 samples, 0.01%)perf_ctx_sched_task_cb (56 samples, 0.01%)__perf_event_task_sched_out (1,260 samples, 0.33%)perf_event_context_sched_out (1,125 samples, 0.30%)prepare_task_switch (1,336 samples, 0.35%)psi_group_change (71 samples, 0.02%)psi_group_change (1,302 samples, 0.34%)record_times (58 samples, 0.02%)record_times (60 samples, 0.02%)psi_task_switch (1,806 samples, 0.48%)sched_clock_cpu (152 samples, 0.04%)sched_clock (134 samples, 0.04%)native_sched_clock (134 samples, 0.04%)put_prev_task_fair (57 samples, 0.02%)__schedule (9,010 samples, 2.38%)__..update_rq_clock (151 samples, 0.04%)sched_clock_cpu (126 samples, 0.03%)sched_clock (113 samples, 0.03%)native_sched_clock (113 samples, 0.03%)schedule_hrtimeout_range (10,138 samples, 2.68%)sc..schedule_hrtimeout_range_clock (10,108 samples, 
2.67%)sc..schedule (9,099 samples, 2.40%)sc..ktime_get_ts64 (80 samples, 0.02%)read_tsc (561 samples, 0.15%)select_estimate_accuracy (800 samples, 0.21%)ep_poll (25,387 samples, 6.71%)ep_pollschedule_hrtimeout_range (48 samples, 0.01%)do_epoll_wait (26,191 samples, 6.92%)do_epoll_..fput (140 samples, 0.04%)ktime_get_ts64 (245 samples, 0.06%)read_tsc (625 samples, 0.17%)__x64_sys_epoll_wait (27,553 samples, 7.28%)__x64_sys_..__put_user_8 (200 samples, 0.05%)__get_user_8 (189 samples, 0.05%)__rseq_handle_notify_resume (1,198 samples, 0.32%)rseq_ip_fixup (299 samples, 0.08%)rseq_get_rseq_cs.isra.0 (38 samples, 0.01%)mem_cgroup_handle_over_high (38 samples, 0.01%)exit_to_user_mode_loop (1,413 samples, 0.37%)exit_to_user_mode_prepare (1,731 samples, 0.46%)syscall_exit_to_user_mode (1,821 samples, 0.48%)do_syscall_64 (29,522 samples, 7.80%)do_syscall_..entry_SYSCALL_64_after_hwframe (29,687 samples, 7.85%)entry_SYSCA..syscall_return_via_sysret (120 samples, 0.03%)epoll_wait (30,473 samples, 8.05%)epoll_waitmio::poll::Poll::poll (30,680 samples, 8.11%)mio::poll::..mio::sys::unix::selector::epoll::Selector::select (30,680 samples, 8.11%)mio::sys::u..tokio::io::ready::Ready::from_mio (42 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (398 samples, 0.11%)tokio::util::bit::Pack::pack (313 samples, 0.08%)core::result::Result<T,E>::is_err (61 samples, 0.02%)core::result::Result<T,E>::is_ok (61 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (119 samples, 0.03%)std::sync::mutex::Mutex<T>::lock (117 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::lock (92 samples, 0.02%)tokio::runtime::io::driver::Driver::turn (31,755 samples, 8.39%)tokio::runti..tokio::runtime::io::scheduled_io::ScheduledIo::wake (430 samples, 0.11%)__GI___clock_gettime (38 samples, 0.01%)std::sys::pal::unix::time::Timespec::now (41 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process (52 samples, 
0.01%)tokio::runtime::time::source::TimeSource::now (52 samples, 0.01%)tokio::time::clock::Clock::now (43 samples, 0.01%)tokio::time::clock::now (43 samples, 0.01%)std::time::Instant::now (43 samples, 0.01%)std::sys::pal::unix::time::Instant::now (43 samples, 0.01%)std::sys::pal::unix::time::Timespec::now (45 samples, 0.01%)tokio::runtime::time::source::TimeSource::now (56 samples, 0.01%)tokio::time::clock::Clock::now (49 samples, 0.01%)tokio::time::clock::now (49 samples, 0.01%)std::time::Instant::now (49 samples, 0.01%)std::sys::pal::unix::time::Instant::now (49 samples, 0.01%)tokio::runtime::time::Driver::park_internal (31,983 samples, 8.45%)tokio::runti..tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (32,791 samples, 8.67%)tokio::runti..tokio::runtime::driver::Driver::park (32,760 samples, 8.66%)tokio::runti..tokio::runtime::driver::TimeDriver::park (32,760 samples, 8.66%)tokio::runti..tokio::runtime::time::Driver::park (32,760 samples, 8.66%)tokio::runti..tokio::runtime::scheduler::multi_thread::park::Parker::park (56,209 samples, 14.86%)tokio::runtime::schedul..tokio::runtime::scheduler::multi_thread::park::Inner::park (56,209 samples, 14.86%)tokio::runtime::schedul..tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (56,281 samples, 14.87%)tokio::runtime::schedul..core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (121 samples, 0.03%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (121 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::unlock (115 samples, 0.03%)std::sync::mutex::MutexGuard<T>::new (121 samples, 0.03%)std::sync::poison::Flag::guard (117 samples, 0.03%)std::thread::panicking (114 samples, 0.03%)std::panicking::panicking (114 samples, 0.03%)std::panicking::panic_count::count_is_zero (114 samples, 0.03%)core::sync::atomic::AtomicUsize::load (108 samples, 0.03%)core::sync::atomic::atomic_load (108 samples, 
0.03%)core::result::Result<T,E>::is_err (207 samples, 0.05%)core::result::Result<T,E>::is_ok (207 samples, 0.05%)core::sync::atomic::AtomicU32::compare_exchange (128 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (128 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (599 samples, 0.16%)std::sync::mutex::Mutex<T>::lock (594 samples, 0.16%)std::sys::sync::mutex::futex::Mutex::lock (473 samples, 0.13%)std::sys::sync::mutex::futex::Mutex::lock_contended (134 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::spin (96 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (806 samples, 0.21%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (80 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (80 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (48 samples, 0.01%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (589 samples, 0.16%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (589 samples, 0.16%)core::slice::<impl [T]>::contains (1,373 samples, 0.36%)<T as core::slice::cmp::SliceContains>::slice_contains (1,373 samples, 0.36%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (1,373 samples, 0.36%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (309 samples, 0.08%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (309 samples, 0.08%)core::result::Result<T,E>::is_err (50 samples, 0.01%)core::result::Result<T,E>::is_ok (50 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (59 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (59 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (1,625 samples, 0.43%)tokio::loom::std::mutex::Mutex<T>::lock (169 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (168 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (136 samples, 
0.04%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (1,675 samples, 0.44%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (95 samples, 0.03%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (95 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::unlock (88 samples, 0.02%)core::result::Result<T,E>::is_err (163 samples, 0.04%)core::result::Result<T,E>::is_ok (163 samples, 0.04%)tokio::loom::std::mutex::Mutex<T>::lock (342 samples, 0.09%)std::sync::mutex::Mutex<T>::lock (341 samples, 0.09%)std::sys::sync::mutex::futex::Mutex::lock (318 samples, 0.08%)std::sys::sync::mutex::futex::Mutex::lock_contended (117 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::spin (81 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (512 samples, 0.14%)tokio::runtime::scheduler::multi_thread::idle::State::dec_num_unparked (67 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (57 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (46 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock_contended (61 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::spin (53 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (88 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (88 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (78 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (287 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (201 samples, 0.05%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (201 samples, 0.05%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (100 samples, 
0.03%)core::sync::atomic::AtomicUsize::fetch_add (98 samples, 0.03%)core::sync::atomic::atomic_add (98 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::park (60,093 samples, 15.88%)tokio::runtime::schedule..tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (898 samples, 0.24%)core::cell::RefCell<T>::borrow_mut (81 samples, 0.02%)core::cell::RefCell<T>::try_borrow_mut (81 samples, 0.02%)core::cell::BorrowRefMut::new (81 samples, 0.02%)tokio::runtime::context::budget (60 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (60 samples, 0.02%)[[vdso]] (62 samples, 0.02%)__memcpy_avx512_unaligned_erms (434 samples, 0.11%)__memcpy_avx512_unaligned_erms (591 samples, 0.16%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (596 samples, 0.16%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (596 samples, 0.16%)std::panic::catch_unwind (1,053 samples, 0.28%)std::panicking::try (1,053 samples, 0.28%)std::panicking::try::do_call (1,053 samples, 0.28%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (1,053 samples, 0.28%)core::ops::function::FnOnce::call_once (1,053 samples, 0.28%)tokio::runtime::task::harness::Harness<T,S>::complete::{{closure}} (1,053 samples, 0.28%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (1,053 samples, 0.28%)tokio::runtime::task::core::Core<T,S>::set_stage (1,041 samples, 0.28%)<core::option::Option<T> as core::ops::try_trait::Try>::branch (38 samples, 0.01%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (76 samples, 0.02%)core::result::Result<T,E>::is_err (750 samples, 0.20%)core::result::Result<T,E>::is_ok (750 samples, 0.20%)tokio::runtime::task::harness::Harness<T,S>::complete (2,086 samples, 0.55%)tokio::runtime::task::harness::Harness<T,S>::release (1,033 samples, 0.27%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for 
alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (1,023 samples, 0.27%)tokio::runtime::task::list::OwnedTasks<S>::remove (1,023 samples, 0.27%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (892 samples, 0.24%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (788 samples, 0.21%)tokio::loom::std::mutex::Mutex<T>::lock (775 samples, 0.20%)std::sync::mutex::Mutex<T>::lock (775 samples, 0.20%)std::sys::sync::mutex::futex::Mutex::lock (771 samples, 0.20%)core::cell::RefCell<T>::borrow_mut (42 samples, 0.01%)core::cell::RefCell<T>::try_borrow_mut (42 samples, 0.01%)core::cell::BorrowRefMut::new (42 samples, 0.01%)tokio::runtime::coop::budget (46 samples, 0.01%)tokio::runtime::coop::with_budget (46 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (86 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (212 samples, 0.06%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (71 samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (139 samples, 0.04%)core::sync::atomic::AtomicUsize::fetch_add (115 samples, 0.03%)core::sync::atomic::atomic_add (115 samples, 0.03%)__memcpy_avx512_unaligned_erms (138 samples, 0.04%)core::ptr::drop_in_place<alloc::sync::Arc<tokio::net::udp::UdpSocket>> (174 samples, 0.05%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (174 samples, 0.05%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker::core::Tracker>> (282 samples, 0.07%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (282 samples, 0.07%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (53 samples, 0.01%)std::io::cursor::Cursor<T>::remaining_slice (58 samples, 0.02%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (103 samples, 0.03%)std::io::impls::<impl std::io::Read for &[u8]>::read_exact (45 samples, 
0.01%)byteorder::io::ReadBytesExt::read_i32 (108 samples, 0.03%)std::io::cursor::Cursor<T>::remaining_slice (55 samples, 0.01%)byteorder::io::ReadBytesExt::read_i64 (57 samples, 0.02%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (57 samples, 0.02%)aquatic_udp_protocol::request::Request::from_bytes (1,438 samples, 0.38%)__GI___lll_lock_wait_private (55 samples, 0.01%)futex_wait (38 samples, 0.01%)__x64_sys_futex (128 samples, 0.03%)_raw_spin_lock (67 samples, 0.02%)futex_hash (251 samples, 0.07%)_raw_spin_lock (190 samples, 0.05%)native_queued_spin_lock_slowpath (188 samples, 0.05%)futex_wake_mark (67 samples, 0.02%)get_futex_key (210 samples, 0.06%)_raw_spin_lock_irqsave (45 samples, 0.01%)__raw_spin_lock_irqsave (44 samples, 0.01%)__smp_call_single_queue (117 samples, 0.03%)send_call_function_single_ipi (107 samples, 0.03%)native_send_call_func_single_ipi (64 samples, 0.02%)default_send_IPI_single_phys (64 samples, 0.02%)__default_send_IPI_dest_field (54 samples, 0.01%)llist_add_batch (86 samples, 0.02%)ttwu_queue_wakelist (259 samples, 0.07%)futex_wake (1,916 samples, 0.51%)wake_up_q (702 samples, 0.19%)try_to_wake_up (671 samples, 0.18%)do_futex (2,387 samples, 0.63%)__x64_sys_futex (2,616 samples, 0.69%)futex_wake (53 samples, 0.01%)do_futex (38 samples, 0.01%)exit_to_user_mode_prepare (140 samples, 0.04%)do_syscall_64 (2,881 samples, 0.76%)syscall_exit_to_user_mode (189 samples, 0.05%)entry_SYSCALL_64_after_hwframe (3,055 samples, 0.81%)__GI___lll_lock_wake_private (3,294 samples, 0.87%)__x64_sys_futex (43 samples, 0.01%)plist_add (66 samples, 0.02%)update_cfs_group (57 samples, 0.02%)__calc_delta (40 samples, 0.01%)cpuacct_charge (53 samples, 0.01%)update_curr (190 samples, 0.05%)__update_load_avg_cfs_rq (50 samples, 0.01%)__update_load_avg_se (43 samples, 0.01%)update_load_avg (158 samples, 0.04%)dequeue_entity (565 samples, 0.15%)dequeue_task_fair (664 samples, 0.18%)dequeue_task (672 samples, 0.18%)ctx_sched_in (59 samples, 
0.02%)visit_groups_merge.constprop.0.isra.0 (57 samples, 0.02%)__perf_event_task_sched_in (108 samples, 0.03%)finish_task_switch.isra.0 (172 samples, 0.05%)pick_next_task_fair (43 samples, 0.01%)pick_next_task (155 samples, 0.04%)put_prev_task_fair (50 samples, 0.01%)perf_ibs_del (92 samples, 0.02%)perf_ibs_stop (71 samples, 0.02%)event_sched_out (103 samples, 0.03%)__pmu_ctx_sched_out (133 samples, 0.04%)group_sched_out (125 samples, 0.03%)ctx_sched_out (177 samples, 0.05%)prepare_task_switch (276 samples, 0.07%)__perf_event_task_sched_out (237 samples, 0.06%)perf_event_context_sched_out (197 samples, 0.05%)psi_group_change (234 samples, 0.06%)psi_task_switch (343 samples, 0.09%)__schedule (1,782 samples, 0.47%)futex_wait_queue (1,981 samples, 0.52%)schedule (1,806 samples, 0.48%)__get_user_nocheck_4 (110 samples, 0.03%)futex_hash (112 samples, 0.03%)futex_q_lock (233 samples, 0.06%)_raw_spin_lock (85 samples, 0.02%)native_queued_spin_lock_slowpath (85 samples, 0.02%)futex_q_unlock (740 samples, 0.20%)futex_wait_setup (1,346 samples, 0.36%)futex_wait (3,533 samples, 0.93%)do_futex (3,584 samples, 0.95%)__x64_sys_futex (3,670 samples, 0.97%)__put_user_8 (52 samples, 0.01%)__rseq_handle_notify_resume (254 samples, 0.07%)exit_to_user_mode_loop (337 samples, 0.09%)exit_to_user_mode_prepare (451 samples, 0.12%)do_syscall_64 (4,206 samples, 1.11%)syscall_exit_to_user_mode (483 samples, 0.13%)entry_SYSCALL_64_after_hwframe (4,268 samples, 1.13%)__GI___lll_lock_wait_private (4,605 samples, 1.22%)futex_wait (4,488 samples, 1.19%)_int_free (5,829 samples, 1.54%)__GI___libc_free (9,251 samples, 2.44%)__..core::ptr::drop_in_place<torrust_tracker::servers::udp::UdpRequest> (9,429 samples, 2.49%)co..core::ptr::drop_in_place<alloc::vec::Vec<u8>> (9,429 samples, 2.49%)co..core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (9,429 samples, 2.49%)co..<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (9,429 samples, 2.49%)<a..<alloc::alloc::Global as 
core::alloc::Allocator>::deallocate (9,429 samples, 2.49%)<a..alloc::alloc::dealloc (9,429 samples, 2.49%)al..__rdl_dealloc (9,429 samples, 2.49%)__..std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (9,429 samples, 2.49%)st..tracing::span::Span::record_all (174 samples, 0.05%)unlink_chunk (168 samples, 0.04%)core::result::Result<T,E>::expect (227 samples, 0.06%)core::result::Result<T,E>::map_err (66 samples, 0.02%)__GI___clock_gettime (61 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (68 samples, 0.02%)std::time::Instant::elapsed (109 samples, 0.03%)std::time::Instant::now (80 samples, 0.02%)std::sys::pal::unix::time::Instant::now (80 samples, 0.02%)std::sys::pal::unix::cvt (48 samples, 0.01%)__x64_sys_getsockname (580 samples, 0.15%)__sys_getsockname (64 samples, 0.02%)__get_user_4 (812 samples, 0.21%)__put_user_nocheck_4 (1,020 samples, 0.27%)_copy_to_user (40 samples, 0.01%)apparmor_socket_getsockname (82 samples, 0.02%)_raw_spin_lock_bh (1,657 samples, 0.44%)_raw_spin_unlock_bh (55 samples, 0.01%)ip4_datagram_release_cb (93 samples, 0.02%)dequeue_entity (62 samples, 0.02%)dequeue_task (76 samples, 0.02%)dequeue_task_fair (75 samples, 0.02%)__schedule (179 samples, 0.05%)__lock_sock (196 samples, 0.05%)schedule (180 samples, 0.05%)_raw_spin_lock_bh (83 samples, 0.02%)native_queued_spin_lock_slowpath (78 samples, 0.02%)_raw_spin_unlock_bh (125 samples, 0.03%)__local_bh_enable_ip (71 samples, 0.02%)lock_sock_nested (508 samples, 0.13%)autoremove_wake_function (166 samples, 0.04%)default_wake_function (166 samples, 0.04%)try_to_wake_up (166 samples, 0.04%)__wake_up_common (169 samples, 0.04%)__wake_up (189 samples, 0.05%)__wake_up_common_lock (188 samples, 0.05%)_raw_spin_unlock_bh (139 samples, 0.04%)__local_bh_enable_ip (118 samples, 0.03%)inet_getname (3,442 samples, 0.91%)release_sock (961 samples, 0.25%)ip4_datagram_release_cb (340 samples, 0.09%)lock_sock_nested (41 samples, 
0.01%)__check_object_size.part.0 (339 samples, 0.09%)check_stack_object (284 samples, 0.08%)__check_object_size (408 samples, 0.11%)check_stack_object (69 samples, 0.02%)move_addr_to_user (2,340 samples, 0.62%)copy_user_enhanced_fast_string (1,695 samples, 0.45%)security_socket_getsockname (298 samples, 0.08%)apparmor_socket_getsockname (254 samples, 0.07%)aa_sk_perm (204 samples, 0.05%)__fget_light (1,889 samples, 0.50%)__fdget (1,912 samples, 0.51%)__sys_getsockname (10,968 samples, 2.90%)__..sockfd_lookup_light (2,045 samples, 0.54%)fput (545 samples, 0.14%)inet_getname (177 samples, 0.05%)__x64_sys_getsockname (11,723 samples, 3.10%)__x..syscall_enter_from_user_mode (48 samples, 0.01%)fpregs_assert_state_consistent (42 samples, 0.01%)exit_to_user_mode_prepare (363 samples, 0.10%)do_syscall_64 (12,453 samples, 3.29%)do_..syscall_exit_to_user_mode (571 samples, 0.15%)fpregs_assert_state_consistent (53 samples, 0.01%)entry_SYSCALL_64_after_hwframe (13,099 samples, 3.46%)ent..__GI_getsockname (13,459 samples, 3.56%)__GI..std::sys_common::net::TcpListener::socket_addr::{{closure}} (13,494 samples, 3.57%)std:..tokio::net::udp::UdpSocket::local_addr (13,569 samples, 3.59%)toki..mio::net::udp::UdpSocket::local_addr (13,568 samples, 3.59%)mio:..std::net::tcp::TcpListener::local_addr (13,568 samples, 3.59%)std:..std::sys_common::net::TcpListener::socket_addr (13,567 samples, 3.59%)std:..std::sys_common::net::sockname (13,559 samples, 3.58%)std:..[[vdso]] (338 samples, 0.09%)rand_chacha::guts::ChaCha::pos64 (177 samples, 0.05%)<ppv_lite86::soft::x2<W,G> as core::ops::arith::AddAssign>::add_assign (67 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::AddAssign>::add_assign (67 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::Add>::add (67 samples, 0.02%)core::core_arch::x86::avx2::_mm256_add_epi32 (67 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as 
ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (40 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (40 samples, 0.01%)core::core_arch::x86::avx2::_mm256_shuffle_epi8 (40 samples, 0.01%)core::core_arch::x86::avx2::_mm256_or_si256 (44 samples, 0.01%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (52 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (52 samples, 0.01%)rand_chacha::guts::round (234 samples, 0.06%)rand_chacha::guts::refill_wide::impl_avx2 (472 samples, 0.12%)rand_chacha::guts::refill_wide::fn_impl (472 samples, 0.12%)rand_chacha::guts::refill_wide_impl (472 samples, 0.12%)<rand_chacha::chacha::ChaCha12Core as rand_core::block::BlockRngCore>::generate (825 samples, 0.22%)rand_chacha::guts::ChaCha::refill4 (825 samples, 0.22%)rand::rng::Rng::gen (912 samples, 0.24%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (912 samples, 0.24%)rand::rng::Rng::gen (912 samples, 0.24%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (912 samples, 0.24%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (912 samples, 0.24%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (912 samples, 0.24%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (912 samples, 0.24%)rand_core::block::BlockRng<R>::generate_and_set (848 samples, 0.22%)<rand::rngs::adapter::reseeding::ReseedingCore<R,Rsdr> as rand_core::block::BlockRngCore>::generate (847 samples, 0.22%)torrust_tracker::servers::udp::handlers::RequestId::make (941 samples, 0.25%)uuid::v4::<impl uuid::Uuid>::new_v4 (921 samples, 0.24%)uuid::rng::bytes (921 samples, 
0.24%)rand::random (921 samples, 0.24%)std::sync::mutex::Mutex<T>::lock (52 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (44 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (43 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (43 samples, 0.01%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_peers_for_client (125 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_peers_for_client (68 samples, 0.02%)core::iter::traits::iterator::Iterator::collect (47 samples, 0.01%)<alloc::vec::Vec<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (47 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (47 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter_nested::SpecFromIterNested<T,I>>::from_iter (47 samples, 0.01%)<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::iterator::Iterator>::next (47 samples, 0.01%)<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::next (47 samples, 0.01%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (93 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (124 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (124 samples, 0.03%)__memcmp_evex_movbe (244 samples, 0.06%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (89 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (499 samples, 0.13%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (499 samples, 0.13%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (499 samples, 0.13%)core::slice::cmp::<impl 
core::cmp::Ord for [T]>::cmp (499 samples, 0.13%)<u8 as core::slice::cmp::SliceOrd>::compare (499 samples, 0.13%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (986 samples, 0.26%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (917 samples, 0.24%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (917 samples, 0.24%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (998 samples, 0.26%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (1,222 samples, 0.32%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (1,079 samples, 0.29%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (41 samples, 0.01%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (40 samples, 0.01%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (173 samples, 0.05%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (251 samples, 0.07%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (251 samples, 0.07%)core::slice::iter::Iter<T>::post_inc_start (78 samples, 0.02%)core::ptr::non_null::NonNull<T>::add (78 samples, 0.02%)[[vdso]] (78 samples, 0.02%)__memcmp_evex_movbe (402 samples, 0.11%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (258 samples, 
0.07%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (1,036 samples, 0.27%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (1,036 samples, 0.27%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (1,036 samples, 0.27%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (1,036 samples, 0.27%)<u8 as core::slice::cmp::SliceOrd>::compare (1,036 samples, 0.27%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (1,912 samples, 0.51%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (1,824 samples, 0.48%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (1,824 samples, 0.48%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Immut,K,V,Type>::keys (55 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (1,928 samples, 0.51%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (118 samples, 0.03%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (83 samples, 0.02%)__rdl_alloc (40 samples, 0.01%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (40 samples, 0.01%)alloc::sync::Arc<T>::new (70 samples, 0.02%)alloc::boxed::Box<T>::new (70 samples, 0.02%)alloc::alloc::exchange_malloc (53 samples, 0.01%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (53 samples, 0.01%)alloc::alloc::Global::alloc_impl (53 samples, 0.01%)alloc::alloc::alloc (53 samples, 0.01%)core::option::Option<T>::is_some_and (58 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for 
torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (58 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (56 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (56 samples, 0.01%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (423 samples, 0.11%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (391 samples, 0.10%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (272 samples, 0.07%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (2,519 samples, 0.67%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (2,482 samples, 0.66%)torrust_tracker::core::Tracker::announce::{{closure}} (3,776 samples, 1.00%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (77 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (79 samples, 0.02%)<core::time::Nanoseconds as core::hash::Hash>::hash (91 samples, 0.02%)core::hash::impls::<impl core::hash::Hash for u32>::hash (91 samples, 0.02%)core::hash::Hasher::write_u32 (91 samples, 0.02%)<std::hash::random::DefaultHasher 
as core::hash::Hasher>::write (91 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (91 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (91 samples, 0.02%)<core::time::Duration as core::hash::Hash>::hash (188 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for u64>::hash (97 samples, 0.03%)core::hash::Hasher::write_u64 (97 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (97 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (97 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (102 samples, 0.03%)<torrust_tracker_clock::time_extent::TimeExtent as core::hash::Hash>::hash (292 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for u64>::hash (104 samples, 0.03%)core::hash::Hasher::write_u64 (104 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (104 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (104 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (78 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (78 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (77 samples, 0.02%)core::hash::Hasher::write_length_prefix (81 samples, 0.02%)core::hash::Hasher::write_usize (81 samples, 0.02%)core::array::<impl core::hash::Hash for [T: N]>::hash (206 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (206 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (125 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (125 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (125 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (124 samples, 0.03%)core::hash::sip::u8to64_le (47 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (589 samples, 0.16%)[[vdso]] (83 
samples, 0.02%)core::num::<impl u128>::checked_div (84 samples, 0.02%)_int_free (40 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::check (748 samples, 0.20%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (154 samples, 0.04%)torrust_tracker_clock::time_extent::Make::now (154 samples, 0.04%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (70 samples, 0.02%)std::time::SystemTime::now (66 samples, 0.02%)std::sys::pal::unix::time::SystemTime::now (66 samples, 0.02%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (68 samples, 0.02%)core::array::<impl core::hash::Hash for [T: N]>::hash (68 samples, 0.02%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (68 samples, 0.02%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (68 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (68 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (68 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (68 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (4,754 samples, 1.26%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (80 samples, 0.02%)<core::net::socket_addr::SocketAddrV4 as core::hash::Hash>::hash (43 samples, 0.01%)<core::net::ip_addr::Ipv4Addr as core::hash::Hash>::hash (43 samples, 0.01%)core::array::<impl core::hash::Hash for [T: N]>::hash (42 samples, 0.01%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (42 samples, 0.01%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (126 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (86 samples, 0.02%)<core::time::Nanoseconds as core::hash::Hash>::hash (104 samples, 0.03%)core::hash::impls::<impl core::hash::Hash for u32>::hash (104 samples, 
0.03%)core::hash::Hasher::write_u32 (104 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (104 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (104 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (123 samples, 0.03%)<core::time::Duration as core::hash::Hash>::hash (232 samples, 0.06%)core::hash::impls::<impl core::hash::Hash for u64>::hash (128 samples, 0.03%)core::hash::Hasher::write_u64 (127 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (127 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (127 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (115 samples, 0.03%)core::hash::sip::u8to64_le (46 samples, 0.01%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (117 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (117 samples, 0.03%)<torrust_tracker_clock::time_extent::TimeExtent as core::hash::Hash>::hash (350 samples, 0.09%)core::hash::impls::<impl core::hash::Hash for u64>::hash (118 samples, 0.03%)core::hash::Hasher::write_u64 (118 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (83 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (83 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (82 samples, 0.02%)core::hash::Hasher::write_length_prefix (86 samples, 0.02%)core::hash::Hasher::write_usize (86 samples, 0.02%)core::array::<impl core::hash::Hash for [T: N]>::hash (229 samples, 0.06%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (229 samples, 0.06%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (143 samples, 0.04%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (143 samples, 0.04%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (143 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (140 samples, 
0.04%)core::hash::sip::u8to64_le (44 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (710 samples, 0.19%)[[vdso]] (68 samples, 0.02%)core::num::<impl u128>::checked_div (70 samples, 0.02%)_int_free (40 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (887 samples, 0.23%)torrust_tracker::servers::udp::connection_cookie::make (865 samples, 0.23%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (146 samples, 0.04%)torrust_tracker_clock::time_extent::Make::now (145 samples, 0.04%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (75 samples, 0.02%)std::time::SystemTime::now (70 samples, 0.02%)std::sys::pal::unix::time::SystemTime::now (70 samples, 0.02%)hashbrown::raw::RawTable<T,A>::reserve (47 samples, 0.01%)hashbrown::map::HashMap<K,V,S,A>::insert (59 samples, 0.02%)hashbrown::raw::RawTable<T,A>::find_or_find_insert_slot (49 samples, 0.01%)torrust_tracker::core::ScrapeData::add_file (61 samples, 0.02%)std::collections::hash::map::HashMap<K,V,S>::insert (61 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (146 samples, 0.04%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (138 samples, 0.04%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (138 samples, 0.04%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (70 samples, 0.02%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (70 samples, 0.02%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (70 samples, 0.02%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (70 samples, 0.02%)<u8 
as core::slice::cmp::SliceOrd>::compare (70 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (147 samples, 0.04%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (5,947 samples, 1.57%)torrust_tracker::servers::udp::handlers::handle_scrape::{{closure}} (265 samples, 0.07%)torrust_tracker::core::Tracker::scrape::{{closure}} (243 samples, 0.06%)torrust_tracker::core::Tracker::get_swarm_metadata (176 samples, 0.05%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (156 samples, 0.04%)<alloc::string::String as core::fmt::Write>::write_str (54 samples, 0.01%)alloc::string::String::push_str (51 samples, 0.01%)alloc::vec::Vec<T,A>::extend_from_slice (51 samples, 0.01%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (51 samples, 0.01%)alloc::vec::Vec<T,A>::append_elements (51 samples, 0.01%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (186 samples, 0.05%)core::fmt::num::imp::fmt_u64 (178 samples, 0.05%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (85 samples, 0.02%)core::fmt::num::imp::fmt_u64 (84 samples, 0.02%)<T as alloc::string::ToString>::to_string (319 samples, 0.08%)core::option::Option<T>::expect (71 samples, 0.02%)__GI___libc_free (39 samples, 0.01%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (112 samples, 0.03%)alloc::alloc::dealloc (112 samples, 0.03%)__rdl_dealloc (112 samples, 0.03%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (112 samples, 0.03%)core::ptr::drop_in_place<alloc::string::String> (162 samples, 
0.04%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (162 samples, 0.04%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (162 samples, 0.04%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (162 samples, 0.04%)torrust_tracker::servers::udp::logging::map_action_name (46 samples, 0.01%)binascii::bin2hex (128 samples, 0.03%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (45 samples, 0.01%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (45 samples, 0.01%)core::fmt::write (47 samples, 0.01%)core::fmt::Formatter::write_fmt (136 samples, 0.04%)core::str::converts::from_utf8 (64 samples, 0.02%)core::str::validations::run_utf8_validation (52 samples, 0.01%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (289 samples, 0.08%)torrust_tracker_primitives::info_hash::InfoHash::to_hex_string (293 samples, 0.08%)<T as alloc::string::ToString>::to_string (293 samples, 0.08%)torrust_tracker::servers::udp::logging::log_request (1,025 samples, 0.27%)[[vdso]] (111 samples, 0.03%)alloc::raw_vec::finish_grow (120 samples, 0.03%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (134 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::grow_amortized (134 samples, 0.04%)alloc::vec::Vec<T,A>::reserve (141 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::reserve (141 samples, 0.04%)<alloc::string::String as core::fmt::Write>::write_str (148 samples, 0.04%)alloc::string::String::push_str (145 samples, 0.04%)alloc::vec::Vec<T,A>::extend_from_slice (145 samples, 0.04%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (145 samples, 0.04%)alloc::vec::Vec<T,A>::append_elements (145 samples, 0.04%)[[vdso]] (52 samples, 0.01%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (286 samples, 0.08%)core::fmt::num::imp::fmt_u64 (277 samples, 0.07%)<T as alloc::string::ToString>::to_string (328 samples, 
0.09%)core::option::Option<T>::expect (43 samples, 0.01%)core::ptr::drop_in_place<alloc::string::String> (51 samples, 0.01%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (51 samples, 0.01%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (51 samples, 0.01%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (51 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (33,755 samples, 8.92%)torrust_track..torrust_tracker::servers::udp::logging::log_response (613 samples, 0.16%)futex_wake (46 samples, 0.01%)do_futex (52 samples, 0.01%)__x64_sys_futex (57 samples, 0.02%)__GI___lll_lock_wake_private (63 samples, 0.02%)entry_SYSCALL_64_after_hwframe (60 samples, 0.02%)do_syscall_64 (60 samples, 0.02%)_int_malloc (252 samples, 0.07%)__libc_calloc (366 samples, 0.10%)__memcpy_avx512_unaligned_erms (86 samples, 0.02%)__memset_avx512_unaligned_erms (51 samples, 0.01%)alloc::vec::from_elem (589 samples, 0.16%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (589 samples, 0.16%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (589 samples, 0.16%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (589 samples, 0.16%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (562 samples, 0.15%)alloc::alloc::Global::alloc_impl (562 samples, 0.15%)alloc::alloc::alloc_zeroed (562 samples, 0.15%)__rdl_alloc_zeroed (562 samples, 0.15%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (562 samples, 0.15%)byteorder::ByteOrder::write_i32 (106 samples, 0.03%)<byteorder::BigEndian as byteorder::ByteOrder>::write_u32 (106 samples, 0.03%)core::num::<impl u32>::to_be_bytes (106 samples, 0.03%)core::num::<impl u32>::to_be (106 samples, 0.03%)core::num::<impl u32>::swap_bytes (106 samples, 0.03%)byteorder::io::WriteBytesExt::write_i32 (315 samples, 0.08%)std::io::Write::write_all (208 samples, 0.05%)<std::io::cursor::Cursor<alloc::vec::Vec<u8,A>> as std::io::Write>::write (208 
samples, 0.05%)std::io::cursor::vec_write (208 samples, 0.05%)std::io::cursor::vec_write_unchecked (132 samples, 0.03%)core::ptr::mut_ptr::<impl *mut T>::copy_from (132 samples, 0.03%)core::intrinsics::copy (132 samples, 0.03%)aquatic_udp_protocol::response::Response::write (634 samples, 0.17%)byteorder::io::WriteBytesExt::write_i64 (65 samples, 0.02%)std::io::Write::write_all (43 samples, 0.01%)<std::io::cursor::Cursor<alloc::vec::Vec<u8,A>> as std::io::Write>::write (43 samples, 0.01%)std::io::cursor::vec_write (43 samples, 0.01%)std::io::cursor::vec_write_unchecked (43 samples, 0.01%)core::ptr::mut_ptr::<impl *mut T>::copy_from (43 samples, 0.01%)core::intrinsics::copy (43 samples, 0.01%)_int_free (514 samples, 0.14%)__GI___libc_free (669 samples, 0.18%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (685 samples, 0.18%)alloc::alloc::dealloc (685 samples, 0.18%)__rdl_dealloc (685 samples, 0.18%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (685 samples, 0.18%)core::ptr::drop_in_place<std::io::cursor::Cursor<alloc::vec::Vec<u8>>> (701 samples, 0.19%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (701 samples, 0.19%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (701 samples, 0.19%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (701 samples, 0.19%)std::io::cursor::Cursor<T>::new (38 samples, 0.01%)core::ptr::drop_in_place<tokio::net::udp::UdpSocket::send_to<&core::net::socket_addr::SocketAddr>::{{closure}}> (48 samples, 0.01%)<F as core::future::into_future::IntoFuture>::into_future (45 samples, 0.01%)tokio::io::ready::Ready::intersection (115 samples, 0.03%)tokio::io::ready::Ready::from_interest (112 samples, 0.03%)tokio::io::interest::Interest::is_readable (90 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (369 samples, 0.10%)__GI___pthread_disable_asynccancel (147 samples, 0.04%)__x64_sys_sendto (620 samples, 
0.16%)__sys_sendto (40 samples, 0.01%)__check_object_size.part.0 (98 samples, 0.03%)check_stack_object (69 samples, 0.02%)__check_object_size (126 samples, 0.03%)_copy_from_user (248 samples, 0.07%)move_addr_to_kernel (1,485 samples, 0.39%)copy_user_enhanced_fast_string (840 samples, 0.22%)apparmor_socket_sendmsg (984 samples, 0.26%)__rcu_read_lock (164 samples, 0.04%)__rcu_read_unlock (68 samples, 0.02%)inet_send_prepare (44 samples, 0.01%)ip_make_skb (46 samples, 0.01%)ip_route_output_flow (54 samples, 0.01%)security_sk_classify_flow (48 samples, 0.01%)__ip_append_data (56 samples, 0.01%)__rcu_read_lock (60 samples, 0.02%)__rcu_read_unlock (54 samples, 0.01%)__check_object_size (55 samples, 0.01%)alloc_skb_with_frags (38 samples, 0.01%)__check_object_size.part.0 (42 samples, 0.01%)__check_heap_object (206 samples, 0.05%)__virt_addr_valid (473 samples, 0.13%)check_heap_object (1,244 samples, 0.33%)__check_object_size.part.0 (1,487 samples, 0.39%)is_vmalloc_addr (68 samples, 0.02%)__check_object_size (1,571 samples, 0.42%)check_stack_object (57 samples, 0.02%)_copy_from_iter (469 samples, 0.12%)ip_generic_getfrag (3,554 samples, 0.94%)copy_user_enhanced_fast_string (1,329 samples, 0.35%)__build_skb_around (233 samples, 0.06%)__kmalloc_node_track_caller (74 samples, 0.02%)___slab_alloc (129 samples, 0.03%)memcg_slab_post_alloc_hook (87 samples, 0.02%)__kmem_cache_alloc_node (1,044 samples, 0.28%)kmalloc_slab (40 samples, 0.01%)memset_erms (331 samples, 0.09%)__kmalloc_node_track_caller (1,662 samples, 0.44%)__kmem_cache_alloc_node (57 samples, 0.02%)kmalloc_reserve (1,865 samples, 0.49%)kmalloc_size_roundup (156 samples, 0.04%)kmalloc_slab (94 samples, 0.02%)___slab_alloc (69 samples, 0.02%)memcg_slab_post_alloc_hook (59 samples, 0.02%)kmem_cache_alloc_node (700 samples, 0.19%)memset_erms (98 samples, 0.03%)__alloc_skb (4,155 samples, 1.10%)alloc_skb_with_frags (4,425 samples, 1.17%)kmem_cache_alloc_node (48 samples, 0.01%)__ip_append_data (11,490 samples, 
3.04%)__i..sock_alloc_send_pskb (6,335 samples, 1.67%)skb_set_owner_w (1,576 samples, 0.42%)__ip_select_ident (1,244 samples, 0.33%)ipv4_mtu (151 samples, 0.04%)__ip_make_skb (3,290 samples, 0.87%)siphash_3u32 (619 samples, 0.16%)ip_generic_getfrag (113 samples, 0.03%)ip_setup_cork (313 samples, 0.08%)ipv4_mtu (121 samples, 0.03%)ipv4_mtu (67 samples, 0.02%)ip_make_skb (15,572 samples, 4.12%)ip_m..__rcu_read_lock (38 samples, 0.01%)__rcu_read_unlock (50 samples, 0.01%)__mkroute_output (806 samples, 0.21%)fib_table_lookup (1,252 samples, 0.33%)fib_lookup_good_nhc (418 samples, 0.11%)ip_route_output_key_hash_rcu (2,298 samples, 0.61%)xfrm_lookup_route (258 samples, 0.07%)xfrm_lookup_with_ifid (213 samples, 0.06%)ip_route_output_flow (3,011 samples, 0.80%)security_sk_classify_flow (115 samples, 0.03%)ip_send_check (954 samples, 0.25%)ipv4_conntrack_defrag (148 samples, 0.04%)ipv4_conntrack_local (711 samples, 0.19%)ipv4_conntrack_defrag (219 samples, 0.06%)get_l4proto (223 samples, 0.06%)hash_conntrack_raw (101 samples, 0.03%)__nf_ct_refresh_acct (57 samples, 0.02%)nf_conntrack_handle_packet (676 samples, 0.18%)nf_conntrack_udp_packet (578 samples, 0.15%)nf_ct_get_tuple (65 samples, 0.02%)__nf_conntrack_find_get (1,301 samples, 0.34%)__rcu_read_lock (58 samples, 0.02%)hash_conntrack_raw (427 samples, 0.11%)nf_ct_get_tuple (421 samples, 0.11%)nf_conntrack_in (4,938 samples, 1.31%)resolve_normal_ct (3,247 samples, 0.86%)siphash_4u64 (690 samples, 0.18%)ipv4_conntrack_local (5,156 samples, 1.36%)nf_nat_inet_fn (43 samples, 0.01%)nf_hook_slow (6,435 samples, 1.70%)nf_nat_ipv4_local_fn (718 samples, 0.19%)nf_nat_inet_fn (362 samples, 0.10%)__ip_local_out (8,585 samples, 2.27%)_..nf_nat_ipv4_local_fn (174 samples, 0.05%)__rcu_read_lock (88 samples, 0.02%)__rcu_read_unlock (218 samples, 0.06%)__ip_finish_output (43 samples, 0.01%)__x86_indirect_thunk_rax (62 samples, 0.02%)apparmor_ip_postroute (470 samples, 0.12%)__usecs_to_jiffies (74 samples, 0.02%)_raw_spin_lock_irq (128 
samples, 0.03%)_raw_spin_unlock_irq (76 samples, 0.02%)__netif_receive_skb_core.constprop.0 (52 samples, 0.01%)__netif_receive_skb_core.constprop.0 (564 samples, 0.15%)__rcu_read_unlock (54 samples, 0.01%)__rcu_read_unlock (89 samples, 0.02%)__rcu_read_lock (67 samples, 0.02%)__rcu_read_lock (57 samples, 0.02%)__rcu_read_unlock (105 samples, 0.03%)raw_local_deliver (389 samples, 0.10%)raw_v4_input (221 samples, 0.06%)raw_v4_input (38 samples, 0.01%)__udp4_lib_lookup (49 samples, 0.01%)__udp4_lib_lookup (504 samples, 0.13%)udp4_lib_lookup2 (352 samples, 0.09%)udp4_csum_init (812 samples, 0.21%)__udp_enqueue_schedule_skb (59 samples, 0.02%)__rcu_read_lock (61 samples, 0.02%)mem_cgroup_charge_skmem (57 samples, 0.02%)__sk_mem_raise_allocated (100 samples, 0.03%)__udp_enqueue_schedule_skb (1,313 samples, 0.35%)sock_def_readable (155 samples, 0.04%)__x86_indirect_thunk_rax (44 samples, 0.01%)__xfrm_policy_check2.constprop.0 (40 samples, 0.01%)_raw_spin_lock (493 samples, 0.13%)_raw_spin_unlock (136 samples, 0.04%)ipv4_pktinfo_prepare (380 samples, 0.10%)dst_release (281 samples, 0.07%)security_sock_rcv_skb (39 samples, 0.01%)apparmor_socket_sock_rcv_skb (106 samples, 0.03%)sk_filter_trim_cap (322 samples, 0.09%)security_sock_rcv_skb (72 samples, 0.02%)skb_pull_rcsum (53 samples, 0.01%)__udp4_lib_rcv (5,414 samples, 1.43%)udp_unicast_rcv_skb (3,792 samples, 1.00%)udp_queue_rcv_skb (3,762 samples, 0.99%)udp_queue_rcv_one_skb (3,636 samples, 0.96%)sock_def_readable (148 samples, 0.04%)udp4_csum_init (57 samples, 0.02%)ip_protocol_deliver_rcu (6,453 samples, 1.71%)udp_rcv (5,665 samples, 1.50%)udp_unicast_rcv_skb (127 samples, 0.03%)raw_local_deliver (40 samples, 0.01%)ip_local_deliver_finish (6,677 samples, 1.76%)nf_confirm (192 samples, 0.05%)nf_confirm (221 samples, 0.06%)nf_nat_inet_fn (41 samples, 0.01%)nf_hook_slow (767 samples, 0.20%)nf_nat_ipv4_local_in (316 samples, 0.08%)nf_nat_inet_fn (175 samples, 0.05%)ip_local_deliver (8,248 samples, 
2.18%)i..nf_nat_ipv4_local_in (181 samples, 0.05%)ip_local_deliver_finish (75 samples, 0.02%)ip_rcv_core (309 samples, 0.08%)ip_rcv_finish_core.constprop.0 (355 samples, 0.09%)ip_sabotage_in (306 samples, 0.08%)ipv4_conntrack_defrag (138 samples, 0.04%)ipv4_conntrack_in (323 samples, 0.09%)ip_sabotage_in (69 samples, 0.02%)ipv4_conntrack_defrag (108 samples, 0.03%)ipv4_conntrack_in (282 samples, 0.07%)nf_conntrack_in (278 samples, 0.07%)nf_conntrack_in (57 samples, 0.02%)nf_hook_slow (1,381 samples, 0.36%)nf_nat_ipv4_pre_routing (432 samples, 0.11%)nf_nat_inet_fn (255 samples, 0.07%)ip_rcv (11,869 samples, 3.14%)ip_..nf_nat_ipv4_pre_routing (201 samples, 0.05%)__netif_receive_skb_one_core (12,879 samples, 3.40%)__n..nf_hook_slow (90 samples, 0.02%)__netif_receive_skb (12,980 samples, 3.43%)__n..__netif_receive_skb_one_core (61 samples, 0.02%)_raw_spin_lock_irq (716 samples, 0.19%)__napi_poll (14,705 samples, 3.89%)__na..process_backlog (14,356 samples, 3.79%)proc.._raw_spin_unlock_irq (225 samples, 0.06%)net_rx_action (15,607 samples, 4.12%)net_..process_backlog (195 samples, 0.05%)__do_softirq (16,392 samples, 4.33%)__do_..__x86_indirect_thunk_rax (38 samples, 0.01%)__local_bh_enable_ip (17,356 samples, 4.59%)__loc..do_softirq.part.0 (17,063 samples, 4.51%)do_so..net_rx_action (146 samples, 0.04%)do_softirq.part.0 (41 samples, 0.01%)__local_bh_enable_ip (78 samples, 0.02%)enqueue_to_backlog (49 samples, 0.01%)__raise_softirq_irqoff (57 samples, 0.02%)_raw_spin_unlock_irqrestore (46 samples, 0.01%)_raw_spin_lock_irqsave (321 samples, 0.08%)__raw_spin_lock_irqsave (320 samples, 0.08%)enqueue_to_backlog (898 samples, 0.24%)_raw_spin_unlock_irqrestore (120 samples, 0.03%)ktime_get_with_offset (165 samples, 0.04%)__netif_rx (2,105 samples, 0.56%)netif_rx_internal (1,977 samples, 0.52%)read_tsc (703 samples, 0.19%)__rcu_read_lock (82 samples, 0.02%)__rcu_read_unlock (38 samples, 0.01%)eth_type_trans (85 samples, 0.02%)skb_clone_tx_timestamp (158 samples, 
0.04%)__wake_up_common (61 samples, 0.02%)_raw_read_unlock_irqrestore (65 samples, 0.02%)__task_rq_lock (52 samples, 0.01%)raw_spin_rq_lock_nested (48 samples, 0.01%)_raw_spin_lock (48 samples, 0.01%)native_queued_spin_lock_slowpath (48 samples, 0.01%)_raw_spin_lock_irqsave (273 samples, 0.07%)__raw_spin_lock_irqsave (273 samples, 0.07%)select_task_rq_fair (164 samples, 0.04%)wake_affine (42 samples, 0.01%)__smp_call_single_queue (224 samples, 0.06%)send_call_function_single_ipi (214 samples, 0.06%)native_send_call_func_single_ipi (117 samples, 0.03%)default_send_IPI_single_phys (115 samples, 0.03%)__default_send_IPI_dest_field (105 samples, 0.03%)llist_add_batch (149 samples, 0.04%)sched_clock_cpu (45 samples, 0.01%)sched_clock (44 samples, 0.01%)native_sched_clock (44 samples, 0.01%)ttwu_queue_wakelist (575 samples, 0.15%)try_to_wake_up (1,751 samples, 0.46%)default_wake_function (1,771 samples, 0.47%)__wake_up_common (1,874 samples, 0.50%)ep_autoremove_wake_function (1,787 samples, 0.47%)_raw_spin_lock_irqsave (179 samples, 0.05%)__raw_spin_lock_irqsave (177 samples, 0.05%)native_queued_spin_lock_slowpath (49 samples, 0.01%)__wake_up_common_lock (2,109 samples, 0.56%)__wake_up (2,115 samples, 0.56%)__raw_read_lock_irqsave (1,681 samples, 0.44%)queued_read_lock_slowpath (112 samples, 0.03%)_raw_read_lock_irqsave (1,685 samples, 0.45%)__wake_up_common (6,023 samples, 1.59%)ep_poll_callback (5,577 samples, 1.47%)_raw_read_unlock_irqrestore (549 samples, 0.15%)__raw_spin_lock_irqsave (2,457 samples, 0.65%)native_queued_spin_lock_slowpath (2,074 samples, 0.55%)_raw_spin_lock_irqsave (2,473 samples, 0.65%)_raw_spin_unlock_irqrestore (128 samples, 0.03%)__wake_up_common_lock (8,884 samples, 2.35%)_..ep_poll_callback (159 samples, 0.04%)loopback_xmit (13,069 samples, 3.45%)loo..sock_wfree (10,251 samples, 2.71%)so..__wake_up_sync_key (9,015 samples, 2.38%)__.._raw_spin_unlock_irqrestore (50 samples, 0.01%)skb_clone_tx_timestamp (43 samples, 0.01%)dev_hard_start_xmit 
(14,617 samples, 3.86%)dev_..sock_wfree (177 samples, 0.05%)loopback_xmit (247 samples, 0.07%)netdev_core_pick_tx (51 samples, 0.01%)qdisc_pkt_len_init (42 samples, 0.01%)netif_skb_features (232 samples, 0.06%)skb_network_protocol (98 samples, 0.03%)validate_xmit_skb (501 samples, 0.13%)validate_xmit_xfrm (46 samples, 0.01%)__dev_queue_xmit (16,029 samples, 4.24%)__dev..ip_finish_output2 (34,592 samples, 9.14%)ip_finish_out..neigh_hh_output (16,177 samples, 4.28%)neigh..ip_skb_dst_mtu (147 samples, 0.04%)__ip_finish_output (35,105 samples, 9.28%)__ip_finish_o..ip_finish_output (35,746 samples, 9.45%)ip_finish_outp..nf_confirm (341 samples, 0.09%)nf_confirm (376 samples, 0.10%)nf_hook_slow (1,736 samples, 0.46%)nf_nat_ipv4_out (902 samples, 0.24%)nf_nat_inet_fn (495 samples, 0.13%)ip_output (40,003 samples, 10.57%)ip_outputnf_nat_ipv4_out (557 samples, 0.15%)ip_send_skb (49,451 samples, 13.07%)ip_send_skbnf_hook_slow (95 samples, 0.03%)udp_send_skb (50,114 samples, 13.24%)udp_send_skbudp4_hwcsum (70 samples, 0.02%)inet_sendmsg (72,507 samples, 19.16%)inet_sendmsgudp_sendmsg (71,624 samples, 18.93%)udp_sendmsgaa_sk_perm (44 samples, 0.01%)security_socket_sendmsg (892 samples, 0.24%)apparmor_socket_sendmsg (549 samples, 0.15%)aa_sk_perm (524 samples, 0.14%)sock_sendmsg (74,871 samples, 19.79%)sock_sendmsgudp_sendmsg (43 samples, 0.01%)__fget_light (1,626 samples, 0.43%)__fdget (1,664 samples, 0.44%)__sys_sendto (79,939 samples, 21.13%)__sys_sendtosockfd_lookup_light (1,915 samples, 0.51%)__fget_light (125 samples, 0.03%)fput (954 samples, 0.25%)import_single_range (191 samples, 0.05%)sockfd_lookup_light (69 samples, 0.02%)__x64_sys_sendto (81,183 samples, 21.46%)__x64_sys_sendtoexit_to_user_mode_prepare (43 samples, 0.01%)merge_sched_in (55 samples, 0.01%)ctx_sched_in (112 samples, 0.03%)visit_groups_merge.constprop.0.isra.0 (111 samples, 0.03%)rb_next (43 samples, 0.01%)__perf_event_task_sched_in (131 samples, 0.03%)finish_task_switch.isra.0 (140 samples, 
0.04%)exit_to_user_mode_loop (146 samples, 0.04%)schedule (145 samples, 0.04%)__schedule (145 samples, 0.04%)exit_to_user_mode_prepare (506 samples, 0.13%)do_syscall_64 (82,006 samples, 21.67%)do_syscall_64syscall_exit_to_user_mode (598 samples, 0.16%)syscall_enter_from_user_mode (42 samples, 0.01%)entry_SYSCALL_64_after_hwframe (82,860 samples, 21.90%)entry_SYSCALL_64_after_hwframesyscall_return_via_sysret (245 samples, 0.06%)__libc_sendto (83,829 samples, 22.16%)__libc_sendtostd::sys::pal::unix::cvt (143 samples, 0.04%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (83,973 samples, 22.19%)tokio::net::udp::UdpSocket::send_to..mio::net::udp::UdpSocket::send_to (83,973 samples, 22.19%)mio::net::udp::UdpSocket::send_tomio::io_source::IoSource<T>::do_io (83,973 samples, 22.19%)mio::io_source::IoSource<T>::do_iomio::sys::unix::stateless_io_source::IoSourceState::do_io (83,973 samples, 22.19%)mio::sys::unix::stateless_io_source..mio::net::udp::UdpSocket::send_to::{{closure}} (83,973 samples, 22.19%)mio::net::udp::UdpSocket::send_to::..std::net::udp::UdpSocket::send_to (83,973 samples, 22.19%)std::net::udp::UdpSocket::send_tostd::sys_common::net::UdpSocket::send_to (83,973 samples, 22.19%)std::sys_common::net::UdpSocket::se..core::result::Result<T,E>::is_err (368 samples, 0.10%)core::result::Result<T,E>::is_ok (368 samples, 0.10%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (420 samples, 0.11%)tokio::loom::std::mutex::Mutex<T>::lock (416 samples, 0.11%)std::sync::mutex::Mutex<T>::lock (415 samples, 0.11%)std::sys::sync::mutex::futex::Mutex::lock (412 samples, 0.11%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (85,524 samples, 22.60%)torrust_tracker::servers::udp::serve..tokio::net::udp::UdpSocket::send_to::{{closure}} (85,236 samples, 22.53%)tokio::net::udp::UdpSocket::send_to:..tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (84,989 samples, 
22.46%)tokio::net::udp::UdpSocket::send_to_..tokio::runtime::io::registration::Registration::async_io::{{closure}} (84,879 samples, 22.43%)tokio::runtime::io::registration::Re..tokio::runtime::io::registration::Registration::readiness::{{closure}} (439 samples, 0.12%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (435 samples, 0.11%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (432 samples, 0.11%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (87,857 samples, 23.22%)torrust_tracker::servers::udp::server..torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (124,991 samples, 33.03%)torrust_tracker::servers::udp::server::Udp::process_r..torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (123,165 samples, 32.55%)torrust_tracker::servers::udp::server::Udp::process_v..<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (238 samples, 0.06%)core::sync::atomic::AtomicUsize::fetch_add (238 samples, 0.06%)core::sync::atomic::atomic_add (238 samples, 0.06%)futex_hash (62 samples, 0.02%)get_futex_key (42 samples, 0.01%)futex_wake (248 samples, 0.07%)wake_up_q (50 samples, 0.01%)try_to_wake_up (46 samples, 0.01%)do_futex (364 samples, 0.10%)__x64_sys_futex (407 samples, 0.11%)do_syscall_64 (468 samples, 0.12%)entry_SYSCALL_64_after_hwframe (496 samples, 0.13%)__GI___lll_lock_wake_private (552 samples, 0.15%)update_load_avg (52 samples, 0.01%)dequeue_entity (134 samples, 0.04%)dequeue_task (163 samples, 0.04%)dequeue_task_fair (160 samples, 0.04%)ctx_sched_out (43 samples, 0.01%)prepare_task_switch (66 samples, 0.02%)__perf_event_task_sched_out (59 samples, 0.02%)perf_event_context_sched_out (52 samples, 0.01%)psi_group_change (39 samples, 0.01%)psi_task_switch (63 samples, 0.02%)__schedule (390 samples, 0.10%)futex_wait_queue (413 samples, 0.11%)schedule (393 samples, 0.10%)futex_q_unlock (143 samples, 0.04%)futex_wait_setup (230 samples, 0.06%)do_futex (688 
samples, 0.18%)futex_wait (678 samples, 0.18%)__x64_sys_futex (696 samples, 0.18%)__rseq_handle_notify_resume (45 samples, 0.01%)exit_to_user_mode_loop (57 samples, 0.02%)exit_to_user_mode_prepare (86 samples, 0.02%)entry_SYSCALL_64_after_hwframe (800 samples, 0.21%)do_syscall_64 (795 samples, 0.21%)syscall_exit_to_user_mode (93 samples, 0.02%)__GI___lll_lock_wait_private (860 samples, 0.23%)futex_wait (844 samples, 0.22%)_int_free (1,310 samples, 0.35%)__GI___libc_free (1,905 samples, 0.50%)syscall (58 samples, 0.02%)tokio::runtime::task::harness::Harness<T,S>::dealloc (47 samples, 0.01%)core::mem::drop (43 samples, 0.01%)core::ptr::drop_in_place<alloc::boxed::Box<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (43 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>> (43 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Core<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>> (43 samples, 0.01%)tokio::runtime::task::raw::drop_abort_handle (152 samples, 0.04%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (139 samples, 0.04%)tokio::runtime::task::state::State::ref_dec (139 samples, 0.04%)core::ptr::drop_in_place<tokio::runtime::task::abort::AbortHandle> (2,198 samples, 0.58%)<tokio::runtime::task::abort::AbortHandle as core::ops::drop::Drop>::drop (2,198 samples, 0.58%)tokio::runtime::task::raw::RawTask::drop_abort_handle (2,195 samples, 0.58%)core::result::Result<T,E>::is_ok (58 samples, 0.02%)tokio::runtime::task::raw::drop_join_handle_slow (47 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::drop_join_handle_slow (43 samples, 
0.01%)tokio::runtime::task::raw::RawTask::drop_join_handle_slow (78 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::join::JoinHandle<()>> (199 samples, 0.05%)<tokio::runtime::task::join::JoinHandle<T> as core::ops::drop::Drop>::drop (199 samples, 0.05%)tokio::runtime::task::state::State::drop_join_handle_fast (47 samples, 0.01%)core::sync::atomic::AtomicUsize::compare_exchange_weak (47 samples, 0.01%)core::sync::atomic::atomic_compare_exchange_weak (47 samples, 0.01%)ringbuf::ring_buffer::base::RbBase::is_full (57 samples, 0.02%)ringbuf::ring_buffer::base::RbBase::vacant_len (47 samples, 0.01%)ringbuf::consumer::Consumer<T,R>::advance (64 samples, 0.02%)ringbuf::ring_buffer::base::RbRead::advance_head (64 samples, 0.02%)ringbuf::consumer::Consumer<T,R>::as_uninit_slices (38 samples, 0.01%)ringbuf::ring_buffer::base::RbRead::occupied_slices (38 samples, 0.01%)ringbuf::ring_buffer::rb::Rb::pop (114 samples, 0.03%)ringbuf::consumer::Consumer<T,R>::pop (114 samples, 0.03%)ringbuf::producer::Producer<T,R>::advance (46 samples, 0.01%)ringbuf::ring_buffer::base::RbWrite::advance_tail (46 samples, 0.01%)ringbuf::ring_buffer::rb::Rb::push_overwrite (260 samples, 0.07%)ringbuf::ring_buffer::rb::Rb::push (89 samples, 0.02%)ringbuf::producer::Producer<T,R>::push (89 samples, 0.02%)tokio::runtime::task::state::Snapshot::is_complete (52 samples, 0.01%)tokio::runtime::task::abort::AbortHandle::is_finished (58 samples, 0.02%)tokio::runtime::task::join::JoinHandle<T>::abort_handle (122 samples, 0.03%)tokio::runtime::task::raw::RawTask::ref_inc (122 samples, 0.03%)tokio::runtime::task::state::State::ref_inc (122 samples, 0.03%)core::sync::atomic::AtomicUsize::fetch_add (44 samples, 0.01%)core::sync::atomic::atomic_add (44 samples, 0.01%)dequeue_entity (83 samples, 0.02%)dequeue_task_fair (94 samples, 0.02%)dequeue_task (97 samples, 0.03%)psi_task_switch (52 samples, 0.01%)futex_wait_queue (252 samples, 0.07%)schedule (238 samples, 0.06%)__schedule (236 samples, 
0.06%)futex_q_lock (38 samples, 0.01%)futex_q_unlock (146 samples, 0.04%)futex_wait_setup (238 samples, 0.06%)do_futex (520 samples, 0.14%)futex_wait (510 samples, 0.13%)__x64_sys_futex (538 samples, 0.14%)exit_to_user_mode_prepare (51 samples, 0.01%)do_syscall_64 (595 samples, 0.16%)syscall_exit_to_user_mode (52 samples, 0.01%)entry_SYSCALL_64_after_hwframe (603 samples, 0.16%)__GI___lll_lock_wait_private (649 samples, 0.17%)futex_wait (634 samples, 0.17%)futex_hash (66 samples, 0.02%)_raw_spin_lock (46 samples, 0.01%)native_queued_spin_lock_slowpath (46 samples, 0.01%)get_futex_key (40 samples, 0.01%)__smp_call_single_queue (57 samples, 0.02%)send_call_function_single_ipi (54 samples, 0.01%)try_to_wake_up (404 samples, 0.11%)ttwu_queue_wakelist (116 samples, 0.03%)futex_wake (701 samples, 0.19%)wake_up_q (423 samples, 0.11%)do_futex (823 samples, 0.22%)__x64_sys_futex (865 samples, 0.23%)exit_to_user_mode_prepare (56 samples, 0.01%)do_syscall_64 (958 samples, 0.25%)syscall_exit_to_user_mode (63 samples, 0.02%)entry_SYSCALL_64_after_hwframe (992 samples, 0.26%)__GI___lll_lock_wake_private (1,063 samples, 0.28%)malloc_consolidate (690 samples, 0.18%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (779 samples, 0.21%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (192 samples, 0.05%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (162 samples, 0.04%)_int_malloc (2,142 samples, 0.57%)__GI___libc_malloc (3,995 samples, 1.06%)alloc::vec::Vec<T>::with_capacity (4,013 samples, 1.06%)alloc::vec::Vec<T,A>::with_capacity_in (4,013 samples, 1.06%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (4,006 samples, 1.06%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (4,006 samples, 1.06%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (4,004 samples, 1.06%)alloc::alloc::Global::alloc_impl (4,004 samples, 1.06%)alloc::alloc::alloc (4,004 samples, 1.06%)__rdl_alloc (4,004 samples, 
1.06%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (4,004 samples, 1.06%)core::ptr::drop_in_place<alloc::sync::Arc<tokio::net::udp::UdpSocket>> (175 samples, 0.05%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (175 samples, 0.05%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (189 samples, 0.05%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (81 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (51 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (47 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (46 samples, 0.01%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (42 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (144 samples, 0.04%)tokio::net::udp::UdpSocket::readable::{{closure}} (392 samples, 0.10%)tokio::net::udp::UdpSocket::ready::{{closure}} (390 samples, 0.10%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (181 samples, 0.05%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (172 samples, 0.05%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (135 samples, 0.04%)__GI___pthread_disable_asynccancel (75 samples, 0.02%)__x64_sys_recvfrom (179 samples, 0.05%)__sys_recvfrom (40 samples, 0.01%)__get_user_4 (139 samples, 0.04%)__put_user_nocheck_4 (875 samples, 0.23%)__check_object_size.part.0 (127 samples, 0.03%)check_stack_object (103 samples, 0.03%)__check_object_size (158 samples, 0.04%)move_addr_to_user (955 samples, 0.25%)copy_user_enhanced_fast_string (704 samples, 0.19%)apparmor_socket_recvmsg (80 samples, 0.02%)__check_object_size (53 samples, 0.01%)__skb_recv_udp (43 samples, 0.01%)__check_object_size.part.0 (45 samples, 0.01%)__check_heap_object (63 samples, 0.02%)__check_heap_object (309 samples, 0.08%)__virt_addr_valid (486 samples, 0.13%)check_heap_object (1,230 samples, 
0.33%)__check_object_size.part.0 (1,550 samples, 0.41%)is_vmalloc_addr (91 samples, 0.02%)__check_object_size (1,697 samples, 0.45%)check_stack_object (122 samples, 0.03%)__skb_try_recv_from_queue (265 samples, 0.07%)_raw_spin_lock (56 samples, 0.01%)_raw_spin_unlock_bh (498 samples, 0.13%)__local_bh_enable_ip (441 samples, 0.12%)__refill_stock (48 samples, 0.01%)drain_stock (42 samples, 0.01%)__sk_mem_reduce_allocated (74 samples, 0.02%)mem_cgroup_uncharge_skmem (74 samples, 0.02%)refill_stock (61 samples, 0.02%)__skb_recv_udp (1,279 samples, 0.34%)udp_rmem_release (187 samples, 0.05%)_copy_to_iter (408 samples, 0.11%)_raw_spin_lock_bh (603 samples, 0.16%)_raw_spin_unlock_bh (38 samples, 0.01%)copy_user_enhanced_fast_string (2,356 samples, 0.62%)cmpxchg_double_slab.constprop.0.isra.0 (1,503 samples, 0.40%)__slab_free (3,062 samples, 0.81%)put_cpu_partial (75 samples, 0.02%)__unfreeze_partials (70 samples, 0.02%)cache_from_obj (75 samples, 0.02%)cmpxchg_double_slab.constprop.0.isra.0 (85 samples, 0.02%)kfree_skbmem (3,703 samples, 0.98%)kmem_cache_free (3,665 samples, 0.97%)cmpxchg_double_slab.constprop.0.isra.0 (404 samples, 0.11%)__slab_free (1,382 samples, 0.37%)put_cpu_partial (57 samples, 0.02%)__unfreeze_partials (54 samples, 0.01%)__kmem_cache_free (1,536 samples, 0.41%)__consume_stateless_skb (7,001 samples, 1.85%)_..skb_release_data (3,272 samples, 0.86%)skb_free_head (1,626 samples, 0.43%)kfree (1,609 samples, 0.43%)skb_consume_udp (7,079 samples, 1.87%)s..inet_recvmsg (15,244 samples, 4.03%)inet..udp_recvmsg (15,018 samples, 3.97%)udp_..security_socket_recvmsg (249 samples, 0.07%)apparmor_socket_recvmsg (171 samples, 0.05%)aa_sk_perm (146 samples, 0.04%)sock_recvmsg (15,648 samples, 4.14%)sock_..__fget_light (1,217 samples, 0.32%)__fdget (1,243 samples, 0.33%)__sys_recvfrom (19,882 samples, 5.25%)__sys_..sockfd_lookup_light (1,397 samples, 0.37%)__fget_light (44 samples, 0.01%)fput (482 samples, 0.13%)import_single_range (71 samples, 
0.02%)__x64_sys_recvfrom (20,466 samples, 5.41%)__x64_s..exit_to_user_mode_prepare (364 samples, 0.10%)do_syscall_64 (21,043 samples, 5.56%)do_sysc..syscall_exit_to_user_mode (485 samples, 0.13%)entry_SYSCALL_64_after_hwframe (21,266 samples, 5.62%)entry_S..syscall_return_via_sysret (39 samples, 0.01%)__libc_recvfrom (21,805 samples, 5.76%)__libc_..tokio::net::udp::UdpSocket::recv_buf_from::{{closure}}::{{closure}} (21,997 samples, 5.81%)tokio::..mio::net::udp::UdpSocket::recv_from (21,912 samples, 5.79%)mio::ne..mio::io_source::IoSource<T>::do_io (21,912 samples, 5.79%)mio::io..mio::sys::unix::stateless_io_source::IoSourceState::do_io (21,912 samples, 5.79%)mio::sy..mio::net::udp::UdpSocket::recv_from::{{closure}} (21,912 samples, 5.79%)mio::ne..std::net::udp::UdpSocket::recv_from (21,912 samples, 5.79%)std::ne..std::sys_common::net::UdpSocket::recv_from (21,912 samples, 5.79%)std::sy..std::sys::pal::unix::net::Socket::recv_from (21,912 samples, 5.79%)std::sy..std::sys::pal::unix::net::Socket::recv_from_with_flags (21,912 samples, 5.79%)std::sy..std::sys_common::net::sockaddr_to_addr (45 samples, 0.01%)__GI___libc_malloc (49 samples, 0.01%)_int_malloc (51 samples, 0.01%)core::result::Result<T,E>::is_err (45 samples, 0.01%)core::result::Result<T,E>::is_ok (45 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (69 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (63 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (58 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (58 samples, 0.02%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (282 samples, 0.07%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (238 samples, 0.06%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (119 samples, 0.03%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (27,792 samples, 7.35%)torrust_tr..tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} 
(22,942 samples, 6.06%)tokio::n..tokio::runtime::io::registration::Registration::async_io::{{closure}} (22,924 samples, 6.06%)tokio::r..__memcpy_avx512_unaligned_erms (92 samples, 0.02%)__memcpy_avx512_unaligned_erms (2,847 samples, 0.75%)__memcpy_avx512_unaligned_erms (837 samples, 0.22%)core::ptr::drop_in_place<core::cell::RefMut<core::option::Option<alloc::boxed::Box<tokio::runtime::scheduler::multi_thread::worker::Core>>>> (42 samples, 0.01%)core::ptr::drop_in_place<core::cell::BorrowRefMut> (42 samples, 0.01%)<core::cell::BorrowRefMut as core::ops::drop::Drop>::drop (42 samples, 0.01%)core::cell::Cell<T>::set (42 samples, 0.01%)core::cell::Cell<T>::replace (42 samples, 0.01%)core::mem::replace (42 samples, 0.01%)core::ptr::write (42 samples, 0.01%)__x64_sys_futex (80 samples, 0.02%)futex_hash (110 samples, 0.03%)futex_wake_mark (43 samples, 0.01%)get_futex_key (112 samples, 0.03%)wake_q_add_safe (39 samples, 0.01%)_raw_spin_lock_irqsave (93 samples, 0.02%)__raw_spin_lock_irqsave (93 samples, 0.02%)select_task_rq_fair (322 samples, 0.09%)wake_affine (103 samples, 0.03%)available_idle_cpu (73 samples, 0.02%)__smp_call_single_queue (251 samples, 0.07%)send_call_function_single_ipi (243 samples, 0.06%)native_send_call_func_single_ipi (94 samples, 0.02%)default_send_IPI_single_phys (94 samples, 0.02%)__default_send_IPI_dest_field (85 samples, 0.02%)llist_add_batch (231 samples, 0.06%)sched_clock_cpu (68 samples, 0.02%)sched_clock (61 samples, 0.02%)native_sched_clock (61 samples, 0.02%)ttwu_queue_wakelist (704 samples, 0.19%)try_to_wake_up (1,611 samples, 0.43%)futex_wake (2,206 samples, 0.58%)wake_up_q (1,679 samples, 0.44%)do_futex (2,433 samples, 0.64%)__x64_sys_futex (2,563 samples, 0.68%)do_futex (62 samples, 0.02%)do_syscall_64 (2,732 samples, 0.72%)syscall_exit_to_user_mode (68 samples, 0.02%)exit_to_user_mode_prepare (58 samples, 0.02%)entry_SYSCALL_64_after_hwframe (2,827 samples, 0.75%)syscall (2,981 samples, 
0.79%)core::ptr::drop_in_place<core::option::Option<tokio::runtime::task::Notified<alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (83 samples, 0.02%)core::sync::atomic::AtomicU32::store (58 samples, 0.02%)core::sync::atomic::atomic_store (58 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::push_back_finish (83 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::push_back_or_overflow (115 samples, 0.03%)tokio::runtime::context::with_scheduler (451 samples, 0.12%)std::thread::local::LocalKey<T>::try_with (403 samples, 0.11%)tokio::runtime::context::with_scheduler::{{closure}} (394 samples, 0.10%)tokio::runtime::context::scoped::Scoped<T>::with (387 samples, 0.10%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (385 samples, 0.10%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (381 samples, 0.10%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (338 samples, 0.09%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (59 samples, 0.02%)alloc::vec::Vec<T,A>::pop (85 samples, 0.02%)core::ptr::read (56 samples, 0.01%)std::sync::mutex::MutexGuard<T>::new (55 samples, 0.01%)std::sync::poison::Flag::guard (55 samples, 0.01%)std::thread::panicking (52 samples, 0.01%)std::panicking::panicking (52 samples, 0.01%)std::panicking::panic_count::count_is_zero (52 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (156 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (145 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (90 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock_contended (43 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (2,704 samples, 0.71%)core::sync::atomic::atomic_add (2,704 samples, 
0.71%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (3,112 samples, 0.82%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (2,853 samples, 0.75%)tokio::runtime::scheduler::multi_thread::idle::State::num_unparked (80 samples, 0.02%)__fget_light (82 samples, 0.02%)__fdget_pos (85 samples, 0.02%)try_to_wake_up (70 samples, 0.02%)__wake_up_common (74 samples, 0.02%)ep_autoremove_wake_function (71 samples, 0.02%)default_wake_function (71 samples, 0.02%)_raw_spin_lock_irqsave (68 samples, 0.02%)__raw_spin_lock_irqsave (68 samples, 0.02%)native_queued_spin_lock_slowpath (58 samples, 0.02%)__wake_up_common_lock (144 samples, 0.04%)__wake_up (145 samples, 0.04%)_raw_read_lock_irqsave (177 samples, 0.05%)__raw_read_lock_irqsave (177 samples, 0.05%)__wake_up_common (458 samples, 0.12%)ep_poll_callback (447 samples, 0.12%)__wake_up_locked_key (461 samples, 0.12%)_raw_spin_lock_irq (139 samples, 0.04%)eventfd_write (716 samples, 0.19%)copy_user_enhanced_fast_string (76 samples, 0.02%)__x64_sys_write (884 samples, 0.23%)ksys_write (873 samples, 0.23%)vfs_write (758 samples, 0.20%)do_syscall_64 (930 samples, 0.25%)syscall_exit_to_user_mode (40 samples, 0.01%)entry_SYSCALL_64_after_hwframe (950 samples, 0.25%)__GI___libc_write (1,017 samples, 0.27%)__GI___libc_write (1,014 samples, 0.27%)tokio::runtime::driver::Handle::unpark (1,023 samples, 0.27%)tokio::runtime::driver::IoHandle::unpark (1,023 samples, 0.27%)tokio::runtime::io::driver::Handle::unpark (1,023 samples, 0.27%)mio::waker::Waker::wake (1,022 samples, 0.27%)mio::sys::unix::waker::fdbased::Waker::wake (1,022 samples, 0.27%)mio::sys::unix::waker::eventfd::WakerInternal::wake (1,022 samples, 0.27%)<&std::fs::File as std::io::Write>::write (1,021 samples, 0.27%)std::sys::pal::unix::fs::File::write (1,021 samples, 0.27%)std::sys::pal::unix::fd::FileDesc::write (1,021 samples, 0.27%)tokio::runtime::context::with_scheduler (7,649 samples, 
2.02%)t..std::thread::local::LocalKey<T>::try_with (7,636 samples, 2.02%)s..tokio::runtime::context::with_scheduler::{{closure}} (7,635 samples, 2.02%)t..tokio::runtime::context::scoped::Scoped<T>::with (7,634 samples, 2.02%)t..tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (7,634 samples, 2.02%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (7,634 samples, 2.02%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (7,591 samples, 2.01%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (7,591 samples, 2.01%)t..tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (1,047 samples, 0.28%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (1,043 samples, 0.28%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_option_task_without_yield (7,948 samples, 2.10%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task (7,946 samples, 2.10%)t..tokio::runtime::scheduler::multi_thread::worker::with_current (7,946 samples, 2.10%)t..tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (90 samples, 0.02%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (63 samples, 0.02%)core::result::Result<T,E>::is_err (409 samples, 0.11%)core::result::Result<T,E>::is_ok (409 samples, 0.11%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (992 samples, 0.26%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::lock_shard (797 samples, 0.21%)tokio::util::sharded_list::ShardedList<L,<L as 
tokio::util::linked_list::Link>::Target>::shard_inner (781 samples, 0.21%)tokio::loom::std::mutex::Mutex<T>::lock (773 samples, 0.20%)std::sync::mutex::Mutex<T>::lock (773 samples, 0.20%)std::sys::sync::mutex::futex::Mutex::lock (769 samples, 0.20%)core::sync::atomic::AtomicU32::compare_exchange (360 samples, 0.10%)core::sync::atomic::atomic_compare_exchange (360 samples, 0.10%)__memcpy_avx512_unaligned_erms (410 samples, 0.11%)__GI___lll_lock_wake_private (103 samples, 0.03%)__memcpy_avx512_unaligned_erms (180 samples, 0.05%)update_curr (47 samples, 0.01%)update_load_avg (57 samples, 0.02%)dequeue_entity (144 samples, 0.04%)dequeue_task (169 samples, 0.04%)dequeue_task_fair (166 samples, 0.04%)ctx_sched_out (41 samples, 0.01%)prepare_task_switch (61 samples, 0.02%)__perf_event_task_sched_out (56 samples, 0.01%)perf_event_context_sched_out (48 samples, 0.01%)psi_group_change (57 samples, 0.02%)psi_task_switch (96 samples, 0.03%)__schedule (428 samples, 0.11%)futex_wait_queue (471 samples, 0.12%)schedule (433 samples, 0.11%)futex_q_lock (46 samples, 0.01%)futex_q_unlock (217 samples, 0.06%)futex_wait_setup (344 samples, 0.09%)futex_wait (864 samples, 0.23%)do_futex (877 samples, 0.23%)__x64_sys_futex (885 samples, 0.23%)__rseq_handle_notify_resume (77 samples, 0.02%)exit_to_user_mode_loop (96 samples, 0.03%)exit_to_user_mode_prepare (127 samples, 0.03%)do_syscall_64 (1,027 samples, 0.27%)syscall_exit_to_user_mode (136 samples, 0.04%)entry_SYSCALL_64_after_hwframe (1,035 samples, 0.27%)__GI___lll_lock_wait_private (1,121 samples, 0.30%)futex_wait (1,095 samples, 0.29%)futex_hash (66 samples, 0.02%)__smp_call_single_queue (40 samples, 0.01%)send_call_function_single_ipi (39 samples, 0.01%)futex_wake (608 samples, 0.16%)wake_up_q (362 samples, 0.10%)try_to_wake_up (353 samples, 0.09%)ttwu_queue_wakelist (78 samples, 0.02%)do_futex (729 samples, 0.19%)__x64_sys_futex (747 samples, 0.20%)exit_to_user_mode_prepare (44 samples, 0.01%)do_syscall_64 (822 samples, 
0.22%)syscall_exit_to_user_mode (54 samples, 0.01%)entry_SYSCALL_64_after_hwframe (838 samples, 0.22%)__GI___lll_lock_wake_private (916 samples, 0.24%)_int_free (220 samples, 0.06%)__alloc_pages (48 samples, 0.01%)do_anonymous_page (127 samples, 0.03%)vma_alloc_folio (59 samples, 0.02%)__folio_alloc (52 samples, 0.01%)handle_pte_fault (132 samples, 0.03%)__handle_mm_fault (136 samples, 0.04%)handle_mm_fault (153 samples, 0.04%)do_user_addr_fault (170 samples, 0.04%)asm_exc_page_fault (177 samples, 0.05%)exc_page_fault (175 samples, 0.05%)perf_event_mmap_output (55 samples, 0.01%)perf_event_mmap_event (115 samples, 0.03%)perf_iterate_sb (111 samples, 0.03%)perf_iterate_ctx (100 samples, 0.03%)perf_event_mmap (124 samples, 0.03%)mas_preallocate (48 samples, 0.01%)mas_alloc_nodes (48 samples, 0.01%)mas_wr_store_entry.isra.0 (43 samples, 0.01%)mas_store_prealloc (73 samples, 0.02%)__vma_adjust (154 samples, 0.04%)vma_mas_store (78 samples, 0.02%)vma_merge (173 samples, 0.05%)mprotect_fixup (330 samples, 0.09%)do_mprotect_pkey (384 samples, 0.10%)__x64_sys_mprotect (390 samples, 0.10%)grow_heap (401 samples, 0.11%)__GI_mprotect (399 samples, 0.11%)entry_SYSCALL_64_after_hwframe (395 samples, 0.10%)do_syscall_64 (392 samples, 0.10%)sysmalloc (595 samples, 0.16%)_int_malloc (1,239 samples, 0.33%)unlink_chunk (55 samples, 0.01%)_int_memalign (1,518 samples, 0.40%)core::option::Option<T>::map (14,436 samples, 3.82%)core..tokio::task::spawn::spawn_inner::{{closure}} (14,430 samples, 3.81%)toki..tokio::runtime::scheduler::Handle::spawn (14,425 samples, 3.81%)toki..tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (14,422 samples, 3.81%)toki..tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (14,411 samples, 3.81%)toki..tokio::runtime::task::list::OwnedTasks<S>::bind (5,536 samples, 1.46%)tokio::runtime::task::new_task (4,490 samples, 1.19%)tokio::runtime::task::raw::RawTask::new (4,490 samples, 1.19%)tokio::runtime::task::core::Cell<T,S>::new 
(4,490 samples, 1.19%)alloc::boxed::Box<T>::new (4,034 samples, 1.07%)alloc::alloc::exchange_malloc (3,715 samples, 0.98%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (3,702 samples, 0.98%)alloc::alloc::Global::alloc_impl (3,702 samples, 0.98%)alloc::alloc::alloc (3,702 samples, 0.98%)__rdl_alloc (3,702 samples, 0.98%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (3,702 samples, 0.98%)std::sys::pal::unix::alloc::aligned_malloc (3,702 samples, 0.98%)__posix_memalign (3,659 samples, 0.97%)__posix_memalign (3,656 samples, 0.97%)_mid_memalign (3,656 samples, 0.97%)tokio::runtime::context::current::with_current (17,481 samples, 4.62%)tokio..std::thread::local::LocalKey<T>::try_with (17,476 samples, 4.62%)std::..tokio::runtime::context::current::with_current::{{closure}} (17,340 samples, 4.58%)tokio..tokio::task::spawn::spawn (17,548 samples, 4.64%)tokio..tokio::task::spawn::spawn_inner (17,548 samples, 4.64%)tokio..tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (173,936 samples, 45.97%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_muttokio::runtime::task::core::Core<T,S>::poll::{{closure}} (173,935 samples, 45.97%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}}torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (48,629 samples, 12.85%)torrust_tracker::se..torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (48,628 samples, 12.85%)torrust_tracker::se..torrust_tracker::servers::udp::server::Udp::spawn_request_processor (17,569 samples, 4.64%)torru..__memcpy_avx512_unaligned_erms (51 samples, 0.01%)__memcpy_avx512_unaligned_erms (2,052 samples, 0.54%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (2,067 samples, 0.55%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (2,067 samples, 0.55%)tokio::runtime::task::core::Core<T,S>::poll (176,078 samples, 
46.54%)tokio::runtime::task::core::Core<T,S>::polltokio::runtime::task::core::Core<T,S>::drop_future_or_output (2,140 samples, 0.57%)tokio::runtime::task::core::Core<T,S>::set_stage (2,138 samples, 0.57%)__memcpy_avx512_unaligned_erms (48 samples, 0.01%)__memcpy_avx512_unaligned_erms (856 samples, 0.23%)__memcpy_avx512_unaligned_erms (1,693 samples, 0.45%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (1,703 samples, 0.45%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (1,702 samples, 0.45%)tokio::runtime::task::core::Core<T,S>::set_stage (2,570 samples, 0.68%)tokio::runtime::task::harness::poll_future (178,737 samples, 47.24%)tokio::runtime::task::harness::poll_futurestd::panic::catch_unwind (178,737 samples, 47.24%)std::panic::catch_unwindstd::panicking::try (178,737 samples, 47.24%)std::panicking::trystd::panicking::try::do_call (178,737 samples, 47.24%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (178,737 samples, 47.24%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<..tokio::runtime::task::harness::poll_future::{{closure}} (178,737 samples, 47.24%)tokio::runtime::task::harness::poll_future::{{closure}}tokio::runtime::task::core::Core<T,S>::store_output (2,659 samples, 0.70%)tokio::runtime::task::state::State::transition_to_running (348 samples, 0.09%)tokio::runtime::task::state::State::fetch_update_action (348 samples, 0.09%)tokio::runtime::task::raw::poll (181,363 samples, 47.93%)tokio::runtime::task::raw::polltokio::runtime::task::harness::Harness<T,S>::poll (181,220 samples, 47.90%)tokio::runtime::task::harness::Harness<T,S>::polltokio::runtime::task::harness::Harness<T,S>::poll_inner (179,097 samples, 47.33%)tokio::runtime::task::harness::Harness<T,S>::poll_innertokio::runtime::coop::budget (181,972 samples, 48.09%)tokio::runtime::coop::budgettokio::runtime::coop::with_budget (181,972 samples, 
48.09%)tokio::runtime::coop::with_budgettokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (181,912 samples, 48.08%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}}tokio::runtime::task::LocalNotified<S>::run (181,909 samples, 48.08%)tokio::runtime::task::LocalNotified<S>::runtokio::runtime::task::raw::RawTask::poll (181,909 samples, 48.08%)tokio::runtime::task::raw::RawTask::polltokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (110 samples, 0.03%)__x64_sys_futex (295 samples, 0.08%)_raw_spin_lock (91 samples, 0.02%)futex_hash (195 samples, 0.05%)futex_wake_mark (84 samples, 0.02%)get_futex_key (163 samples, 0.04%)wake_q_add_safe (121 samples, 0.03%)select_task_rq_fair (60 samples, 0.02%)_raw_spin_lock_irqsave (167 samples, 0.04%)__raw_spin_lock_irqsave (164 samples, 0.04%)available_idle_cpu (62 samples, 0.02%)select_idle_sibling (121 samples, 0.03%)select_task_rq_fair (1,048 samples, 0.28%)wake_affine (382 samples, 0.10%)available_idle_cpu (315 samples, 0.08%)__smp_call_single_queue (294 samples, 0.08%)send_call_function_single_ipi (280 samples, 0.07%)native_send_call_func_single_ipi (53 samples, 0.01%)default_send_IPI_single_phys (53 samples, 0.01%)__default_send_IPI_dest_field (49 samples, 0.01%)llist_add_batch (533 samples, 0.14%)sched_clock_cpu (152 samples, 0.04%)sched_clock (144 samples, 0.04%)native_sched_clock (143 samples, 0.04%)ttwu_queue_wakelist (1,388 samples, 0.37%)try_to_wake_up (3,169 samples, 0.84%)futex_wake (4,300 samples, 1.14%)wake_up_q (3,312 samples, 0.88%)do_futex (4,709 samples, 1.24%)__x64_sys_futex (4,862 samples, 1.28%)do_futex (47 samples, 0.01%)exit_to_user_mode_prepare (62 samples, 0.02%)do_syscall_64 (5,079 samples, 1.34%)syscall_exit_to_user_mode (90 samples, 0.02%)entry_SYSCALL_64_after_hwframe (5,409 samples, 1.43%)syscall (5,655 samples, 1.49%)tokio::loom::std::mutex::Mutex<T>::lock (75 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (71 samples, 
0.02%)std::sys::sync::mutex::futex::Mutex::lock (49 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (1,216 samples, 0.32%)core::sync::atomic::atomic_add (1,216 samples, 0.32%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (1,348 samples, 0.36%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (1,238 samples, 0.33%)fput (40 samples, 0.01%)__fget_light (155 samples, 0.04%)__fdget_pos (165 samples, 0.04%)_raw_spin_lock_irqsave (39 samples, 0.01%)__raw_spin_lock_irqsave (39 samples, 0.01%)__wake_up_common (172 samples, 0.05%)ep_autoremove_wake_function (163 samples, 0.04%)default_wake_function (162 samples, 0.04%)try_to_wake_up (159 samples, 0.04%)ttwu_queue_wakelist (43 samples, 0.01%)__wake_up_common_lock (308 samples, 0.08%)_raw_spin_lock_irqsave (131 samples, 0.03%)__raw_spin_lock_irqsave (131 samples, 0.03%)native_queued_spin_lock_slowpath (121 samples, 0.03%)__wake_up (311 samples, 0.08%)_raw_read_lock_irqsave (369 samples, 0.10%)__raw_read_lock_irqsave (368 samples, 0.10%)__wake_up_common (972 samples, 0.26%)ep_poll_callback (937 samples, 0.25%)_raw_read_unlock_irqrestore (39 samples, 0.01%)__wake_up_locked_key (989 samples, 0.26%)_copy_from_user (42 samples, 0.01%)_raw_spin_lock_irq (428 samples, 0.11%)eventfd_write (1,641 samples, 0.43%)copy_user_enhanced_fast_string (139 samples, 0.04%)ksys_write (1,936 samples, 0.51%)vfs_write (1,722 samples, 0.46%)rw_verify_area (42 samples, 0.01%)security_file_permission (38 samples, 0.01%)__x64_sys_write (1,980 samples, 0.52%)exit_to_user_mode_prepare (42 samples, 0.01%)do_syscall_64 (2,071 samples, 0.55%)syscall_exit_to_user_mode (74 samples, 0.02%)entry_SYSCALL_64_after_hwframe (2,113 samples, 0.56%)__GI___libc_write (2,270 samples, 0.60%)__GI___libc_write (2,271 samples, 0.60%)mio::sys::unix::waker::eventfd::WakerInternal::wake (2,284 samples, 0.60%)<&std::fs::File as std::io::Write>::write (2,277 samples, 0.60%)std::sys::pal::unix::fs::File::write (2,277 
samples, 0.60%)std::sys::pal::unix::fd::FileDesc::write (2,277 samples, 0.60%)__entry_text_start (206 samples, 0.05%)tokio::runtime::driver::Handle::unpark (2,522 samples, 0.67%)tokio::runtime::driver::IoHandle::unpark (2,522 samples, 0.67%)tokio::runtime::io::driver::Handle::unpark (2,522 samples, 0.67%)mio::waker::Waker::wake (2,518 samples, 0.67%)mio::sys::unix::waker::fdbased::Waker::wake (2,518 samples, 0.67%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (234 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (234 samples, 0.06%)tokio::runtime::driver::Handle::unpark (233 samples, 0.06%)tokio::runtime::driver::IoHandle::unpark (233 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (2,547 samples, 0.67%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (2,543 samples, 0.67%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (191,736 samples, 50.67%)tokio::runtime::scheduler::multi_thread::worker::Context::run_tasktokio::runtime::scheduler::multi_thread::worker::Core::transition_from_searching (9,564 samples, 2.53%)to..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::transition_worker_from_searching (9,562 samples, 2.53%)to..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (9,559 samples, 2.53%)to..tokio::runtime::scheduler::multi_thread::worker::Core::tune_global_queue_interval (67 samples, 0.02%)tokio::runtime::scheduler::multi_thread::stats::Stats::tuned_global_queue_interval (65 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (162 samples, 0.04%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (201 samples, 0.05%)alloc::sync::Arc<T,A>::inner (201 samples, 0.05%)core::ptr::non_null::NonNull<T>::as_ref (201 samples, 0.05%)core::iter::range::<impl 
core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (327 samples, 0.09%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (327 samples, 0.09%)core::cmp::impls::<impl core::cmp::PartialOrd for usize>::lt (326 samples, 0.09%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (127 samples, 0.03%)alloc::sync::Arc<T,A>::inner (127 samples, 0.03%)core::ptr::non_null::NonNull<T>::as_ref (127 samples, 0.03%)core::num::<impl u32>::wrapping_sub (314 samples, 0.08%)core::sync::atomic::AtomicU64::load (129 samples, 0.03%)core::sync::atomic::atomic_load (129 samples, 0.03%)tokio::loom::std::atomic_u32::AtomicU32::unsync_load (110 samples, 0.03%)core::sync::atomic::AtomicU32::load (110 samples, 0.03%)core::sync::atomic::atomic_load (110 samples, 0.03%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (168 samples, 0.04%)alloc::sync::Arc<T,A>::inner (168 samples, 0.04%)core::ptr::non_null::NonNull<T>::as_ref (168 samples, 0.04%)core::num::<impl u32>::wrapping_add (92 samples, 0.02%)core::num::<impl u32>::wrapping_sub (172 samples, 0.05%)core::sync::atomic::AtomicU32::load (163 samples, 0.04%)core::sync::atomic::atomic_load (163 samples, 0.04%)core::sync::atomic::AtomicU64::load (405 samples, 0.11%)core::sync::atomic::atomic_load (405 samples, 0.11%)tokio::runtime::scheduler::multi_thread::queue::pack (166 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (2,781 samples, 0.73%)tokio::runtime::scheduler::multi_thread::queue::unpack (540 samples, 0.14%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (3,784 samples, 1.00%)tokio::runtime::scheduler::multi_thread::queue::unpack (134 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_searching (423 samples, 0.11%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_searching (99 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work 
(8,170 samples, 2.16%)t..tokio::util::rand::FastRand::fastrand_n (71 samples, 0.02%)tokio::util::rand::FastRand::fastrand (71 samples, 0.02%)std::panic::catch_unwind (262,281 samples, 69.32%)std::panic::catch_unwindstd::panicking::try (262,281 samples, 69.32%)std::panicking::trystd::panicking::try::do_call (262,281 samples, 69.32%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (262,281 samples, 69.32%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_oncestd::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}} (262,281 samples, 69.32%)std::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}}std::sys_common::backtrace::__rust_begin_short_backtrace (262,281 samples, 69.32%)std::sys_common::backtrace::__rust_begin_short_backtracetokio::runtime::blocking::pool::Spawner::spawn_thread::{{closure}} (262,281 samples, 69.32%)tokio::runtime::blocking::pool::Spawner::spawn_thread::{{closure}}tokio::runtime::blocking::pool::Inner::run (262,281 samples, 69.32%)tokio::runtime::blocking::pool::Inner::runtokio::runtime::blocking::pool::Task::run (262,211 samples, 69.30%)tokio::runtime::blocking::pool::Task::runtokio::runtime::task::UnownedTask<S>::run (262,211 samples, 69.30%)tokio::runtime::task::UnownedTask<S>::runtokio::runtime::task::raw::RawTask::poll (262,211 samples, 69.30%)tokio::runtime::task::raw::RawTask::polltokio::runtime::task::raw::poll (262,211 samples, 69.30%)tokio::runtime::task::raw::polltokio::runtime::task::harness::Harness<T,S>::poll (262,211 samples, 69.30%)tokio::runtime::task::harness::Harness<T,S>::polltokio::runtime::task::harness::Harness<T,S>::poll_inner (262,211 samples, 69.30%)tokio::runtime::task::harness::Harness<T,S>::poll_innertokio::runtime::task::harness::poll_future (262,210 samples, 69.30%)tokio::runtime::task::harness::poll_futurestd::panic::catch_unwind (262,210 samples, 
69.30%)std::panic::catch_unwindstd::panicking::try (262,210 samples, 69.30%)std::panicking::trystd::panicking::try::do_call (262,210 samples, 69.30%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (262,210 samples, 69.30%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_oncetokio::runtime::task::harness::poll_future::{{closure}} (262,210 samples, 69.30%)tokio::runtime::task::harness::poll_future::{{closure}}tokio::runtime::task::core::Core<T,S>::poll (262,210 samples, 69.30%)tokio::runtime::task::core::Core<T,S>::polltokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (262,210 samples, 69.30%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_muttokio::runtime::task::core::Core<T,S>::poll::{{closure}} (262,210 samples, 69.30%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}}<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (262,210 samples, 69.30%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::polltokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (262,210 samples, 69.30%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}}tokio::runtime::scheduler::multi_thread::worker::run (262,210 samples, 69.30%)tokio::runtime::scheduler::multi_thread::worker::runtokio::runtime::context::runtime::enter_runtime (262,210 samples, 69.30%)tokio::runtime::context::runtime::enter_runtimetokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (262,210 samples, 69.30%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}tokio::runtime::context::set_scheduler (262,210 samples, 69.30%)tokio::runtime::context::set_schedulerstd::thread::local::LocalKey<T>::with (262,210 samples, 69.30%)std::thread::local::LocalKey<T>::withstd::thread::local::LocalKey<T>::try_with (262,210 samples, 
69.30%)std::thread::local::LocalKey<T>::try_withtokio::runtime::context::set_scheduler::{{closure}} (262,210 samples, 69.30%)tokio::runtime::context::set_scheduler::{{closure}}tokio::runtime::context::scoped::Scoped<T>::set (262,210 samples, 69.30%)tokio::runtime::context::scoped::Scoped<T>::settokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (262,210 samples, 69.30%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}}tokio::runtime::scheduler::multi_thread::worker::Context::run (262,210 samples, 69.30%)tokio::runtime::scheduler::multi_thread::worker::Context::run<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once (262,282 samples, 69.32%)<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once (262,282 samples, 69.32%)<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_oncecore::ops::function::FnOnce::call_once{{vtable.shim}} (262,282 samples, 69.32%)core::ops::function::FnOnce::call_once{{vtable.shim}}std::thread::Builder::spawn_unchecked_::{{closure}} (262,282 samples, 69.32%)std::thread::Builder::spawn_unchecked_::{{closure}}clone3 (262,316 samples, 69.33%)clone3start_thread (262,316 samples, 69.33%)start_threadstd::sys::pal::unix::thread::Thread::new::thread_start (262,299 samples, 69.32%)std::sys::pal::unix::thread::Thread::new::thread_startcore::fmt::Formatter::pad_integral (390 samples, 0.10%)core::fmt::Formatter::pad_integral::write_prefix (126 samples, 0.03%)core::fmt::Formatter::pad_integral (41 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (113 samples, 0.03%)__x64_sys_futex (173 samples, 0.05%)__x64_sys_getsockname (51 samples, 0.01%)__x64_sys_sendto (43 samples, 0.01%)__x64_sys_epoll_wait (165 samples, 0.04%)read_tsc (70 samples, 0.02%)futex_hash (127 samples, 
0.03%)futex_wait_setup (101 samples, 0.03%)get_futex_key (39 samples, 0.01%)futex_wait (270 samples, 0.07%)futex_wake (506 samples, 0.13%)get_futex_key (181 samples, 0.05%)do_futex (1,060 samples, 0.28%)__x64_sys_futex (1,255 samples, 0.33%)__fget_light (87 samples, 0.02%)__fdget (88 samples, 0.02%)__sys_getsockname (168 samples, 0.04%)sockfd_lookup_light (98 samples, 0.03%)__x64_sys_getsockname (171 samples, 0.05%)__fget_light (55 samples, 0.01%)__fdget (57 samples, 0.02%)__sys_recvfrom (187 samples, 0.05%)sockfd_lookup_light (72 samples, 0.02%)__x64_sys_recvfrom (198 samples, 0.05%)__sys_sendto (165 samples, 0.04%)sockfd_lookup_light (51 samples, 0.01%)__x64_sys_sendto (177 samples, 0.05%)do_futex (48 samples, 0.01%)do_syscall_64 (2,325 samples, 0.61%)syscall_enter_from_user_mode (175 samples, 0.05%)entry_SYSCALL_64_after_hwframe (3,067 samples, 0.81%)syscall_enter_from_user_mode (207 samples, 0.05%)entry_SYSCALL_64_safe_stack (222 samples, 0.06%)rand_chacha::guts::round (69 samples, 0.02%)rand_chacha::guts::refill_wide::impl_avx2 (80 samples, 0.02%)rand_chacha::guts::refill_wide::fn_impl (80 samples, 0.02%)rand_chacha::guts::refill_wide_impl (80 samples, 0.02%)core::cell::RefCell<T>::borrow_mut (113 samples, 0.03%)core::cell::RefCell<T>::try_borrow_mut (113 samples, 0.03%)core::cell::BorrowRefMut::new (113 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (156 samples, 0.04%)tokio::runtime::coop::budget (154 samples, 0.04%)tokio::runtime::coop::with_budget (154 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (136 samples, 0.04%)std::sys::pal::unix::time::Timespec::now (305 samples, 0.08%)std::sys::pal::unix::time::Timespec::sub_timespec (148 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock_contended (110 samples, 0.03%)std::sys_common::net::TcpListener::socket_addr (40 samples, 0.01%)syscall (90 samples, 0.02%)syscall_return_via_sysret (133 samples, 
0.04%)std::sync::poison::Flag::done (42 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>,tokio::runtime::task::core::Header>>> (47 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (47 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (71 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::remove (71 samples, 0.02%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (58 samples, 0.02%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (113 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (113 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::run (113 samples, 0.03%)tokio::runtime::context::runtime::enter_runtime (113 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (113 samples, 0.03%)tokio::runtime::context::set_scheduler (113 samples, 0.03%)std::thread::local::LocalKey<T>::with (113 samples, 0.03%)std::thread::local::LocalKey<T>::try_with (113 samples, 0.03%)tokio::runtime::context::set_scheduler::{{closure}} (113 samples, 0.03%)tokio::runtime::context::scoped::Scoped<T>::set (113 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (113 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::run (113 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (59 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (55 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (53 samples, 
0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (116 samples, 0.03%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (116 samples, 0.03%)tokio::runtime::task::harness::poll_future (119 samples, 0.03%)std::panic::catch_unwind (119 samples, 0.03%)std::panicking::try (119 samples, 0.03%)std::panicking::try::do_call (119 samples, 0.03%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (119 samples, 0.03%)tokio::runtime::task::harness::poll_future::{{closure}} (119 samples, 0.03%)tokio::runtime::task::core::Core<T,S>::poll (119 samples, 0.03%)tokio::runtime::task::raw::poll (136 samples, 0.04%)tokio::runtime::task::harness::Harness<T,S>::poll (123 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (122 samples, 0.03%)torrust_tracker::bootstrap::logging::INIT (43 samples, 0.01%)__memcpy_avx512_unaligned_erms (652 samples, 0.17%)__entry_text_start (57 samples, 0.02%)_int_free (317 samples, 0.08%)_int_malloc (313 samples, 0.08%)torrust_tracker::servers::udp::logging::log_request::__CALLSITE::META (1,319 samples, 0.35%)__GI___lll_lock_wait_private (265 samples, 0.07%)futex_wait (154 samples, 0.04%)futex_fatal_error (55 samples, 0.01%)__memcpy_avx512_unaligned_erms (1,079 samples, 0.29%)_int_free (67 samples, 0.02%)_int_malloc (120 samples, 0.03%)torrust_tracker::servers::udp::logging::log_request::__CALLSITE (1,582 samples, 0.42%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (122 samples, 0.03%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (59 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (45 samples, 0.01%)__GI___libc_malloc (123 samples, 0.03%)_int_malloc (143 samples, 0.04%)alloc::vec::Vec<T>::with_capacity (237 samples, 0.06%)alloc::vec::Vec<T,A>::with_capacity_in (237 samples, 0.06%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (234 samples, 
0.06%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (234 samples, 0.06%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (231 samples, 0.06%)alloc::alloc::Global::alloc_impl (231 samples, 0.06%)alloc::alloc::alloc (231 samples, 0.06%)__rdl_alloc (231 samples, 0.06%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (231 samples, 0.06%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (207 samples, 0.05%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (207 samples, 0.05%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (207 samples, 0.05%)__entry_text_start (140 samples, 0.04%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}}::{{closure}} (286 samples, 0.08%)mio::net::udp::UdpSocket::recv_from (273 samples, 0.07%)mio::io_source::IoSource<T>::do_io (273 samples, 0.07%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (273 samples, 0.07%)mio::net::udp::UdpSocket::recv_from::{{closure}} (273 samples, 0.07%)std::net::udp::UdpSocket::recv_from (273 samples, 0.07%)std::sys_common::net::UdpSocket::recv_from (273 samples, 0.07%)std::sys::pal::unix::net::Socket::recv_from (273 samples, 0.07%)std::sys::pal::unix::net::Socket::recv_from_with_flags (273 samples, 0.07%)core::mem::zeroed (131 samples, 0.03%)core::mem::maybe_uninit::MaybeUninit<T>::zeroed (131 samples, 0.03%)core::ptr::mut_ptr::<impl *mut T>::write_bytes (131 samples, 0.03%)core::intrinsics::write_bytes (131 samples, 0.03%)__entry_text_start (131 samples, 0.03%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (639 samples, 0.17%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (360 samples, 0.10%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (355 samples, 0.09%)__entry_text_start (171 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::<impl 
tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_option_task_without_yield (186 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task (186 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::with_current (186 samples, 0.05%)tokio::runtime::context::with_scheduler (185 samples, 0.05%)std::thread::local::LocalKey<T>::try_with (185 samples, 0.05%)tokio::runtime::context::with_scheduler::{{closure}} (185 samples, 0.05%)tokio::runtime::context::scoped::Scoped<T>::with (185 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (185 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (185 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (185 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (185 samples, 0.05%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (185 samples, 0.05%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (185 samples, 0.05%)tokio::runtime::driver::Handle::unpark (185 samples, 0.05%)tokio::runtime::driver::IoHandle::unpark (185 samples, 0.05%)tokio::runtime::io::driver::Handle::unpark (185 samples, 0.05%)mio::waker::Waker::wake (185 samples, 0.05%)mio::sys::unix::waker::fdbased::Waker::wake (185 samples, 0.05%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (182 samples, 0.05%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (182 samples, 0.05%)tokio::runtime::driver::Handle::unpark (178 samples, 0.05%)tokio::runtime::driver::IoHandle::unpark (178 samples, 0.05%)torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} 
(1,051 samples, 0.28%)torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (1,045 samples, 0.28%)torrust_tracker::servers::udp::server::Udp::spawn_request_processor (319 samples, 0.08%)tokio::task::spawn::spawn (319 samples, 0.08%)tokio::task::spawn::spawn_inner (319 samples, 0.08%)tokio::runtime::context::current::with_current (319 samples, 0.08%)std::thread::local::LocalKey<T>::try_with (319 samples, 0.08%)tokio::runtime::context::current::with_current::{{closure}} (319 samples, 0.08%)core::option::Option<T>::map (319 samples, 0.08%)tokio::task::spawn::spawn_inner::{{closure}} (319 samples, 0.08%)tokio::runtime::scheduler::Handle::spawn (319 samples, 0.08%)tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (319 samples, 0.08%)tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (319 samples, 0.08%)tokio::runtime::task::list::OwnedTasks<S>::bind (133 samples, 0.04%)tokio::runtime::task::new_task (77 samples, 0.02%)tokio::runtime::task::raw::RawTask::new (77 samples, 0.02%)tokio::runtime::task::core::Cell<T,S>::new (77 samples, 0.02%)alloc::boxed::Box<T>::new (40 samples, 0.01%)all (378,369 samples, 100%)tokio-runtime-w (378,173 samples, 99.95%)tokio-runtime-w \ No newline at end of file diff --git a/docs/media/flamegraph_generated_without_sudo.svg b/docs/media/flamegraph_generated_without_sudo.svg new file mode 100644 index 000000000..e3df85866 --- /dev/null +++ b/docs/media/flamegraph_generated_without_sudo.svg @@ -0,0 +1,491 @@ +Flame Graph Reset ZoomSearch [unknown] (188 samples, 0.14%)[unknown] (187 samples, 0.14%)[unknown] (186 samples, 0.14%)[unknown] (178 samples, 0.14%)[unknown] (172 samples, 0.13%)[unknown] (158 samples, 0.12%)[unknown] (158 samples, 0.12%)[unknown] (125 samples, 0.10%)[unknown] (102 samples, 0.08%)[unknown] (93 samples, 0.07%)[unknown] (92 samples, 0.07%)[unknown] (41 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (29 samples, 0.02%)[unknown] (25 
samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (15 samples, 0.01%)__GI___mmap64 (18 samples, 0.01%)__GI___mmap64 (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (17 samples, 0.01%)profiling (214 samples, 0.16%)clone3 (22 samples, 0.02%)start_thread (22 samples, 0.02%)std::sys::pal::unix::thread::Thread::new::thread_start (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::Handler::new (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::imp::make_handler (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::imp::get_stack (19 samples, 0.01%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (30 samples, 0.02%)[[vdso]] (93 samples, 0.07%)<torrust_tracker::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as core::ops::deref::Deref>::deref::__stability::LAZY (143 samples, 0.11%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (31 samples, 0.02%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (28 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (28 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<BorrowType,K,V>::init_front (21 samples, 0.02%)[[vdso]] (91 samples, 0.07%)__GI___clock_gettime (14 samples, 0.01%)_int_malloc (53 samples, 0.04%)epoll_wait (254 samples, 0.19%)tokio::runtime::context::with_scheduler (28 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (14 samples, 0.01%)tokio::runtime::context::with_scheduler::{{closure}} (14 samples, 0.01%)core::option::Option<T>::map (17 samples, 0.01%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (17 samples, 0.01%)mio::poll::Poll::poll (27 samples, 
0.02%)mio::sys::unix::selector::epoll::Selector::select (27 samples, 0.02%)tokio::runtime::io::driver::Driver::turn (54 samples, 0.04%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (26 samples, 0.02%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (17 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (71 samples, 0.05%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (65 samples, 0.05%)core::sync::atomic::AtomicUsize::fetch_add (65 samples, 0.05%)core::sync::atomic::atomic_add (65 samples, 0.05%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (31 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark_condvar (18 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (49 samples, 0.04%)tokio::loom::std::mutex::Mutex<T>::lock (33 samples, 0.03%)std::sync::mutex::Mutex<T>::lock (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (93 samples, 0.07%)tokio::runtime::scheduler::multi_thread::park::Parker::park (75 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Inner::park (75 samples, 0.06%)core::cell::RefCell<T>::borrow_mut (18 samples, 0.01%)core::cell::RefCell<T>::try_borrow_mut (18 samples, 0.01%)core::cell::BorrowRefMut::new (18 samples, 0.01%)tokio::runtime::coop::budget (26 samples, 0.02%)tokio::runtime::coop::with_budget (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (96 samples, 
0.07%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_searching (27 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::transition_worker_from_searching (18 samples, 0.01%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::end_processing_scheduled_tasks (35 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::maintenance (14 samples, 0.01%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (90 samples, 0.07%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (90 samples, 0.07%)core::slice::<impl [T]>::contains (220 samples, 0.17%)<T as core::slice::cmp::SliceContains>::slice_contains (220 samples, 0.17%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (220 samples, 0.17%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (54 samples, 0.04%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (54 samples, 0.04%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (240 samples, 0.18%)tokio::runtime::scheduler::multi_thread::idle::Idle::unpark_worker_by_id (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (265 samples, 0.20%)tokio::runtime::scheduler::multi_thread::worker::Context::park (284 samples, 0.22%)core::option::Option<T>::or_else (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (40 samples, 0.03%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (17 samples, 
0.01%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (17 samples, 0.01%)core::num::<impl u32>::wrapping_add (17 samples, 0.01%)core::sync::atomic::AtomicU64::compare_exchange (26 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (129 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (128 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (119 samples, 0.09%)tokio::runtime::scheduler::multi_thread::queue::pack (39 samples, 0.03%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::run (613 samples, 0.47%)tokio::runtime::context::runtime::enter_runtime (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (613 samples, 0.47%)tokio::runtime::context::set_scheduler (613 samples, 0.47%)std::thread::local::LocalKey<T>::with (613 samples, 0.47%)std::thread::local::LocalKey<T>::try_with (613 samples, 0.47%)tokio::runtime::context::set_scheduler::{{closure}} (613 samples, 0.47%)tokio::runtime::context::scoped::Scoped<T>::set (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::Context::run (613 samples, 0.47%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (777 samples, 0.59%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (776 samples, 0.59%)core::ptr::drop_in_place<tokio::runtime::task::core::TaskIdGuard> (16 samples, 0.01%)<tokio::runtime::task::core::TaskIdGuard as core::ops::drop::Drop>::drop (16 samples, 0.01%)tokio::runtime::context::set_current_task_id (16 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (16 
samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (20 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (20 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::poll (835 samples, 0.64%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (56 samples, 0.04%)tokio::runtime::task::core::Core<T,S>::set_stage (46 samples, 0.04%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (897 samples, 0.68%)tokio::runtime::task::harness::poll_future::{{closure}} (897 samples, 0.68%)tokio::runtime::task::core::Core<T,S>::store_output (62 samples, 0.05%)tokio::runtime::task::harness::poll_future (930 samples, 0.71%)std::panic::catch_unwind (927 samples, 0.71%)std::panicking::try (927 samples, 0.71%)std::panicking::try::do_call (925 samples, 0.70%)core::mem::manually_drop::ManuallyDrop<T>::take (28 samples, 0.02%)core::ptr::read (28 samples, 0.02%)tokio::runtime::task::raw::poll (938 samples, 0.71%)tokio::runtime::task::harness::Harness<T,S>::poll (934 samples, 0.71%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (934 samples, 0.71%)core::array::<impl core::default::Default for [T: 32]>::default (26 samples, 0.02%)tokio::runtime::time::Inner::lock (16 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (16 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (16 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (15 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (15 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::time::wheel::Wheel::poll (25 samples, 0.02%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (98 samples, 0.07%)tokio::runtime::time::Driver::park_internal (51 samples, 0.04%)tokio::runtime::time::wheel::Wheel::next_expiration (15 samples, 0.01%)<F as core::future::into_future::IntoFuture>::into_future (16 samples, 
0.01%)torrust_tracker::servers::udp::logging::log_request (24 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (46 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (131 samples, 0.10%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (24 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (14 samples, 0.01%)core::sync::atomic::AtomicU32::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (39 samples, 0.03%)std::sync::rwlock::RwLock<T>::read (34 samples, 0.03%)std::sys::sync::rwlock::futex::RwLock::read (32 samples, 0.02%)[[heap]] (2,361 samples, 1.80%)[..[[vdso]] (313 samples, 0.24%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (41 samples, 0.03%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (28 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (16 samples, 0.01%)<alloc::string::String as core::fmt::Write>::write_str (67 samples, 0.05%)alloc::string::String::push_str (18 samples, 0.01%)alloc::vec::Vec<T,A>::extend_from_slice (18 samples, 0.01%)<alloc::vec::Vec<T,A> as 
alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (18 samples, 0.01%)alloc::vec::Vec<T,A>::append_elements (18 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (36 samples, 0.03%)core::num::<impl u64>::rotate_left (28 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (60 samples, 0.05%)core::num::<impl u64>::wrapping_add (14 samples, 0.01%)core::hash::sip::u8to64_le (60 samples, 0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (184 samples, 0.14%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (15 samples, 0.01%)tokio::runtime::context::CONTEXT::__getit (19 samples, 0.01%)core::cell::Cell<T>::get (17 samples, 0.01%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (26 samples, 0.02%)core::ops::function::FnMut::call_mut (21 samples, 0.02%)tokio::runtime::coop::poll_proceed (21 samples, 0.02%)tokio::runtime::context::budget (21 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (21 samples, 0.02%)[unknown] (18 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (195 samples, 0.15%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (14 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (14 samples, 0.01%)core::result::Result<T,E>::is_err (18 samples, 0.01%)core::result::Result<T,E>::is_ok (18 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (46 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (39 samples, 0.03%)core::sync::atomic::AtomicU32::compare_exchange (19 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (19 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (245 samples, 0.19%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (26 
samples, 0.02%)[[vdso]] (748 samples, 0.57%)[profiling] (34 samples, 0.03%)core::fmt::write (31 samples, 0.02%)__GI___clock_gettime (29 samples, 0.02%)__GI___libc_free (131 samples, 0.10%)arena_for_chunk (20 samples, 0.02%)arena_for_chunk (19 samples, 0.01%)heap_for_ptr (19 samples, 0.01%)heap_max_size (14 samples, 0.01%)__GI___libc_malloc (114 samples, 0.09%)__GI___libc_realloc (15 samples, 0.01%)__GI___lll_lock_wake_private (22 samples, 0.02%)__GI___pthread_disable_asynccancel (66 samples, 0.05%)__GI_getsockname (249 samples, 0.19%)__libc_calloc (15 samples, 0.01%)__libc_recvfrom (23 samples, 0.02%)__libc_sendto (130 samples, 0.10%)__memcmp_evex_movbe (451 samples, 0.34%)__memcpy_avx512_unaligned_erms (426 samples, 0.32%)__memset_avx512_unaligned_erms (215 samples, 0.16%)__posix_memalign (17 samples, 0.01%)_int_free (418 samples, 0.32%)tcache_put (24 samples, 0.02%)_int_malloc (385 samples, 0.29%)_int_memalign (31 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (26 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (15 samples, 0.01%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (15 samples, 0.01%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (15 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (54 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::grow_one (15 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (96 samples, 0.07%)alloc::raw_vec::RawVec<T,A>::grow_amortized (66 samples, 0.05%)core::num::<impl usize>::checked_add (18 samples, 0.01%)core::num::<impl usize>::overflowing_add (18 samples, 0.01%)alloc::raw_vec::finish_grow (74 samples, 0.06%)alloc::sync::Arc<T,A>::drop_slow (16 samples, 0.01%)core::mem::drop (14 samples, 
0.01%)core::fmt::Formatter::pad_integral (14 samples, 0.01%)core::ptr::drop_in_place<aquatic_udp_protocol::response::Response> (93 samples, 0.07%)core::ptr::drop_in_place<tokio::net::udp::UdpSocket::send_to<&core::net::socket_addr::SocketAddr>::{{closure}}> (23 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (188 samples, 0.14%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_announce::{{closure}}> (30 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_connect::{{closure}}> (22 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_packet::{{closure}}> (20 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}}> (19 samples, 0.01%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::send_response::{{closure}}> (22 samples, 0.02%)malloc_consolidate (24 samples, 0.02%)core::core_arch::x86::avx2::_mm256_or_si256 (15 samples, 0.01%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (17 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (17 samples, 0.01%)rand_chacha::guts::round (66 samples, 0.05%)rand_chacha::guts::refill_wide::impl_avx2 (99 samples, 0.08%)rand_chacha::guts::refill_wide::fn_impl (98 samples, 0.07%)rand_chacha::guts::refill_wide_impl (98 samples, 0.07%)std::io::error::Error::kind (14 samples, 0.01%)[unknown] (42 samples, 0.03%)[unknown] (14 samples, 0.01%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (490 samples, 0.37%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (211 samples, 0.16%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (84 samples, 
0.06%)tokio::runtime::task::core::Header::get_owner_id (18 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with (18 samples, 0.01%)tokio::runtime::task::core::Header::get_owner_id::{{closure}} (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (20 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::remove (19 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (31 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (29 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage (108 samples, 0.08%)tokio::runtime::task::core::TaskIdGuard::enter (14 samples, 0.01%)tokio::runtime::context::set_current_task_id (14 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (14 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::complete (21 samples, 0.02%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (32 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (54 samples, 0.04%)tokio::runtime::task::raw::drop_abort_handle (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (17 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (22 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (22 samples, 0.02%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (79 samples, 0.06%)core::slice::<impl [T]>::contains (178 samples, 0.14%)<T as core::slice::cmp::SliceContains>::slice_contains (178 samples, 0.14%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (178 samples, 0.14%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (40 
samples, 0.03%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (40 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (216 samples, 0.16%)tokio::loom::std::mutex::Mutex<T>::lock (16 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (219 samples, 0.17%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (29 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (29 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (54 samples, 0.04%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (18 samples, 0.01%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (18 samples, 0.01%)core::sync::atomic::AtomicU32::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (113 samples, 0.09%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (51 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::len (31 samples, 0.02%)core::sync::atomic::AtomicU64::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (447 samples, 0.34%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (174 samples, 0.13%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (19 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (489 samples, 0.37%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (489 samples, 
0.37%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::run (484 samples, 0.37%)tokio::runtime::context::runtime::enter_runtime (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (484 samples, 0.37%)tokio::runtime::context::set_scheduler (484 samples, 0.37%)std::thread::local::LocalKey<T>::with (484 samples, 0.37%)std::thread::local::LocalKey<T>::try_with (484 samples, 0.37%)tokio::runtime::context::set_scheduler::{{closure}} (484 samples, 0.37%)tokio::runtime::context::scoped::Scoped<T>::set (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::Context::run (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (24 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (20 samples, 0.02%)tokio::runtime::task::raw::poll (515 samples, 0.39%)tokio::runtime::task::harness::Harness<T,S>::poll (493 samples, 0.38%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (493 samples, 0.38%)tokio::runtime::task::harness::poll_future (493 samples, 0.38%)std::panic::catch_unwind (493 samples, 0.38%)std::panicking::try (493 samples, 0.38%)std::panicking::try::do_call (493 samples, 0.38%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (493 samples, 0.38%)tokio::runtime::task::harness::poll_future::{{closure}} (493 samples, 0.38%)tokio::runtime::task::core::Core<T,S>::poll (493 samples, 0.38%)tokio::runtime::time::wheel::Wheel::next_expiration (16 samples, 0.01%)torrust_tracker::core::Tracker::authorize::{{closure}} (27 samples, 0.02%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (15 samples, 
0.01%)torrust_tracker::core::Tracker::send_stats_event::{{closure}} (44 samples, 0.03%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (15 samples, 0.01%)<std::hash::random::DefaultHasher as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::d_rounds (29 samples, 0.02%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (74 samples, 0.06%)torrust_tracker::servers::udp::peer_builder::from_request (17 samples, 0.01%)torrust_tracker::servers::udp::request::AnnounceWrapper::new (51 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (54 samples, 0.04%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (58 samples, 0.04%)torrust_tracker::core::Tracker::announce::{{closure}} (70 samples, 0.05%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (113 samples, 0.09%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (175 samples, 0.13%)<T as alloc::string::ToString>::to_string (38 samples, 0.03%)core::option::Option<T>::expect (56 samples, 0.04%)torrust_tracker_primitives::info_hash::InfoHash::to_hex_string (18 samples, 0.01%)<T as alloc::string::ToString>::to_string (18 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (180 samples, 0.14%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (468 samples, 0.36%)torrust_tracker::servers::udp::logging::log_response (38 samples, 0.03%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (669 samples, 0.51%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (152 samples, 0.12%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (147 samples, 
0.11%)tokio::net::udp::UdpSocket::send_to::{{closure}} (138 samples, 0.11%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (119 samples, 0.09%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (75 samples, 0.06%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (39 samples, 0.03%)mio::net::udp::UdpSocket::send_to (39 samples, 0.03%)mio::io_source::IoSource<T>::do_io (39 samples, 0.03%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (39 samples, 0.03%)mio::net::udp::UdpSocket::send_to::{{closure}} (39 samples, 0.03%)std::net::udp::UdpSocket::send_to (39 samples, 0.03%)std::sys_common::net::UdpSocket::send_to (39 samples, 0.03%)std::sys::pal::unix::cvt (39 samples, 0.03%)<isize as std::sys::pal::unix::IsMinusOne>::is_minus_one (39 samples, 0.03%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_stats (15 samples, 0.01%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (14 samples, 0.01%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::count::to_usize::{{closure}} (33 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats::{{closure}} (33 samples, 0.03%)torrust_tracker_primitives::peer::Peer::is_seeder (33 samples, 0.03%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::count (75 samples, 0.06%)core::iter::traits::iterator::Iterator::sum (75 samples, 0.06%)<usize as core::iter::traits::accum::Sum>::sum (75 samples, 0.06%)<core::iter::adapters::map::Map<I,F> as core::iter::traits::iterator::Iterator>::fold (75 samples, 
0.06%)core::iter::traits::iterator::Iterator::fold (75 samples, 0.06%)core::iter::adapters::map::map_fold::{{closure}} (34 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (104 samples, 0.08%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (24 samples, 0.02%)core::mem::drop (15 samples, 0.01%)core::ptr::drop_in_place<core::option::Option<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (15 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (15 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (15 samples, 0.01%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (215 samples, 0.16%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (198 samples, 0.15%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (89 samples, 0.07%)core::option::Option<T>::is_some_and (32 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (31 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (30 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (30 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (26 samples, 
0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (34 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (34 samples, 0.03%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (58 samples, 0.04%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (58 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (58 samples, 0.04%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (58 samples, 0.04%)<u8 as core::slice::cmp::SliceOrd>::compare (58 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (20 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (238 samples, 0.18%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (236 samples, 0.18%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (208 samples, 0.16%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (208 samples, 0.16%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (282 samples, 0.21%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (67 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (53 
samples, 0.04%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (53 samples, 0.04%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (22 samples, 0.02%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (22 samples, 0.02%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (22 samples, 0.02%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (22 samples, 0.02%)<u8 as core::slice::cmp::SliceOrd>::compare (22 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (18 samples, 0.01%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (23 samples, 0.02%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (23 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (43 samples, 0.03%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (43 samples, 0.03%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (43 samples, 0.03%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (43 samples, 0.03%)<u8 as core::slice::cmp::SliceOrd>::compare (43 samples, 0.03%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (17 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (151 samples, 0.12%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (145 samples, 0.11%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (137 samples, 0.10%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (137 samples, 0.10%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl 
torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (266 samples, 0.20%)core::sync::atomic::AtomicU32::load (27 samples, 0.02%)core::sync::atomic::atomic_load (27 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (38 samples, 0.03%)std::sync::rwlock::RwLock<T>::read (37 samples, 0.03%)std::sys::sync::rwlock::futex::RwLock::read (36 samples, 0.03%)tracing::span::Span::log (16 samples, 0.01%)tracing::span::Span::record_all (70 samples, 0.05%)unlink_chunk (139 samples, 0.11%)rand::rng::Rng::gen (30 samples, 0.02%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (30 samples, 0.02%)rand::rng::Rng::gen (30 samples, 0.02%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (30 samples, 0.02%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (30 samples, 0.02%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (30 samples, 0.02%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (30 samples, 0.02%)rand_core::block::BlockRng<R>::generate_and_set (28 samples, 0.02%)[anon] (8,759 samples, 6.67%)[anon]uuid::v4::<impl uuid::Uuid>::new_v4 (32 samples, 0.02%)uuid::rng::bytes (32 samples, 0.02%)rand::random (32 samples, 0.02%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (15 samples, 0.01%)_int_free (338 samples, 0.26%)tcache_put 
(18 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (22 samples, 0.02%)hashbrown::raw::h2 (14 samples, 0.01%)hashbrown::raw::RawTable<T,A>::find_or_find_insert_slot (23 samples, 0.02%)hashbrown::raw::RawTableInner::find_or_find_insert_slot_inner (17 samples, 0.01%)hashbrown::map::HashMap<K,V,S,A>::insert (25 samples, 0.02%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (15 samples, 0.01%)[profiling] (545 samples, 0.42%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (32 samples, 0.02%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (22 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (16 samples, 0.01%)alloc::vec::Vec<T,A>::reserve (30 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve (28 samples, 0.02%)<alloc::string::String as core::fmt::Write>::write_str (83 samples, 0.06%)alloc::string::String::push_str (57 samples, 0.04%)alloc::vec::Vec<T,A>::extend_from_slice (57 samples, 0.04%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (57 samples, 0.04%)alloc::vec::Vec<T,A>::append_elements (57 samples, 0.04%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (20 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (41 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (151 samples, 0.12%)core::hash::sip::u8to64_le (50 samples, 0.04%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (33 samples, 0.03%)tokio::runtime::context::CONTEXT::__getit (35 samples, 0.03%)core::cell::Cell<T>::get (33 samples, 0.03%)[unknown] (20 samples, 
0.02%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (75 samples, 0.06%)core::ops::function::FnMut::call_mut (66 samples, 0.05%)tokio::runtime::coop::poll_proceed (66 samples, 0.05%)tokio::runtime::context::budget (66 samples, 0.05%)std::thread::local::LocalKey<T>::try_with (66 samples, 0.05%)tokio::runtime::context::budget::{{closure}} (27 samples, 0.02%)tokio::runtime::coop::poll_proceed::{{closure}} (27 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (110 samples, 0.08%)[unknown] (15 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (27 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (27 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (14 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (84 samples, 0.06%)std::sync::mutex::Mutex<T>::lock (70 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock (59 samples, 0.04%)core::sync::atomic::AtomicU32::compare_exchange (55 samples, 0.04%)core::sync::atomic::atomic_compare_exchange (55 samples, 0.04%)[unknown] (33 samples, 0.03%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (214 samples, 0.16%)__memcpy_avx512_unaligned_erms (168 samples, 0.13%)[profiling] (171 samples, 0.13%)binascii::bin2hex (77 samples, 0.06%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (280 samples, 0.21%)[unknown] (317 samples, 0.24%)[[vdso]] (2,648 samples, 2.02%)[..[unknown] (669 samples, 0.51%)[unknown] (396 samples, 0.30%)[unknown] (251 samples, 0.19%)[unknown] (65 samples, 0.05%)[unknown] (30 samples, 0.02%)[unknown] (21 samples, 0.02%)__GI___clock_gettime (56 samples, 
0.04%)arena_for_chunk (72 samples, 0.05%)arena_for_chunk (62 samples, 0.05%)heap_for_ptr (49 samples, 0.04%)heap_max_size (28 samples, 0.02%)__GI___libc_free (194 samples, 0.15%)arena_for_chunk (19 samples, 0.01%)checked_request2size (24 samples, 0.02%)__GI___libc_malloc (220 samples, 0.17%)tcache_get (44 samples, 0.03%)__GI___libc_write (25 samples, 0.02%)__GI___libc_write (14 samples, 0.01%)__GI___pthread_disable_asynccancel (97 samples, 0.07%)core::num::<impl u128>::leading_zeros (15 samples, 0.01%)compiler_builtins::float::conv::int_to_float::u128_to_f64_bits (72 samples, 0.05%)__floattidf (90 samples, 0.07%)compiler_builtins::float::conv::__floattidf (86 samples, 0.07%)exp_inline (40 samples, 0.03%)log_inline (64 samples, 0.05%)__ieee754_pow_fma (114 samples, 0.09%)__libc_calloc (106 samples, 0.08%)__libc_recvfrom (252 samples, 0.19%)__libc_sendto (133 samples, 0.10%)__memcmp_evex_movbe (137 samples, 0.10%)__memcpy_avx512_unaligned_erms (1,399 samples, 1.07%)__posix_memalign (172 samples, 0.13%)__posix_memalign (80 samples, 0.06%)_mid_memalign (71 samples, 0.05%)arena_for_chunk (14 samples, 0.01%)__pow (18 samples, 0.01%)__vdso_clock_gettime (40 samples, 0.03%)[unknown] (24 samples, 0.02%)_int_free (462 samples, 0.35%)tcache_put (54 samples, 0.04%)[unknown] (14 samples, 0.01%)_int_malloc (508 samples, 0.39%)_int_memalign (68 samples, 0.05%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (54 samples, 0.04%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (14 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (78 samples, 0.06%)alloc::raw_vec::RawVec<T,A>::grow_amortized (73 samples, 0.06%)alloc::raw_vec::finish_grow (91 samples, 0.07%)core::result::Result<T,E>::map_err (31 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Weak<ring::ec::curve25519::ed25519::signing::Ed25519KeyPair,&alloc::alloc::Global>> (16 samples, 0.01%)<alloc::sync::Weak<T,A> as core::ops::drop::Drop>::drop (16 samples, 
0.01%)core::mem::drop (18 samples, 0.01%)alloc::sync::Arc<T,A>::drop_slow (21 samples, 0.02%)alloc_new_heap (49 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (49 samples, 0.04%)core::fmt::Formatter::pad_integral (40 samples, 0.03%)core::fmt::Formatter::pad_integral::write_prefix (19 samples, 0.01%)core::fmt::write (20 samples, 0.02%)core::ptr::drop_in_place<[core::option::Option<core::task::wake::Waker>: 32]> (155 samples, 0.12%)core::ptr::drop_in_place<core::option::Option<core::task::wake::Waker>> (71 samples, 0.05%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (245 samples, 0.19%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_announce::{{closure}}> (33 samples, 0.03%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}}> (37 samples, 0.03%)core::str::converts::from_utf8 (33 samples, 0.03%)core::str::validations::run_utf8_validation (20 samples, 0.02%)epoll_wait (31 samples, 0.02%)hashbrown::map::HashMap<K,V,S,A>::insert (17 samples, 0.01%)rand_chacha::guts::refill_wide (19 samples, 0.01%)std_detect::detect::arch::x86::__is_feature_detected::avx2 (17 samples, 0.01%)std_detect::detect::check_for (17 samples, 0.01%)std_detect::detect::cache::test (17 samples, 0.01%)std_detect::detect::cache::Cache::test (17 samples, 0.01%)core::sync::atomic::AtomicUsize::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 0.01%)std::sys::pal::unix::time::Timespec::new (29 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (132 samples, 0.10%)core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::ge (22 samples, 0.02%)core::cmp::PartialOrd::ge (22 samples, 0.02%)std::sys::pal::unix::time::Timespec::sub_timespec (67 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock_contended (18 samples, 0.01%)std::sys_common::net::TcpListener::socket_addr (29 samples, 
0.02%)std::sys_common::net::sockname (28 samples, 0.02%)syscall (552 samples, 0.42%)core::ptr::drop_in_place<core::cell::RefMut<core::option::Option<alloc::boxed::Box<tokio::runtime::scheduler::multi_thread::worker::Core>>>> (74 samples, 0.06%)core::ptr::drop_in_place<core::cell::BorrowRefMut> (74 samples, 0.06%)<core::cell::BorrowRefMut as core::ops::drop::Drop>::drop (74 samples, 0.06%)core::cell::Cell<T>::set (74 samples, 0.06%)core::cell::Cell<T>::replace (74 samples, 0.06%)core::mem::replace (74 samples, 0.06%)core::ptr::write (74 samples, 0.06%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::push_back_or_overflow (14 samples, 0.01%)tokio::runtime::context::with_scheduler (176 samples, 0.13%)std::thread::local::LocalKey<T>::try_with (152 samples, 0.12%)tokio::runtime::context::with_scheduler::{{closure}} (151 samples, 0.12%)tokio::runtime::context::scoped::Scoped<T>::with (150 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (150 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (150 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (71 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (16 samples, 0.01%)core::option::Option<T>::map (19 samples, 0.01%)<mio::event::events::Iter as core::iter::traits::iterator::Iterator>::next (24 samples, 0.02%)mio::poll::Poll::poll (53 samples, 0.04%)mio::sys::unix::selector::epoll::Selector::select (53 samples, 0.04%)core::result::Result<T,E>::map (28 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (28 samples, 0.02%)tokio::io::ready::Ready::from_mio (14 samples, 0.01%)tokio::runtime::io::driver::Driver::turn (126 samples, 
0.10%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (18 samples, 0.01%)[unknown] (51 samples, 0.04%)[unknown] (100 samples, 0.08%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (326 samples, 0.25%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (205 samples, 0.16%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (77 samples, 0.06%)[unknown] (26 samples, 0.02%)<tokio::util::linked_list::DrainFilter<T,F> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (396 samples, 0.30%)tokio::loom::std::mutex::Mutex<T>::lock (18 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (573 samples, 0.44%)core::sync::atomic::AtomicUsize::fetch_add (566 samples, 0.43%)core::sync::atomic::atomic_add (566 samples, 0.43%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (635 samples, 0.48%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (25 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::next_remote_task (44 samples, 0.03%)tokio::runtime::scheduler::inject::shared::Shared<T>::is_empty (21 samples, 0.02%)tokio::runtime::scheduler::inject::shared::Shared<T>::len (21 samples, 0.02%)core::sync::atomic::AtomicUsize::load (21 samples, 0.02%)core::sync::atomic::atomic_load (21 samples, 0.02%)tokio::runtime::task::core::Header::get_owner_id (32 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with (32 samples, 0.02%)tokio::runtime::task::core::Header::get_owner_id::{{closure}} (32 samples, 0.02%)std::sync::poison::Flag::done (32 samples, 
0.02%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>,tokio::runtime::task::core::Header>>> (43 samples, 0.03%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (43 samples, 0.03%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (123 samples, 0.09%)tokio::runtime::task::list::OwnedTasks<S>::remove (117 samples, 0.09%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (80 samples, 0.06%)tokio::runtime::scheduler::defer::Defer::wake (17 samples, 0.01%)std::sys::pal::unix::futex::futex_wait (46 samples, 0.04%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (71 samples, 0.05%)std::sync::condvar::Condvar::wait (56 samples, 0.04%)std::sys::sync::condvar::futex::Condvar::wait (56 samples, 0.04%)std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (56 samples, 0.04%)core::sync::atomic::AtomicUsize::compare_exchange (37 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (37 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (138 samples, 0.11%)tokio::runtime::driver::Driver::park (77 samples, 0.06%)tokio::runtime::driver::TimeDriver::park (77 samples, 0.06%)tokio::runtime::time::Driver::park (75 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Parker::park (266 samples, 0.20%)tokio::runtime::scheduler::multi_thread::park::Inner::park (266 samples, 0.20%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (432 samples, 0.33%)tokio::runtime::scheduler::multi_thread::worker::Core::should_notify_others (26 samples, 
0.02%)core::cell::RefCell<T>::borrow_mut (94 samples, 0.07%)core::cell::RefCell<T>::try_borrow_mut (94 samples, 0.07%)core::cell::BorrowRefMut::new (94 samples, 0.07%)tokio::runtime::coop::budget (142 samples, 0.11%)tokio::runtime::coop::with_budget (142 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (121 samples, 0.09%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (44 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (208 samples, 0.16%)tokio::runtime::signal::Driver::process (30 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (46 samples, 0.04%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (46 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (35 samples, 0.03%)tokio::runtime::task::core::Core<T,S>::set_stage (75 samples, 0.06%)core::sync::atomic::AtomicUsize::fetch_xor (76 samples, 0.06%)core::sync::atomic::atomic_xor (76 samples, 0.06%)tokio::runtime::task::state::State::transition_to_complete (79 samples, 0.06%)tokio::runtime::task::harness::Harness<T,S>::complete (113 samples, 0.09%)tokio::runtime::task::state::State::transition_to_terminal (18 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::dealloc (28 samples, 0.02%)core::mem::drop (18 samples, 0.01%)core::ptr::drop_in_place<alloc::boxed::Box<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (18 samples, 0.01%)core::ptr::drop_in_place<tokio::util::sharded_list::ShardGuard<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>> (16 samples, 
0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>>> (16 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (16 samples, 0.01%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (53 samples, 0.04%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (21 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (113 samples, 0.09%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::lock_shard (15 samples, 0.01%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (15 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (15 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (15 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (14 samples, 0.01%)tokio::runtime::task::raw::drop_abort_handle (82 samples, 0.06%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (23 samples, 0.02%)tokio::runtime::task::state::State::ref_dec (23 samples, 0.02%)core::sync::atomic::AtomicUsize::compare_exchange (15 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::task::raw::drop_join_handle_slow (34 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::drop_join_handle_slow (32 samples, 0.02%)tokio::runtime::task::state::State::unset_join_interested (23 samples, 0.02%)tokio::runtime::task::state::State::fetch_update (23 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (43 samples, 0.03%)core::num::<impl u32>::wrapping_add (23 samples, 
0.02%)core::option::Option<T>::or_else (37 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (36 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (36 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (38 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (59 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (45 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (132 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (63 samples, 0.05%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::run (290 samples, 0.22%)tokio::runtime::context::runtime::enter_runtime (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (290 samples, 0.22%)tokio::runtime::context::set_scheduler (290 samples, 0.22%)std::thread::local::LocalKey<T>::with (290 samples, 0.22%)std::thread::local::LocalKey<T>::try_with (290 samples, 0.22%)tokio::runtime::context::set_scheduler::{{closure}} (290 samples, 0.22%)tokio::runtime::context::scoped::Scoped<T>::set (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::Context::run (290 samples, 0.22%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (327 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (322 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::poll (333 samples, 0.25%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (342 samples, 0.26%)tokio::runtime::task::harness::poll_future::{{closure}} 
(342 samples, 0.26%)tokio::runtime::task::harness::poll_future (348 samples, 0.27%)std::panic::catch_unwind (347 samples, 0.26%)std::panicking::try (347 samples, 0.26%)std::panicking::try::do_call (347 samples, 0.26%)core::sync::atomic::AtomicUsize::compare_exchange (18 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (18 samples, 0.01%)tokio::runtime::task::state::State::transition_to_running (47 samples, 0.04%)tokio::runtime::task::state::State::fetch_update_action (47 samples, 0.04%)tokio::runtime::task::state::State::transition_to_running::{{closure}} (19 samples, 0.01%)tokio::runtime::task::raw::poll (427 samples, 0.33%)tokio::runtime::task::harness::Harness<T,S>::poll (408 samples, 0.31%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (407 samples, 0.31%)tokio::runtime::task::state::State::transition_to_idle (17 samples, 0.01%)core::array::<impl core::default::Default for [T: 32]>::default (21 samples, 0.02%)tokio::runtime::time::wheel::Wheel::poll (14 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (72 samples, 0.05%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process (23 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (15 samples, 0.01%)tokio::runtime::time::source::TimeSource::now (14 samples, 0.01%)tokio::runtime::time::Driver::park_internal (155 samples, 0.12%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (96 samples, 0.07%)tokio::runtime::time::wheel::level::slot_range (35 samples, 0.03%)core::num::<impl usize>::pow (35 samples, 0.03%)tokio::runtime::time::wheel::level::level_range (39 samples, 0.03%)tokio::runtime::time::wheel::level::slot_range (33 samples, 0.03%)core::num::<impl usize>::pow (33 samples, 0.03%)tokio::runtime::time::wheel::level::Level::next_expiration (208 samples, 0.16%)tokio::runtime::time::wheel::level::slot_range (48 samples, 0.04%)core::num::<impl usize>::pow (48 samples, 
0.04%)tokio::runtime::time::wheel::Wheel::next_expiration (277 samples, 0.21%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::is_empty (18 samples, 0.01%)core::option::Option<T>::is_some (18 samples, 0.01%)torrust_tracker::core::Tracker::authorize::{{closure}} (50 samples, 0.04%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (37 samples, 0.03%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_peers_for_client (27 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_peers_for_client (19 samples, 0.01%)core::iter::traits::iterator::Iterator::collect (17 samples, 0.01%)<alloc::vec::Vec<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (17 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (17 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter_nested::SpecFromIterNested<T,I>>::from_iter (17 samples, 0.01%)<std::hash::random::DefaultHasher as core::hash::Hasher>::finish (20 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::finish (20 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::finish (20 samples, 0.02%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (62 samples, 0.05%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (40 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (27 samples, 0.02%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (17 samples, 0.01%)torrust_tracker::servers::udp::peer_builder::from_request (24 
samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (19 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (355 samples, 0.27%)<F as core::future::into_future::IntoFuture>::into_future (24 samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (37 samples, 0.03%)core::sync::atomic::AtomicUsize::fetch_add (25 samples, 0.02%)core::sync::atomic::atomic_add (25 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet (14 samples, 0.01%)core::ptr::drop_in_place<torrust_tracker::servers::udp::UdpRequest> (20 samples, 0.02%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (20 samples, 0.02%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (20 samples, 0.02%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)core::result::Result<T,E>::map_err (16 samples, 0.01%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (136 samples, 0.10%)torrust_tracker::core::Tracker::announce::{{closure}} (173 samples, 0.13%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (267 samples, 0.20%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (30 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (423 samples, 0.32%)core::fmt::Formatter::new (26 samples, 0.02%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (80 samples, 0.06%)core::fmt::num::imp::fmt_u64 (58 samples, 0.04%)core::intrinsics::copy_nonoverlapping (15 samples, 0.01%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (74 samples, 0.06%)core::fmt::num::imp::fmt_u64 (70 samples, 0.05%)<T as alloc::string::ToString>::to_string (207 samples, 0.16%)core::option::Option<T>::expect (19 samples, 0.01%)core::ptr::drop_in_place<alloc::string::String> (18 samples, 0.01%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (18 samples, 0.01%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (18 samples, 
0.01%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (18 samples, 0.01%)torrust_tracker::servers::udp::logging::map_action_name (25 samples, 0.02%)alloc::str::<impl alloc::borrow::ToOwned for str>::to_owned (14 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (345 samples, 0.26%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (18 samples, 0.01%)core::fmt::num::imp::fmt_u64 (14 samples, 0.01%)<T as alloc::string::ToString>::to_string (35 samples, 0.03%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (1,067 samples, 0.81%)torrust_tracker::servers::udp::logging::log_response (72 samples, 0.05%)alloc::vec::from_elem (68 samples, 0.05%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (68 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (68 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (68 samples, 0.05%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (68 samples, 0.05%)alloc::alloc::Global::alloc_impl (68 samples, 0.05%)alloc::alloc::alloc_zeroed (68 samples, 0.05%)__rdl_alloc_zeroed (68 samples, 0.05%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (68 samples, 0.05%)[unknown] (48 samples, 0.04%)[unknown] (16 samples, 0.01%)[unknown] (28 samples, 0.02%)std::sys::pal::unix::cvt (134 samples, 0.10%)<isize as std::sys::pal::unix::IsMinusOne>::is_minus_one (134 samples, 0.10%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (1,908 samples, 1.45%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (504 samples, 0.38%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (382 samples, 0.29%)tokio::net::udp::UdpSocket::send_to::{{closure}} (344 samples, 0.26%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (332 samples, 0.25%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (304 samples, 
0.23%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (215 samples, 0.16%)mio::net::udp::UdpSocket::send_to (185 samples, 0.14%)mio::io_source::IoSource<T>::do_io (185 samples, 0.14%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (185 samples, 0.14%)mio::net::udp::UdpSocket::send_to::{{closure}} (185 samples, 0.14%)std::net::udp::UdpSocket::send_to (185 samples, 0.14%)std::sys_common::net::UdpSocket::send_to (169 samples, 0.13%)alloc::vec::Vec<T>::with_capacity (17 samples, 0.01%)alloc::vec::Vec<T,A>::with_capacity_in (17 samples, 0.01%)tokio::net::udp::UdpSocket::readable::{{closure}} (104 samples, 0.08%)tokio::net::udp::UdpSocket::ready::{{closure}} (85 samples, 0.06%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (190 samples, 0.14%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (49 samples, 0.04%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (28 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (330 samples, 0.25%)torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (327 samples, 0.25%)torrust_tracker::servers::udp::server::Udp::spawn_request_processor (92 samples, 0.07%)tokio::task::spawn::spawn (92 samples, 0.07%)tokio::task::spawn::spawn_inner (92 samples, 0.07%)tokio::runtime::context::current::with_current (92 samples, 0.07%)std::thread::local::LocalKey<T>::try_with (92 samples, 0.07%)tokio::runtime::context::current::with_current::{{closure}} (92 samples, 0.07%)core::option::Option<T>::map (92 samples, 0.07%)tokio::task::spawn::spawn_inner::{{closure}} (92 samples, 0.07%)tokio::runtime::scheduler::Handle::spawn (92 samples, 0.07%)tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (92 samples, 0.07%)tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (92 samples, 0.07%)tokio::runtime::task::list::OwnedTasks<S>::bind (90 samples, 
0.07%)tokio::runtime::task::new_task (89 samples, 0.07%)tokio::runtime::task::raw::RawTask::new (89 samples, 0.07%)tokio::runtime::task::core::Cell<T,S>::new (89 samples, 0.07%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (34 samples, 0.03%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (27 samples, 0.02%)alloc::sync::Arc<T>::new (21 samples, 0.02%)alloc::boxed::Box<T>::new (21 samples, 0.02%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (152 samples, 0.12%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (125 samples, 0.10%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (88 samples, 0.07%)core::option::Option<T>::is_some_and (18 samples, 0.01%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (17 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (17 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (17 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for 
torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (22 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (22 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (17 samples, 0.01%)std::sync::rwlock::RwLock<T>::read (16 samples, 0.01%)std::sys::sync::rwlock::futex::RwLock::read (16 samples, 0.01%)tracing::span::Span::log (26 samples, 0.02%)core::fmt::Arguments::new_v1 (15 samples, 0.01%)tracing_core::span::Record::is_empty (34 samples, 0.03%)tracing_core::field::ValueSet::is_empty (34 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::all (22 samples, 0.02%)tracing_core::field::ValueSet::is_empty::{{closure}} (18 samples, 0.01%)core::option::Option<T>::is_none (16 samples, 0.01%)core::option::Option<T>::is_some (16 samples, 0.01%)tracing::span::Span::record_all (143 samples, 0.11%)unlink_chunk (185 samples, 0.14%)uuid::builder::Builder::with_variant (48 samples, 0.04%)[unknown] (40 samples, 0.03%)uuid::builder::Builder::from_random_bytes (77 samples, 0.06%)uuid::builder::Builder::with_version (29 samples, 0.02%)[unknown] (24 samples, 0.02%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (161 samples, 0.12%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (161 samples, 
0.12%)[unknown] (92 samples, 0.07%)rand::rng::Rng::gen (162 samples, 0.12%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (162 samples, 0.12%)rand::rng::Rng::gen (162 samples, 0.12%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (162 samples, 0.12%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (162 samples, 0.12%)[unknown] (18,233 samples, 13.89%)[unknown]uuid::v4::<impl uuid::Uuid>::new_v4 (270 samples, 0.21%)uuid::rng::bytes (190 samples, 0.14%)rand::random (190 samples, 0.14%)__memcpy_avx512_unaligned_erms (69 samples, 0.05%)_int_free (23 samples, 0.02%)_int_malloc (23 samples, 0.02%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)advise_stack_range (31 samples, 0.02%)__GI_madvise (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (29 samples, 0.02%)[unknown] (28 samples, 0.02%)[unknown] (28 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (17 samples, 0.01%)std::sys::pal::unix::futex::futex_wait (31 samples, 0.02%)syscall (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 
samples, 0.02%)[unknown] (29 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (17 samples, 0.01%)std::sync::condvar::Condvar::wait_timeout (35 samples, 0.03%)std::sys::sync::condvar::futex::Condvar::wait_timeout (35 samples, 0.03%)std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (35 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (56 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (56 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (56 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock_contended (56 samples, 0.04%)std::sys::pal::unix::futex::futex_wait (56 samples, 0.04%)syscall (56 samples, 0.04%)[unknown] (56 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (53 samples, 0.04%)[unknown] (52 samples, 0.04%)[unknown] (46 samples, 0.04%)[unknown] (39 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (15 samples, 0.01%)[[vdso]] (26 samples, 0.02%)[[vdso]] (263 samples, 0.20%)__ieee754_pow_fma (26 samples, 0.02%)__pow (314 samples, 0.24%)std::f64::<impl f64>::powf (345 samples, 0.26%)__GI___clock_gettime (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::end_processing_scheduled_tasks (416 samples, 0.32%)std::time::Instant::now (20 samples, 0.02%)std::sys::pal::unix::time::Instant::now (20 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_processing_scheduled_tasks (24 samples, 0.02%)std::time::Instant::now (18 samples, 0.01%)std::sys::pal::unix::time::Instant::now (18 samples, 0.01%)mio::poll::Poll::poll (102 samples, 0.08%)mio::sys::unix::selector::epoll::Selector::select (102 samples, 0.08%)epoll_wait (99 samples, 0.08%)[unknown] (92 samples, 0.07%)[unknown] (91 samples, 
0.07%)[unknown] (91 samples, 0.07%)[unknown] (88 samples, 0.07%)[unknown] (85 samples, 0.06%)[unknown] (84 samples, 0.06%)[unknown] (43 samples, 0.03%)[unknown] (29 samples, 0.02%)[unknown] (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (125 samples, 0.10%)tokio::runtime::scheduler::multi_thread::park::Parker::park_timeout (125 samples, 0.10%)tokio::runtime::driver::Driver::park_timeout (125 samples, 0.10%)tokio::runtime::driver::TimeDriver::park_timeout (125 samples, 0.10%)tokio::runtime::time::Driver::park_timeout (125 samples, 0.10%)tokio::runtime::time::Driver::park_internal (116 samples, 0.09%)tokio::runtime::io::driver::Driver::turn (116 samples, 0.09%)tokio::runtime::scheduler::multi_thread::worker::Context::maintenance (148 samples, 0.11%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (111 samples, 0.08%)alloc::sync::Arc<T,A>::inner (111 samples, 0.08%)core::ptr::non_null::NonNull<T>::as_ref (111 samples, 0.08%)core::sync::atomic::AtomicUsize::compare_exchange (16 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (16 samples, 0.01%)core::bool::<impl bool>::then (88 samples, 0.07%)std::sys::pal::unix::futex::futex_wait (13,339 samples, 10.16%)std::sys::pal::..syscall (13,003 samples, 9.90%)syscall[unknown] (12,895 samples, 9.82%)[unknown][unknown] (12,759 samples, 9.72%)[unknown][unknown] (12,313 samples, 9.38%)[unknown][unknown] (12,032 samples, 9.16%)[unknown][unknown] (11,734 samples, 8.94%)[unknown][unknown] (11,209 samples, 8.54%)[unknown][unknown] (10,265 samples, 7.82%)[unknown][unknown] (9,345 samples, 7.12%)[unknown][unknown] (8,623 samples, 6.57%)[unknown][unknown] (7,744 samples, 5.90%)[unknow..[unknown] (5,922 samples, 4.51%)[unkn..[unknown] (4,459 samples, 3.40%)[un..[unknown] (2,808 samples, 2.14%)[..[unknown] (1,275 samples, 0.97%)[unknown] (1,022 samples, 0.78%)[unknown] (738 samples, 0.56%)[unknown] (607 samples, 0.46%)[unknown] (155 samples, 
0.12%)core::result::Result<T,E>::is_err (77 samples, 0.06%)core::result::Result<T,E>::is_ok (77 samples, 0.06%)std::sync::condvar::Condvar::wait (13,429 samples, 10.23%)std::sync::cond..std::sys::sync::condvar::futex::Condvar::wait (13,428 samples, 10.23%)std::sys::sync:..std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (13,428 samples, 10.23%)std::sys::sync:..std::sys::sync::mutex::futex::Mutex::lock (89 samples, 0.07%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (13,508 samples, 10.29%)tokio::runtime:..tokio::loom::std::mutex::Mutex<T>::lock (64 samples, 0.05%)std::sync::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (31 samples, 0.02%)core::sync::atomic::AtomicU32::compare_exchange (30 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (30 samples, 0.02%)core::sync::atomic::AtomicUsize::compare_exchange (15 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (38 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Parker::park (34 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::park (34 samples, 0.03%)core::array::<impl core::default::Default for [T: 32]>::default (17 samples, 0.01%)core::ptr::drop_in_place<[core::option::Option<core::task::wake::Waker>: 32]> (19 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (33 samples, 0.03%)tokio::runtime::time::wheel::level::slot_range (15 samples, 0.01%)core::num::<impl usize>::pow (15 samples, 0.01%)tokio::runtime::time::wheel::level::level_range (17 samples, 0.01%)tokio::runtime::time::wheel::level::slot_range (15 samples, 0.01%)core::num::<impl usize>::pow (15 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_expiration (95 samples, 0.07%)tokio::runtime::time::wheel::level::slot_range (41 samples, 0.03%)core::num::<impl usize>::pow (41 samples, 
0.03%)tokio::runtime::time::wheel::Wheel::next_expiration (129 samples, 0.10%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (202 samples, 0.15%)tokio::runtime::time::wheel::Wheel::poll_at (17 samples, 0.01%)tokio::runtime::time::wheel::Wheel::next_expiration (15 samples, 0.01%)<mio::event::events::Iter as core::iter::traits::iterator::Iterator>::next (38 samples, 0.03%)core::option::Option<T>::map (38 samples, 0.03%)core::result::Result<T,E>::map (31 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (31 samples, 0.02%)alloc::vec::Vec<T,A>::set_len (17 samples, 0.01%)[[vdso]] (28 samples, 0.02%)[unknown] (11,031 samples, 8.40%)[unknown][unknown] (10,941 samples, 8.33%)[unknown][unknown] (10,850 samples, 8.26%)[unknown][unknown] (10,691 samples, 8.14%)[unknown][unknown] (10,070 samples, 7.67%)[unknown][unknown] (9,737 samples, 7.42%)[unknown][unknown] (7,659 samples, 5.83%)[unknow..[unknown] (6,530 samples, 4.97%)[unkno..[unknown] (5,633 samples, 4.29%)[unkn..[unknown] (5,055 samples, 3.85%)[unk..[unknown] (4,046 samples, 3.08%)[un..[unknown] (2,911 samples, 2.22%)[..[unknown] (2,115 samples, 1.61%)[unknown] (1,226 samples, 0.93%)[unknown] (455 samples, 0.35%)[unknown] (408 samples, 0.31%)[unknown] (249 samples, 0.19%)[unknown] (202 samples, 0.15%)[unknown] (100 samples, 0.08%)mio::poll::Poll::poll (11,328 samples, 8.63%)mio::poll::P..mio::sys::unix::selector::epoll::Selector::select (11,328 samples, 8.63%)mio::sys::un..epoll_wait (11,229 samples, 8.55%)epoll_wait__GI___pthread_disable_asynccancel (50 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (47 samples, 0.04%)tokio::util::bit::Pack::pack (38 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (25 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (23 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (19 samples, 0.01%)tokio::runtime::io::driver::Driver::turn (11,595 samples, 
8.83%)tokio::runti..tokio::runtime::io::scheduled_io::ScheduledIo::wake (175 samples, 0.13%)__GI___clock_gettime (15 samples, 0.01%)std::sys::pal::unix::time::Timespec::now (18 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process (26 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (26 samples, 0.02%)tokio::time::clock::Clock::now (20 samples, 0.02%)tokio::time::clock::now (20 samples, 0.02%)std::time::Instant::now (20 samples, 0.02%)std::sys::pal::unix::time::Instant::now (20 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (17 samples, 0.01%)tokio::runtime::time::Driver::park_internal (11,686 samples, 8.90%)tokio::runtim..tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (11,957 samples, 9.11%)tokio::runtim..tokio::runtime::driver::Driver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::driver::TimeDriver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::time::Driver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::scheduler::multi_thread::park::Parker::park (25,502 samples, 19.42%)tokio::runtime::scheduler::mul..tokio::runtime::scheduler::multi_thread::park::Inner::park (25,502 samples, 19.42%)tokio::runtime::scheduler::mul..tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (25,547 samples, 19.46%)tokio::runtime::scheduler::mul..core::result::Result<T,E>::is_err (14 samples, 0.01%)core::result::Result<T,E>::is_ok (14 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (45 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (45 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (84 samples, 0.06%)std::sync::mutex::Mutex<T>::lock (81 samples, 0.06%)std::sys::sync::mutex::futex::Mutex::lock (73 samples, 0.06%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (122 samples, 0.09%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (90 samples, 0.07%)core::cmp::impls::<impl 
core::cmp::PartialEq for usize>::eq (90 samples, 0.07%)core::slice::<impl [T]>::contains (241 samples, 0.18%)<T as core::slice::cmp::SliceContains>::slice_contains (241 samples, 0.18%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (241 samples, 0.18%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (75 samples, 0.06%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (75 samples, 0.06%)core::sync::atomic::AtomicU32::compare_exchange (20 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (283 samples, 0.22%)tokio::loom::std::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (24 samples, 0.02%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (33 samples, 0.03%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (33 samples, 0.03%)core::cmp::impls::<impl core::cmp::PartialOrd for usize>::lt (33 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::unpark_worker_by_id (98 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (401 samples, 0.31%)alloc::vec::Vec<T,A>::push (14 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (15 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (15 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::unlock (14 samples, 0.01%)core::result::Result<T,E>::is_err (15 samples, 0.01%)core::result::Result<T,E>::is_ok (15 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (22 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (22 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (63 samples, 0.05%)std::sync::mutex::Mutex<T>::lock (62 samples, 
0.05%)std::sys::sync::mutex::futex::Mutex::lock (59 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock_contended (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (106 samples, 0.08%)tokio::runtime::scheduler::multi_thread::idle::State::dec_num_unparked (14 samples, 0.01%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (21 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (17 samples, 0.01%)alloc::sync::Arc<T,A>::inner (17 samples, 0.01%)core::ptr::non_null::NonNull<T>::as_ref (17 samples, 0.01%)core::sync::atomic::AtomicU32::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (68 samples, 0.05%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (51 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::len (33 samples, 0.03%)core::sync::atomic::AtomicU64::load (16 samples, 0.01%)core::sync::atomic::atomic_load (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (106 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::Context::park (26,672 samples, 20.31%)tokio::runtime::scheduler::multi..tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (272 samples, 0.21%)tokio::runtime::scheduler::multi_thread::worker::Core::has_tasks (33 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::has_tasks (24 samples, 0.02%)tokio::runtime::context::budget (18 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (18 samples, 0.01%)syscall (61 samples, 0.05%)__memcpy_avx512_unaligned_erms (172 samples, 0.13%)__memcpy_avx512_unaligned_erms (224 samples, 0.17%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (228 
samples, 0.17%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (228 samples, 0.17%)std::panic::catch_unwind (415 samples, 0.32%)std::panicking::try (415 samples, 0.32%)std::panicking::try::do_call (415 samples, 0.32%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (415 samples, 0.32%)core::ops::function::FnOnce::call_once (415 samples, 0.32%)tokio::runtime::task::harness::Harness<T,S>::complete::{{closure}} (415 samples, 0.32%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (415 samples, 0.32%)tokio::runtime::task::core::Core<T,S>::set_stage (410 samples, 0.31%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (27 samples, 0.02%)core::result::Result<T,E>::is_err (43 samples, 0.03%)core::result::Result<T,E>::is_ok (43 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::complete (570 samples, 0.43%)tokio::runtime::task::harness::Harness<T,S>::release (155 samples, 0.12%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (152 samples, 0.12%)tokio::runtime::task::list::OwnedTasks<S>::remove (152 samples, 0.12%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (103 samples, 0.08%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (65 samples, 0.05%)tokio::loom::std::mutex::Mutex<T>::lock (58 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (58 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (54 samples, 0.04%)std::io::stdio::stderr::INSTANCE (17 samples, 0.01%)tokio::runtime::coop::budget (26 samples, 0.02%)tokio::runtime::coop::with_budget (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (35 
samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (70 samples, 0.05%)__memcpy_avx512_unaligned_erms (42 samples, 0.03%)core::cmp::Ord::min (22 samples, 0.02%)core::cmp::min_by (22 samples, 0.02%)std::io::cursor::Cursor<T>::remaining_slice (27 samples, 0.02%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (30 samples, 0.02%)std::io::cursor::Cursor<T>::remaining_slice (24 samples, 0.02%)core::slice::index::<impl core::ops::index::Index<I> for [T]>::index (19 samples, 0.01%)<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::index (19 samples, 0.01%)<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked (19 samples, 0.01%)<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked (19 samples, 0.01%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (44 samples, 0.03%)std::io::impls::<impl std::io::Read for &[u8]>::read_exact (20 samples, 0.02%)byteorder::io::ReadBytesExt::read_i32 (46 samples, 0.04%)core::cmp::Ord::min (14 samples, 0.01%)core::cmp::min_by (14 samples, 0.01%)std::io::cursor::Cursor<T>::remaining_slice (19 samples, 0.01%)byteorder::io::ReadBytesExt::read_i64 (24 samples, 0.02%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (24 samples, 0.02%)aquatic_udp_protocol::request::Request::from_bytes (349 samples, 0.27%)__GI___lll_lock_wake_private (148 samples, 0.11%)[unknown] (139 samples, 0.11%)[unknown] (137 samples, 0.10%)[unknown] (123 samples, 0.09%)[unknown] (111 samples, 0.08%)[unknown] (98 samples, 0.07%)[unknown] (42 samples, 0.03%)[unknown] (30 samples, 0.02%)__GI___lll_lock_wait_private (553 samples, 0.42%)futex_wait (541 samples, 0.41%)[unknown] (536 samples, 0.41%)[unknown] (531 samples, 0.40%)[unknown] (524 samples, 0.40%)[unknown] (515 samples, 0.39%)[unknown] (498 samples, 0.38%)[unknown] (470 samples, 0.36%)[unknown] (435 samples, 0.33%)[unknown] (350 samples, 0.27%)[unknown] (327 samples, 
0.25%)[unknown] (290 samples, 0.22%)[unknown] (222 samples, 0.17%)[unknown] (160 samples, 0.12%)[unknown] (104 samples, 0.08%)[unknown] (33 samples, 0.03%)[unknown] (25 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (703 samples, 0.54%)__GI___libc_free (866 samples, 0.66%)tracing::span::Span::record_all (30 samples, 0.02%)unlink_chunk (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::UdpRequest> (899 samples, 0.68%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (899 samples, 0.68%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (899 samples, 0.68%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (899 samples, 0.68%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (899 samples, 0.68%)alloc::alloc::dealloc (899 samples, 0.68%)__rdl_dealloc (899 samples, 0.68%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (899 samples, 0.68%)core::result::Result<T,E>::expect (91 samples, 0.07%)core::result::Result<T,E>::map_err (28 samples, 0.02%)[[vdso]] (28 samples, 0.02%)__GI___clock_gettime (47 samples, 0.04%)std::time::Instant::elapsed (67 samples, 0.05%)std::time::Instant::now (54 samples, 0.04%)std::sys::pal::unix::time::Instant::now (54 samples, 0.04%)std::sys::pal::unix::time::Timespec::now (53 samples, 0.04%)std::sys::pal::unix::cvt (23 samples, 0.02%)__GI_getsockname (3,792 samples, 2.89%)__..[unknown] (3,714 samples, 2.83%)[u..[unknown] (3,661 samples, 2.79%)[u..[unknown] (3,557 samples, 2.71%)[u..[unknown] (3,416 samples, 2.60%)[u..[unknown] (2,695 samples, 2.05%)[..[unknown] (2,063 samples, 1.57%)[unknown] (891 samples, 0.68%)[unknown] (270 samples, 0.21%)[unknown] (99 samples, 0.08%)[unknown] (94 samples, 0.07%)[unknown] (84 samples, 0.06%)[unknown] (77 samples, 0.06%)[unknown] (25 samples, 0.02%)[unknown] (16 samples, 0.01%)std::sys_common::net::TcpListener::socket_addr::{{closure}} (3,800 samples, 
2.89%)st..tokio::net::udp::UdpSocket::local_addr (3,838 samples, 2.92%)to..mio::net::udp::UdpSocket::local_addr (3,838 samples, 2.92%)mi..std::net::tcp::TcpListener::local_addr (3,838 samples, 2.92%)st..std::sys_common::net::TcpListener::socket_addr (3,838 samples, 2.92%)st..std::sys_common::net::sockname (3,835 samples, 2.92%)st..[[vdso]] (60 samples, 0.05%)rand_chacha::guts::ChaCha::pos64 (168 samples, 0.13%)<ppv_lite86::soft::x2<W,G> as core::ops::arith::AddAssign>::add_assign (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::AddAssign>::add_assign (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::Add>::add (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_add_epi32 (26 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_shuffle_epi8 (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_or_si256 (29 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (31 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (31 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right24 (18 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right24 (18 samples, 0.01%)core::core_arch::x86::avx2::_mm256_shuffle_epi8 (18 samples, 0.01%)rand_chacha::guts::round (118 samples, 0.09%)rand_chacha::guts::refill_wide::impl_avx2 (312 samples, 0.24%)rand_chacha::guts::refill_wide::fn_impl (312 samples, 0.24%)rand_chacha::guts::refill_wide_impl (312 samples, 0.24%)<rand_chacha::chacha::ChaCha12Core as 
rand_core::block::BlockRngCore>::generate (384 samples, 0.29%)rand_chacha::guts::ChaCha::refill4 (384 samples, 0.29%)rand::rng::Rng::gen (432 samples, 0.33%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (432 samples, 0.33%)rand::rng::Rng::gen (432 samples, 0.33%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (432 samples, 0.33%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (432 samples, 0.33%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (432 samples, 0.33%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (432 samples, 0.33%)rand_core::block::BlockRng<R>::generate_and_set (392 samples, 0.30%)<rand::rngs::adapter::reseeding::ReseedingCore<R,Rsdr> as rand_core::block::BlockRngCore>::generate (392 samples, 0.30%)torrust_tracker::servers::udp::handlers::RequestId::make (440 samples, 0.34%)uuid::v4::<impl uuid::Uuid>::new_v4 (436 samples, 0.33%)uuid::rng::bytes (435 samples, 0.33%)rand::random (435 samples, 0.33%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_peers_for_client (34 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_peers_for_client (22 samples, 0.02%)core::iter::traits::iterator::Iterator::collect (16 samples, 0.01%)<alloc::vec::Vec<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (16 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (16 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter_nested::SpecFromIterNested<T,I>>::from_iter (16 samples, 
0.01%)<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::next (15 samples, 0.01%)core::iter::traits::iterator::Iterator::find (15 samples, 0.01%)core::iter::traits::iterator::Iterator::try_fold (15 samples, 0.01%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (31 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (45 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (45 samples, 0.03%)core::slice::iter::Iter<T>::post_inc_start (14 samples, 0.01%)core::ptr::non_null::NonNull<T>::add (14 samples, 0.01%)__memcmp_evex_movbe (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (26 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (165 samples, 0.13%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (165 samples, 0.13%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (165 samples, 0.13%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (165 samples, 0.13%)<u8 as core::slice::cmp::SliceOrd>::compare (165 samples, 0.13%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (339 samples, 0.26%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (308 samples, 0.23%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (308 samples, 0.23%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (342 samples, 0.26%)std::sys::sync::rwlock::futex::RwLock::spin_read (25 samples, 
0.02%)std::sys::sync::rwlock::futex::RwLock::spin_until (25 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read_contended (28 samples, 0.02%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (436 samples, 0.33%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (397 samples, 0.30%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (29 samples, 0.02%)std::sync::rwlock::RwLock<T>::read (29 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read (29 samples, 0.02%)__memcmp_evex_movbe (31 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (52 samples, 0.04%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (52 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (52 samples, 0.04%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (52 samples, 0.04%)<u8 as core::slice::cmp::SliceOrd>::compare (52 samples, 0.04%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (103 samples, 0.08%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (102 samples, 0.08%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (96 samples, 0.07%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (96 samples, 0.07%)<core::ptr::non_null::NonNull<T> 
as core::cmp::PartialEq>::eq (72 samples, 0.05%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (104 samples, 0.08%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (104 samples, 0.08%)core::slice::iter::Iter<T>::post_inc_start (32 samples, 0.02%)core::ptr::non_null::NonNull<T>::add (32 samples, 0.02%)__memcmp_evex_movbe (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (81 samples, 0.06%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (271 samples, 0.21%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (271 samples, 0.21%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (271 samples, 0.21%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (271 samples, 0.21%)<u8 as core::slice::cmp::SliceOrd>::compare (271 samples, 0.21%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (610 samples, 0.46%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (566 samples, 0.43%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (566 samples, 0.43%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Immut,K,V,Type>::keys (18 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (616 samples, 0.47%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::KV>::split (15 samples, 0.01%)alloc::collections::btree::map::entry::Entry<K,V,A>::or_insert (46 samples, 0.04%)alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (45 samples, 
0.03%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (40 samples, 0.03%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert (27 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (29 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (20 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (120 samples, 0.09%)alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (118 samples, 0.09%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Owned,K,V,alloc::collections::btree::node::marker::Leaf>::new_leaf (118 samples, 0.09%)alloc::collections::btree::node::LeafNode<K,V>::new (118 samples, 0.09%)alloc::boxed::Box<T,A>::new_uninit_in (118 samples, 0.09%)alloc::boxed::Box<T,A>::try_new_uninit_in (118 samples, 0.09%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (118 samples, 0.09%)alloc::alloc::Global::alloc_impl (118 samples, 0.09%)alloc::alloc::alloc (118 samples, 0.09%)__rdl_alloc (118 samples, 0.09%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (118 samples, 0.09%)__GI___libc_malloc (118 samples, 0.09%)_int_malloc (107 samples, 0.08%)_int_malloc (28 samples, 0.02%)__GI___libc_malloc (32 samples, 0.02%)__rdl_alloc (36 samples, 0.03%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (36 samples, 0.03%)alloc::sync::Arc<T>::new (42 samples, 0.03%)alloc::boxed::Box<T>::new (42 
samples, 0.03%)alloc::alloc::exchange_malloc (39 samples, 0.03%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (39 samples, 0.03%)alloc::alloc::Global::alloc_impl (39 samples, 0.03%)alloc::alloc::alloc (39 samples, 0.03%)core::mem::drop (15 samples, 0.01%)core::ptr::drop_in_place<core::option::Option<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (15 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (15 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (15 samples, 0.01%)__GI___libc_free (39 samples, 0.03%)_int_free (37 samples, 0.03%)get_max_fast (16 samples, 0.01%)core::option::Option<T>::is_some_and (50 samples, 0.04%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (50 samples, 0.04%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (50 samples, 0.04%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (50 samples, 0.04%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (290 samples, 0.22%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (284 samples, 0.22%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (255 samples, 0.19%)std::sys::sync::rwlock::futex::RwLock::spin_read (16 samples, 0.01%)std::sys::sync::rwlock::futex::RwLock::spin_until (16 samples, 
0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (21 samples, 0.02%)std::sync::rwlock::RwLock<T>::read (21 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read (21 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read_contended (21 samples, 0.02%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (1,147 samples, 0.87%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (1,144 samples, 0.87%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents_mut (32 samples, 0.02%)std::sync::rwlock::RwLock<T>::write (32 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::write (32 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::write_contended (32 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::spin_write (28 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::spin_until (28 samples, 0.02%)torrust_tracker::core::Tracker::announce::{{closure}} (1,597 samples, 1.22%)<core::net::socket_addr::SocketAddrV4 as core::hash::Hash>::hash (14 samples, 0.01%)<core::net::ip_addr::Ipv4Addr as core::hash::Hash>::hash (14 samples, 0.01%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (29 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (24 
samples, 0.02%)<core::time::Nanoseconds as core::hash::Hash>::hash (25 samples, 0.02%)core::hash::impls::<impl core::hash::Hash for u32>::hash (25 samples, 0.02%)core::hash::Hasher::write_u32 (25 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (25 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (25 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (36 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (37 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (37 samples, 0.03%)<core::time::Duration as core::hash::Hash>::hash (64 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for u64>::hash (39 samples, 0.03%)core::hash::Hasher::write_u64 (39 samples, 0.03%)<torrust_tracker_clock::time_extent::TimeExtent as core::hash::Hash>::hash (122 samples, 0.09%)core::hash::impls::<impl core::hash::Hash for u64>::hash (58 samples, 0.04%)core::hash::Hasher::write_u64 (58 samples, 0.04%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (58 samples, 0.04%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (58 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (57 samples, 0.04%)core::hash::sip::u8to64_le (23 samples, 0.02%)core::hash::Hasher::write_length_prefix (27 samples, 0.02%)core::hash::Hasher::write_usize (27 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (16 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (246 samples, 0.19%)core::array::<impl core::hash::Hash for [T: N]>::hash (93 samples, 0.07%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (93 samples, 
0.07%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (66 samples, 0.05%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (62 samples, 0.05%)core::hash::sip::u8to64_le (17 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::check (285 samples, 0.22%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (36 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (36 samples, 0.03%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (24 samples, 0.02%)std::time::SystemTime::now (19 samples, 0.01%)std::sys::pal::unix::time::SystemTime::now (19 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (1,954 samples, 1.49%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (24 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (18 samples, 0.01%)<core::time::Nanoseconds as core::hash::Hash>::hash (20 samples, 0.02%)core::hash::impls::<impl core::hash::Hash for u32>::hash (20 samples, 0.02%)core::hash::Hasher::write_u32 (20 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (20 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (20 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (44 samples, 0.03%)<core::time::Duration as core::hash::Hash>::hash (65 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for u64>::hash (45 samples, 0.03%)core::hash::Hasher::write_u64 (45 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (45 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (45 samples, 
0.03%)<torrust_tracker_clock::time_extent::TimeExtent as core::hash::Hash>::hash (105 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for u64>::hash (40 samples, 0.03%)core::hash::Hasher::write_u64 (40 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (40 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (40 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (39 samples, 0.03%)core::hash::Hasher::write_length_prefix (34 samples, 0.03%)core::hash::Hasher::write_usize (34 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (34 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (34 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (33 samples, 0.03%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (231 samples, 0.18%)core::array::<impl core::hash::Hash for [T: N]>::hash (100 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (100 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (66 samples, 0.05%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (61 samples, 0.05%)core::hash::sip::u8to64_le (16 samples, 0.01%)_int_free (16 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (270 samples, 0.21%)torrust_tracker::servers::udp::connection_cookie::make (268 samples, 0.20%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (36 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (35 samples, 0.03%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (31 samples, 
0.02%)std::time::SystemTime::now (26 samples, 0.02%)std::sys::pal::unix::time::SystemTime::now (26 samples, 0.02%)torrust_tracker::core::ScrapeData::add_file (19 samples, 0.01%)std::collections::hash::map::HashMap<K,V,S>::insert (19 samples, 0.01%)hashbrown::map::HashMap<K,V,S,A>::insert (19 samples, 0.01%)hashbrown::raw::RawTable<T,A>::find_or_find_insert_slot (16 samples, 0.01%)hashbrown::raw::RawTable<T,A>::reserve (16 samples, 0.01%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (17 samples, 0.01%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (17 samples, 0.01%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (17 samples, 0.01%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (17 samples, 0.01%)<u8 as core::slice::cmp::SliceOrd>::compare (17 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (53 samples, 0.04%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (53 samples, 0.04%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (2,336 samples, 1.78%)t..torrust_tracker::servers::udp::handlers::handle_scrape::{{closure}} (101 samples, 0.08%)torrust_tracker::core::Tracker::scrape::{{closure}} (90 samples, 0.07%)torrust_tracker::core::Tracker::get_swarm_metadata (68 samples, 0.05%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for 
torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (64 samples, 0.05%)alloc::raw_vec::finish_grow (19 samples, 0.01%)alloc::vec::Vec<T,A>::reserve (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::grow_amortized (21 samples, 0.02%)<alloc::string::String as core::fmt::Write>::write_str (23 samples, 0.02%)alloc::string::String::push_str (23 samples, 0.02%)alloc::vec::Vec<T,A>::extend_from_slice (23 samples, 0.02%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (23 samples, 0.02%)alloc::vec::Vec<T,A>::append_elements (23 samples, 0.02%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (85 samples, 0.06%)core::fmt::num::imp::fmt_u64 (78 samples, 0.06%)<alloc::string::String as core::fmt::Write>::write_str (15 samples, 0.01%)alloc::string::String::push_str (15 samples, 0.01%)alloc::vec::Vec<T,A>::extend_from_slice (15 samples, 0.01%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (15 samples, 0.01%)alloc::vec::Vec<T,A>::append_elements (15 samples, 0.01%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (37 samples, 0.03%)core::fmt::num::imp::fmt_u64 (36 samples, 0.03%)<T as alloc::string::ToString>::to_string (141 samples, 0.11%)core::option::Option<T>::expect (34 samples, 0.03%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (28 samples, 0.02%)alloc::alloc::dealloc (28 samples, 0.02%)__rdl_dealloc (28 samples, 0.02%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (28 samples, 0.02%)core::ptr::drop_in_place<alloc::string::String> (55 samples, 0.04%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (55 samples, 
0.04%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (55 samples, 0.04%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (55 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::current_memory (20 samples, 0.02%)torrust_tracker::servers::udp::logging::map_action_name (16 samples, 0.01%)binascii::bin2hex (51 samples, 0.04%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (16 samples, 0.01%)core::fmt::write (25 samples, 0.02%)core::fmt::rt::Argument::fmt (15 samples, 0.01%)core::fmt::Formatter::write_fmt (87 samples, 0.07%)core::str::converts::from_utf8 (43 samples, 0.03%)core::str::validations::run_utf8_validation (37 samples, 0.03%)torrust_tracker_primitives::info_hash::InfoHash::to_hex_string (161 samples, 0.12%)<T as alloc::string::ToString>::to_string (161 samples, 0.12%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (156 samples, 0.12%)torrust_tracker::servers::udp::logging::log_request (479 samples, 0.36%)[[vdso]] (51 samples, 0.04%)alloc::raw_vec::finish_grow (56 samples, 0.04%)alloc::vec::Vec<T,A>::reserve (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::reserve (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::grow_amortized (64 samples, 0.05%)<alloc::string::String as core::fmt::Write>::write_str (65 samples, 0.05%)alloc::string::String::push_str (65 samples, 0.05%)alloc::vec::Vec<T,A>::extend_from_slice (65 samples, 0.05%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (65 samples, 0.05%)alloc::vec::Vec<T,A>::append_elements (65 samples, 0.05%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (114 samples, 0.09%)core::fmt::num::imp::fmt_u64 (110 samples, 0.08%)<T as alloc::string::ToString>::to_string (132 samples, 0.10%)core::option::Option<T>::expect (20 samples, 0.02%)core::ptr::drop_in_place<alloc::string::String> (22 samples, 
0.02%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (22 samples, 0.02%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (22 samples, 0.02%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (22 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (8,883 samples, 6.77%)torrust_t..torrust_tracker::servers::udp::logging::log_response (238 samples, 0.18%)__GI___lll_lock_wait_private (14 samples, 0.01%)futex_wait (14 samples, 0.01%)__GI___lll_lock_wake_private (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (17 samples, 0.01%)_int_malloc (191 samples, 0.15%)__libc_calloc (238 samples, 0.18%)__memcpy_avx512_unaligned_erms (34 samples, 0.03%)alloc::vec::from_elem (316 samples, 0.24%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (316 samples, 0.24%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (316 samples, 0.24%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (316 samples, 0.24%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (312 samples, 0.24%)alloc::alloc::Global::alloc_impl (312 samples, 0.24%)alloc::alloc::alloc_zeroed (312 samples, 0.24%)__rdl_alloc_zeroed (312 samples, 0.24%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (312 samples, 0.24%)byteorder::ByteOrder::write_i32 (18 samples, 0.01%)<byteorder::BigEndian as byteorder::ByteOrder>::write_u32 (18 samples, 0.01%)core::num::<impl u32>::to_be_bytes (18 samples, 0.01%)core::num::<impl u32>::to_be (18 samples, 0.01%)core::num::<impl u32>::swap_bytes (18 samples, 0.01%)byteorder::io::WriteBytesExt::write_i32 (89 samples, 0.07%)std::io::Write::write_all (71 samples, 0.05%)<std::io::cursor::Cursor<alloc::vec::Vec<u8,A>> as std::io::Write>::write (71 samples, 0.05%)std::io::cursor::vec_write (71 samples, 0.05%)std::io::cursor::vec_write_unchecked (51 samples, 
0.04%)core::ptr::mut_ptr::<impl *mut T>::copy_from (51 samples, 0.04%)core::intrinsics::copy (51 samples, 0.04%)aquatic_udp_protocol::response::Response::write (227 samples, 0.17%)byteorder::io::WriteBytesExt::write_i64 (28 samples, 0.02%)std::io::Write::write_all (21 samples, 0.02%)<std::io::cursor::Cursor<alloc::vec::Vec<u8,A>> as std::io::Write>::write (21 samples, 0.02%)std::io::cursor::vec_write (21 samples, 0.02%)std::io::cursor::vec_write_unchecked (21 samples, 0.02%)core::ptr::mut_ptr::<impl *mut T>::copy_from (21 samples, 0.02%)core::intrinsics::copy (21 samples, 0.02%)__GI___lll_lock_wake_private (17 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (14 samples, 0.01%)__GI___lll_lock_wait_private (16 samples, 0.01%)futex_wait (15 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (136 samples, 0.10%)__GI___libc_free (206 samples, 0.16%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (211 samples, 0.16%)alloc::alloc::dealloc (211 samples, 0.16%)__rdl_dealloc (211 samples, 0.16%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (211 samples, 0.16%)core::ptr::drop_in_place<std::io::cursor::Cursor<alloc::vec::Vec<u8>>> (224 samples, 0.17%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (224 samples, 0.17%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (224 samples, 0.17%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (224 samples, 0.17%)std::io::cursor::Cursor<T>::new (56 samples, 0.04%)tokio::io::ready::Ready::intersection (23 samples, 0.02%)tokio::io::ready::Ready::from_interest (23 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (83 samples, 0.06%)[unknown] (32,674 samples, 24.88%)[unknown][unknown] (32,402 samples, 24.68%)[unknown][unknown] (32,272 samples, 24.58%)[unknown][unknown] (32,215 samples, 24.54%)[unknown][unknown] (31,174 
samples, 23.74%)[unknown][unknown] (30,794 samples, 23.45%)[unknown][unknown] (30,036 samples, 22.88%)[unknown][unknown] (28,639 samples, 21.81%)[unknown][unknown] (27,908 samples, 21.25%)[unknown][unknown] (26,013 samples, 19.81%)[unknown][unknown] (23,181 samples, 17.65%)[unknown][unknown] (19,559 samples, 14.90%)[unknown][unknown] (18,052 samples, 13.75%)[unknown][unknown] (15,794 samples, 12.03%)[unknown][unknown] (14,740 samples, 11.23%)[unknown][unknown] (12,486 samples, 9.51%)[unknown][unknown] (11,317 samples, 8.62%)[unknown][unknown] (10,725 samples, 8.17%)[unknown][unknown] (10,017 samples, 7.63%)[unknown][unknown] (9,713 samples, 7.40%)[unknown][unknown] (8,432 samples, 6.42%)[unknown][unknown] (8,062 samples, 6.14%)[unknown][unknown] (6,973 samples, 5.31%)[unknow..[unknown] (5,328 samples, 4.06%)[unk..[unknown] (4,352 samples, 3.31%)[un..[unknown] (3,786 samples, 2.88%)[u..[unknown] (3,659 samples, 2.79%)[u..[unknown] (3,276 samples, 2.50%)[u..[unknown] (2,417 samples, 1.84%)[..[unknown] (2,115 samples, 1.61%)[unknown] (1,610 samples, 1.23%)[unknown] (422 samples, 0.32%)[unknown] (84 samples, 0.06%)[unknown] (69 samples, 0.05%)__GI___pthread_disable_asynccancel (67 samples, 0.05%)__libc_sendto (32,896 samples, 25.05%)__libc_sendtotokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (32,981 samples, 25.12%)tokio::net::udp::UdpSocket::send_to_addr..mio::net::udp::UdpSocket::send_to (32,981 samples, 25.12%)mio::net::udp::UdpSocket::send_tomio::io_source::IoSource<T>::do_io (32,981 samples, 25.12%)mio::io_source::IoSource<T>::do_iomio::sys::unix::stateless_io_source::IoSourceState::do_io (32,981 samples, 25.12%)mio::sys::unix::stateless_io_source::IoS..mio::net::udp::UdpSocket::send_to::{{closure}} (32,981 samples, 25.12%)mio::net::udp::UdpSocket::send_to::{{clo..std::net::udp::UdpSocket::send_to (32,981 samples, 25.12%)std::net::udp::UdpSocket::send_tostd::sys_common::net::UdpSocket::send_to (32,981 samples, 
25.12%)std::sys_common::net::UdpSocket::send_tostd::sys::pal::unix::cvt (85 samples, 0.06%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (44,349 samples, 33.78%)torrust_tracker::servers::udp::server::Udp::process_req..torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (43,412 samples, 33.06%)torrust_tracker::servers::udp::server::Udp::process_va..torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (34,320 samples, 26.14%)torrust_tracker::servers::udp::server::Udp..torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (33,360 samples, 25.41%)torrust_tracker::servers::udp::server::Ud..tokio::net::udp::UdpSocket::send_to::{{closure}} (33,227 samples, 25.31%)tokio::net::udp::UdpSocket::send_to::{{c..tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (33,142 samples, 25.24%)tokio::net::udp::UdpSocket::send_to_addr..tokio::runtime::io::registration::Registration::async_io::{{closure}} (33,115 samples, 25.22%)tokio::runtime::io::registration::Regist..tokio::runtime::io::registration::Registration::readiness::{{closure}} (28 samples, 0.02%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (18 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (15 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (14 samples, 0.01%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (15 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (15 samples, 0.01%)core::sync::atomic::atomic_add (15 samples, 0.01%)__GI___lll_lock_wait_private (16 samples, 0.01%)futex_wait (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (135 samples, 0.10%)__GI___libc_free (147 samples, 0.11%)syscall (22 samples, 
0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Core<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>> (15 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::dealloc (24 samples, 0.02%)core::mem::drop (24 samples, 0.02%)core::ptr::drop_in_place<alloc::boxed::Box<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (24 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>> (24 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::abort::AbortHandle> (262 samples, 0.20%)<tokio::runtime::task::abort::AbortHandle as core::ops::drop::Drop>::drop (262 samples, 0.20%)tokio::runtime::task::raw::RawTask::drop_abort_handle (256 samples, 0.19%)tokio::runtime::task::raw::drop_abort_handle (59 samples, 0.04%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (50 samples, 0.04%)tokio::runtime::task::state::State::ref_dec (50 samples, 0.04%)tokio::runtime::task::raw::RawTask::drop_join_handle_slow (16 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::join::JoinHandle<()>> (47 samples, 0.04%)<tokio::runtime::task::join::JoinHandle<T> as core::ops::drop::Drop>::drop (47 samples, 0.04%)tokio::runtime::task::state::State::drop_join_handle_fast (19 samples, 0.01%)core::sync::atomic::AtomicUsize::compare_exchange_weak (19 samples, 0.01%)core::sync::atomic::atomic_compare_exchange_weak (19 samples, 0.01%)ringbuf::ring_buffer::base::RbBase::is_full (14 samples, 0.01%)<ringbuf::ring_buffer::shared::SharedRb<T,C> as ringbuf::ring_buffer::base::RbBase<T>>::head (14 samples, 0.01%)core::sync::atomic::AtomicUsize::load (14 samples, 
0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)ringbuf::consumer::Consumer<T,R>::advance (29 samples, 0.02%)ringbuf::ring_buffer::base::RbRead::advance_head (29 samples, 0.02%)ringbuf::ring_buffer::rb::Rb::pop (50 samples, 0.04%)ringbuf::consumer::Consumer<T,R>::pop (50 samples, 0.04%)ringbuf::producer::Producer<T,R>::advance (23 samples, 0.02%)ringbuf::ring_buffer::base::RbWrite::advance_tail (23 samples, 0.02%)core::num::nonzero::<impl core::ops::arith::Rem<core::num::nonzero::NonZero<usize>> for usize>::rem (19 samples, 0.01%)ringbuf::ring_buffer::rb::Rb::push_overwrite (107 samples, 0.08%)ringbuf::ring_buffer::rb::Rb::push (43 samples, 0.03%)ringbuf::producer::Producer<T,R>::push (43 samples, 0.03%)tokio::runtime::task::abort::AbortHandle::is_finished (84 samples, 0.06%)tokio::runtime::task::state::Snapshot::is_complete (84 samples, 0.06%)tokio::runtime::task::join::JoinHandle<T>::abort_handle (17 samples, 0.01%)tokio::runtime::task::raw::RawTask::ref_inc (17 samples, 0.01%)tokio::runtime::task::state::State::ref_inc (17 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (14 samples, 0.01%)core::sync::atomic::atomic_add (14 samples, 0.01%)__GI___lll_lock_wake_private (22 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)malloc_consolidate (95 samples, 0.07%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (76 samples, 0.06%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (31 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (26 samples, 0.02%)_int_malloc (282 samples, 0.21%)__GI___libc_malloc (323 samples, 0.25%)alloc::vec::Vec<T>::with_capacity (326 samples, 0.25%)alloc::vec::Vec<T,A>::with_capacity_in (326 samples, 0.25%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (324 samples, 0.25%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (324 samples, 
0.25%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (324 samples, 0.25%)alloc::alloc::Global::alloc_impl (324 samples, 0.25%)alloc::alloc::alloc (324 samples, 0.25%)__rdl_alloc (324 samples, 0.25%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (324 samples, 0.25%)tokio::io::ready::Ready::intersection (24 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (199 samples, 0.15%)tokio::util::bit::Pack::unpack (16 samples, 0.01%)tokio::util::bit::unpack (16 samples, 0.01%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (19 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (17 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (16 samples, 0.01%)tokio::net::udp::UdpSocket::readable::{{closure}} (222 samples, 0.17%)tokio::net::udp::UdpSocket::ready::{{closure}} (222 samples, 0.17%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (50 samples, 0.04%)std::io::error::repr_bitpacked::Repr::data (14 samples, 0.01%)std::io::error::repr_bitpacked::decode_repr (14 samples, 0.01%)std::io::error::Error::kind (16 samples, 0.01%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (14 samples, 0.01%)[unknown] (8,756 samples, 6.67%)[unknown][unknown] (8,685 samples, 6.61%)[unknown][unknown] (8,574 samples, 6.53%)[unknown][unknown] (8,415 samples, 6.41%)[unknown][unknown] (7,686 samples, 5.85%)[unknow..[unknown] (7,239 samples, 5.51%)[unknow..[unknown] (6,566 samples, 5.00%)[unkno..[unknown] (5,304 samples, 4.04%)[unk..[unknown] (4,008 samples, 3.05%)[un..[unknown] (3,571 samples, 2.72%)[u..[unknown] (2,375 samples, 1.81%)[..[unknown] (1,844 samples, 1.40%)[unknown] (1,030 samples, 0.78%)[unknown] (344 samples, 0.26%)[unknown] (113 samples, 0.09%)__libc_recvfrom (8,903 samples, 6.78%)__libc_re..__GI___pthread_disable_asynccancel (22 samples, 
0.02%)std::sys::pal::unix::cvt (20 samples, 0.02%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}}::{{closure}} (9,005 samples, 6.86%)tokio::ne..mio::net::udp::UdpSocket::recv_from (8,964 samples, 6.83%)mio::net:..mio::io_source::IoSource<T>::do_io (8,964 samples, 6.83%)mio::io_s..mio::sys::unix::stateless_io_source::IoSourceState::do_io (8,964 samples, 6.83%)mio::sys:..mio::net::udp::UdpSocket::recv_from::{{closure}} (8,964 samples, 6.83%)mio::net:..std::net::udp::UdpSocket::recv_from (8,964 samples, 6.83%)std::net:..std::sys_common::net::UdpSocket::recv_from (8,964 samples, 6.83%)std::sys_..std::sys::pal::unix::net::Socket::recv_from (8,964 samples, 6.83%)std::sys:..std::sys::pal::unix::net::Socket::recv_from_with_flags (8,964 samples, 6.83%)std::sys:..std::sys_common::net::sockaddr_to_addr (23 samples, 0.02%)tokio::runtime::io::registration::Registration::clear_readiness (18 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::clear_readiness (18 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (32 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (9,967 samples, 7.59%)torrust_tr..tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (9,291 samples, 7.08%)tokio::ne..tokio::runtime::io::registration::Registration::async_io::{{closure}} (9,287 samples, 7.07%)tokio::ru..tokio::runtime::io::registration::Registration::readiness::{{closure}} (45 samples, 0.03%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (41 samples, 0.03%)__memcpy_avx512_unaligned_erms (424 samples, 0.32%)__memcpy_avx512_unaligned_erms (493 samples, 0.38%)__memcpy_avx512_unaligned_erms (298 samples, 0.23%)syscall (1,105 samples, 0.84%)[unknown] (1,095 samples, 0.83%)[unknown] (1,091 samples, 0.83%)[unknown] (1,049 samples, 0.80%)[unknown] (998 samples, 0.76%)[unknown] (907 samples, 0.69%)[unknown] (710 samples, 0.54%)[unknown] (635 samples, 0.48%)[unknown] (538 samples, 
0.41%)[unknown] (358 samples, 0.27%)[unknown] (256 samples, 0.19%)[unknown] (153 samples, 0.12%)[unknown] (96 samples, 0.07%)[unknown] (81 samples, 0.06%)tokio::runtime::context::with_scheduler (36 samples, 0.03%)std::thread::local::LocalKey<T>::try_with (31 samples, 0.02%)tokio::runtime::context::with_scheduler::{{closure}} (27 samples, 0.02%)tokio::runtime::context::scoped::Scoped<T>::with (27 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (25 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (15 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (340 samples, 0.26%)core::sync::atomic::atomic_add (340 samples, 0.26%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (354 samples, 0.27%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (367 samples, 0.28%)[unknown] (95 samples, 0.07%)[unknown] (93 samples, 0.07%)[unknown] (92 samples, 0.07%)[unknown] (90 samples, 0.07%)[unknown] (82 samples, 0.06%)[unknown] (73 samples, 0.06%)[unknown] (63 samples, 0.05%)[unknown] (44 samples, 0.03%)[unknown] (40 samples, 0.03%)[unknown] (35 samples, 0.03%)[unknown] (30 samples, 0.02%)[unknown] (22 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (17 samples, 0.01%)tokio::runtime::driver::Handle::unpark (99 samples, 0.08%)tokio::runtime::driver::IoHandle::unpark (99 samples, 0.08%)tokio::runtime::io::driver::Handle::unpark (99 samples, 0.08%)mio::waker::Waker::wake (99 samples, 0.08%)mio::sys::unix::waker::fdbased::Waker::wake (99 samples, 0.08%)mio::sys::unix::waker::eventfd::WakerInternal::wake (99 samples, 0.08%)<&std::fs::File as std::io::Write>::write (99 samples, 
0.08%)std::sys::pal::unix::fs::File::write (99 samples, 0.08%)std::sys::pal::unix::fd::FileDesc::write (99 samples, 0.08%)__GI___libc_write (99 samples, 0.08%)__GI___libc_write (99 samples, 0.08%)tokio::runtime::context::with_scheduler (1,615 samples, 1.23%)std::thread::local::LocalKey<T>::try_with (1,613 samples, 1.23%)tokio::runtime::context::with_scheduler::{{closure}} (1,612 samples, 1.23%)tokio::runtime::context::scoped::Scoped<T>::with (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (1,609 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (1,609 samples, 1.23%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (101 samples, 0.08%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (101 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_option_task_without_yield (1,647 samples, 1.25%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task (1,646 samples, 1.25%)tokio::runtime::scheduler::multi_thread::worker::with_current (1,646 samples, 1.25%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (23 samples, 0.02%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (18 samples, 0.01%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (104 samples, 0.08%)tokio::util::sharded_list::ShardedList<L,<L as 
tokio::util::linked_list::Link>::Target>::lock_shard (60 samples, 0.05%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (57 samples, 0.04%)tokio::loom::std::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (49 samples, 0.04%)core::sync::atomic::AtomicU32::compare_exchange (38 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (38 samples, 0.03%)__memcpy_avx512_unaligned_erms (162 samples, 0.12%)__memcpy_avx512_unaligned_erms (34 samples, 0.03%)__GI___lll_lock_wake_private (127 samples, 0.10%)[unknown] (125 samples, 0.10%)[unknown] (124 samples, 0.09%)[unknown] (119 samples, 0.09%)[unknown] (110 samples, 0.08%)[unknown] (106 samples, 0.08%)[unknown] (87 samples, 0.07%)[unknown] (82 samples, 0.06%)[unknown] (51 samples, 0.04%)[unknown] (27 samples, 0.02%)[unknown] (19 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (77 samples, 0.06%)[unknown] (1,207 samples, 0.92%)[unknown] (1,146 samples, 0.87%)[unknown] (1,126 samples, 0.86%)[unknown] (1,091 samples, 0.83%)[unknown] (1,046 samples, 0.80%)[unknown] (962 samples, 0.73%)[unknown] (914 samples, 0.70%)[unknown] (848 samples, 0.65%)[unknown] (774 samples, 0.59%)[unknown] (580 samples, 0.44%)[unknown] (456 samples, 0.35%)[unknown] (305 samples, 0.23%)[unknown] (85 samples, 0.06%)__GI_mprotect (2,474 samples, 1.88%)_..[unknown] (2,457 samples, 1.87%)[..[unknown] (2,440 samples, 1.86%)[..[unknown] (2,436 samples, 1.86%)[..[unknown] (2,435 samples, 1.85%)[..[unknown] (2,360 samples, 1.80%)[..[unknown] (2,203 samples, 1.68%)[unknown] (1,995 samples, 1.52%)[unknown] (1,709 samples, 1.30%)[unknown] (1,524 samples, 1.16%)[unknown] (1,193 samples, 0.91%)[unknown] (865 samples, 0.66%)[unknown] (539 samples, 0.41%)[unknown] (259 samples, 0.20%)[unknown] (80 samples, 0.06%)[unknown] (29 samples, 0.02%)sysmalloc (3,786 samples, 2.88%)sy..grow_heap (2,509 samples, 
1.91%)g.._int_malloc (4,038 samples, 3.08%)_in..unlink_chunk (31 samples, 0.02%)alloc::alloc::exchange_malloc (4,335 samples, 3.30%)all..<alloc::alloc::Global as core::alloc::Allocator>::allocate (4,329 samples, 3.30%)<al..alloc::alloc::Global::alloc_impl (4,329 samples, 3.30%)all..alloc::alloc::alloc (4,329 samples, 3.30%)all..__rdl_alloc (4,329 samples, 3.30%)__r..std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (4,329 samples, 3.30%)std..std::sys::pal::unix::alloc::aligned_malloc (4,329 samples, 3.30%)std..__posix_memalign (4,297 samples, 3.27%)__p..__posix_memalign (4,297 samples, 3.27%)__p.._mid_memalign (4,297 samples, 3.27%)_mi.._int_memalign (4,149 samples, 3.16%)_in..sysmalloc (18 samples, 0.01%)core::option::Option<T>::map (6,666 samples, 5.08%)core::..tokio::task::spawn::spawn_inner::{{closure}} (6,665 samples, 5.08%)tokio:..tokio::runtime::scheduler::Handle::spawn (6,665 samples, 5.08%)tokio:..tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (6,664 samples, 5.08%)tokio:..tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (6,661 samples, 5.07%)tokio:..tokio::runtime::task::list::OwnedTasks<S>::bind (4,692 samples, 3.57%)toki..tokio::runtime::task::new_task (4,579 samples, 3.49%)tok..tokio::runtime::task::raw::RawTask::new (4,579 samples, 3.49%)tok..tokio::runtime::task::core::Cell<T,S>::new (4,579 samples, 3.49%)tok..alloc::boxed::Box<T>::new (4,389 samples, 3.34%)all..tokio::runtime::context::current::with_current (7,636 samples, 5.82%)tokio::..std::thread::local::LocalKey<T>::try_with (7,635 samples, 5.81%)std::th..tokio::runtime::context::current::with_current::{{closure}} (7,188 samples, 5.47%)tokio::..tokio::task::spawn::spawn (7,670 samples, 5.84%)tokio::..tokio::task::spawn::spawn_inner (7,670 samples, 5.84%)tokio::..tokio::runtime::task::id::Id::next (24 samples, 0.02%)core::sync::atomic::AtomicU64::fetch_add (24 samples, 0.02%)core::sync::atomic::atomic_add (24 
samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (62,691 samples, 47.75%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_muttokio::runtime::task::core::Core<T,S>::poll::{{closure}} (62,691 samples, 47.75%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}}torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (18,228 samples, 13.88%)torrust_tracker::serv..torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (18,226 samples, 13.88%)torrust_tracker::serv..torrust_tracker::servers::udp::server::Udp::spawn_request_processor (7,679 samples, 5.85%)torrust..__memcpy_avx512_unaligned_erms (38 samples, 0.03%)__memcpy_avx512_unaligned_erms (407 samples, 0.31%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (411 samples, 0.31%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (411 samples, 0.31%)tokio::runtime::task::core::Core<T,S>::poll (63,150 samples, 48.10%)tokio::runtime::task::core::Core<T,S>::polltokio::runtime::task::core::Core<T,S>::drop_future_or_output (459 samples, 0.35%)tokio::runtime::task::core::Core<T,S>::set_stage (459 samples, 0.35%)__memcpy_avx512_unaligned_erms (16 samples, 0.01%)__memcpy_avx512_unaligned_erms (398 samples, 0.30%)__memcpy_avx512_unaligned_erms (325 samples, 0.25%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (330 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (330 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::set_stage (731 samples, 0.56%)tokio::runtime::task::harness::poll_future (63,908 samples, 48.67%)tokio::runtime::task::harness::poll_futurestd::panic::catch_unwind (63,908 samples, 48.67%)std::panic::catch_unwindstd::panicking::try (63,908 samples, 48.67%)std::panicking::trystd::panicking::try::do_call (63,908 samples, 48.67%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (63,908 samples, 
48.67%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()..tokio::runtime::task::harness::poll_future::{{closure}} (63,908 samples, 48.67%)tokio::runtime::task::harness::poll_future::{{closure}}tokio::runtime::task::core::Core<T,S>::store_output (758 samples, 0.58%)tokio::runtime::coop::budget (65,027 samples, 49.53%)tokio::runtime::coop::budgettokio::runtime::coop::with_budget (65,027 samples, 49.53%)tokio::runtime::coop::with_budgettokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (65,009 samples, 49.51%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}}tokio::runtime::task::LocalNotified<S>::run (65,003 samples, 49.51%)tokio::runtime::task::LocalNotified<S>::runtokio::runtime::task::raw::RawTask::poll (65,003 samples, 49.51%)tokio::runtime::task::raw::RawTask::polltokio::runtime::task::raw::poll (64,538 samples, 49.15%)tokio::runtime::task::raw::polltokio::runtime::task::harness::Harness<T,S>::poll (64,493 samples, 49.12%)tokio::runtime::task::harness::Harness<T,S>::polltokio::runtime::task::harness::Harness<T,S>::poll_inner (63,919 samples, 48.68%)tokio::runtime::task::harness::Harness<T,S>::poll_innertokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (93 samples, 0.07%)syscall (2,486 samples, 1.89%)s..[unknown] (2,424 samples, 1.85%)[..[unknown] (2,416 samples, 1.84%)[..[unknown] (2,130 samples, 1.62%)[unknown] (2,013 samples, 1.53%)[unknown] (1,951 samples, 1.49%)[unknown] (1,589 samples, 1.21%)[unknown] (1,415 samples, 1.08%)[unknown] (1,217 samples, 0.93%)[unknown] (820 samples, 0.62%)[unknown] (564 samples, 0.43%)[unknown] (360 samples, 0.27%)[unknown] (244 samples, 0.19%)[unknown] (194 samples, 0.15%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (339 samples, 0.26%)core::sync::atomic::AtomicUsize::fetch_add (337 samples, 0.26%)core::sync::atomic::atomic_add (337 samples, 
0.26%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (364 samples, 0.28%)[unknown] (154 samples, 0.12%)[unknown] (152 samples, 0.12%)[unknown] (143 samples, 0.11%)[unknown] (139 samples, 0.11%)[unknown] (131 samples, 0.10%)[unknown] (123 samples, 0.09%)[unknown] (110 samples, 0.08%)[unknown] (80 samples, 0.06%)[unknown] (74 samples, 0.06%)[unknown] (65 samples, 0.05%)[unknown] (64 samples, 0.05%)[unknown] (47 samples, 0.04%)[unknown] (44 samples, 0.03%)[unknown] (43 samples, 0.03%)[unknown] (40 samples, 0.03%)[unknown] (26 samples, 0.02%)[unknown] (20 samples, 0.02%)__GI___libc_write (158 samples, 0.12%)__GI___libc_write (158 samples, 0.12%)mio::sys::unix::waker::eventfd::WakerInternal::wake (159 samples, 0.12%)<&std::fs::File as std::io::Write>::write (159 samples, 0.12%)std::sys::pal::unix::fs::File::write (159 samples, 0.12%)std::sys::pal::unix::fd::FileDesc::write (159 samples, 0.12%)tokio::runtime::driver::Handle::unpark (168 samples, 0.13%)tokio::runtime::driver::IoHandle::unpark (168 samples, 0.13%)tokio::runtime::io::driver::Handle::unpark (168 samples, 0.13%)mio::waker::Waker::wake (165 samples, 0.13%)mio::sys::unix::waker::fdbased::Waker::wake (165 samples, 0.13%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (68,159 samples, 51.91%)tokio::runtime::scheduler::multi_thread::worker::Context::run_tasktokio::runtime::scheduler::multi_thread::worker::Core::transition_from_searching (3,024 samples, 2.30%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::transition_worker_from_searching (3,023 samples, 2.30%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (3,022 samples, 2.30%)t..tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (171 samples, 0.13%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (171 samples, 
0.13%)core::option::Option<T>::or_else (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::tune_global_queue_interval (53 samples, 0.04%)tokio::runtime::scheduler::multi_thread::stats::Stats::tuned_global_queue_interval (53 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (107 samples, 0.08%)__GI___libc_free (17 samples, 0.01%)_int_free (17 samples, 0.01%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Dying,K,V>::deallocating_end (18 samples, 0.01%)alloc::collections::btree::navigate::<impl alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>>::deallocating_end (18 samples, 0.01%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,alloc::collections::btree::node::marker::LeafOrInternal>::deallocate_and_ascend (18 samples, 0.01%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (18 samples, 0.01%)alloc::alloc::dealloc (18 samples, 0.01%)__rdl_dealloc (18 samples, 0.01%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (18 samples, 0.01%)alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (19 samples, 0.01%)tokio::runtime::task::Task<S>::shutdown (26 samples, 0.02%)tokio::runtime::task::raw::RawTask::shutdown (26 samples, 0.02%)tokio::runtime::task::raw::shutdown (26 samples, 0.02%)tokio::runtime::task::harness::Harness<T,S>::shutdown (26 samples, 0.02%)tokio::runtime::task::harness::cancel_task (26 samples, 
0.02%)std::panic::catch_unwind (26 samples, 0.02%)std::panicking::try (26 samples, 0.02%)std::panicking::try::do_call (26 samples, 0.02%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (26 samples, 0.02%)core::ops::function::FnOnce::call_once (26 samples, 0.02%)tokio::runtime::task::harness::cancel_task::{{closure}} (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage (26 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (26 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::core::Tracker> (26 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> (26 samples, 0.02%)core::ptr::drop_in_place<std::sync::rwlock::RwLock<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 0.02%)core::ptr::drop_in_place<core::cell::UnsafeCell<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 
0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> (26 samples, 0.02%)<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)core::mem::drop (26 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::IntoIter<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> (26 samples, 0.02%)<alloc::collections::btree::map::IntoIter<K,V,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,NodeType>,alloc::collections::btree::node::marker::KV>::drop_key_val (24 samples, 0.02%)core::mem::maybe_uninit::MaybeUninit<T>::assume_init_drop (24 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> (24 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (24 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (21 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>> (20 samples, 0.02%)core::ptr::drop_in_place<core::cell::UnsafeCell<torrust_tracker_torrent_repository::entry::Torrent>> (20 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker_torrent_repository::entry::Torrent> (20 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::PeerId,alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (20 samples, 0.02%)<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)core::mem::drop (20 samples, 
0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::IntoIter<torrust_tracker_primitives::PeerId,alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (20 samples, 0.02%)<alloc::collections::btree::map::IntoIter<K,V,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::pre_shutdown (33 samples, 0.03%)tokio::runtime::task::list::OwnedTasks<S>::close_and_shutdown_all (33 samples, 0.03%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (114 samples, 0.09%)alloc::sync::Arc<T,A>::inner (114 samples, 0.09%)core::ptr::non_null::NonNull<T>::as_ref (114 samples, 0.09%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (108 samples, 0.08%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (108 samples, 0.08%)core::cmp::impls::<impl core::cmp::PartialOrd for usize>::lt (106 samples, 0.08%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (49 samples, 0.04%)alloc::sync::Arc<T,A>::inner (49 samples, 0.04%)core::ptr::non_null::NonNull<T>::as_ref (49 samples, 0.04%)core::num::<impl u32>::wrapping_sub (132 samples, 0.10%)core::sync::atomic::AtomicU64::load (40 samples, 0.03%)core::sync::atomic::atomic_load (40 samples, 0.03%)tokio::loom::std::atomic_u32::AtomicU32::unsync_load (48 samples, 0.04%)core::sync::atomic::AtomicU32::load (48 samples, 0.04%)core::sync::atomic::atomic_load (48 samples, 0.04%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (65 samples, 0.05%)alloc::sync::Arc<T,A>::inner (65 samples, 0.05%)core::ptr::non_null::NonNull<T>::as_ref (65 samples, 0.05%)core::num::<impl u32>::wrapping_sub (50 samples, 0.04%)core::sync::atomic::AtomicU32::load (55 samples, 0.04%)core::sync::atomic::atomic_load (55 samples, 0.04%)core::sync::atomic::AtomicU64::load (80 samples, 0.06%)core::sync::atomic::atomic_load (80 samples, 0.06%)tokio::runtime::scheduler::multi_thread::queue::pack (26 samples, 
0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (666 samples, 0.51%)tokio::runtime::scheduler::multi_thread::queue::unpack (147 samples, 0.11%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (1,036 samples, 0.79%)tokio::runtime::scheduler::multi_thread::queue::unpack (46 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_searching (49 samples, 0.04%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_searching (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (2,414 samples, 1.84%)t..tokio::util::rand::FastRand::fastrand_n (24 samples, 0.02%)tokio::util::rand::FastRand::fastrand (24 samples, 0.02%)std::sys_common::backtrace::__rust_begin_short_backtrace (98,136 samples, 74.74%)std::sys_common::backtrace::__rust_begin_short_backtracetokio::runtime::blocking::pool::Spawner::spawn_thread::{{closure}} (98,136 samples, 74.74%)tokio::runtime::blocking::pool::Spawner::spawn_thread::{{closure}}tokio::runtime::blocking::pool::Inner::run (98,136 samples, 74.74%)tokio::runtime::blocking::pool::Inner::runtokio::runtime::blocking::pool::Task::run (98,042 samples, 74.67%)tokio::runtime::blocking::pool::Task::runtokio::runtime::task::UnownedTask<S>::run (98,042 samples, 74.67%)tokio::runtime::task::UnownedTask<S>::runtokio::runtime::task::raw::RawTask::poll (98,042 samples, 74.67%)tokio::runtime::task::raw::RawTask::polltokio::runtime::task::raw::poll (98,042 samples, 74.67%)tokio::runtime::task::raw::polltokio::runtime::task::harness::Harness<T,S>::poll (98,042 samples, 74.67%)tokio::runtime::task::harness::Harness<T,S>::polltokio::runtime::task::harness::Harness<T,S>::poll_inner (98,042 samples, 74.67%)tokio::runtime::task::harness::Harness<T,S>::poll_innertokio::runtime::task::harness::poll_future (98,042 samples, 74.67%)tokio::runtime::task::harness::poll_futurestd::panic::catch_unwind (98,042 samples, 
74.67%)std::panic::catch_unwindstd::panicking::try (98,042 samples, 74.67%)std::panicking::trystd::panicking::try::do_call (98,042 samples, 74.67%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (98,042 samples, 74.67%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_oncetokio::runtime::task::harness::poll_future::{{closure}} (98,042 samples, 74.67%)tokio::runtime::task::harness::poll_future::{{closure}}tokio::runtime::task::core::Core<T,S>::poll (98,042 samples, 74.67%)tokio::runtime::task::core::Core<T,S>::polltokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (98,042 samples, 74.67%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_muttokio::runtime::task::core::Core<T,S>::poll::{{closure}} (98,042 samples, 74.67%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}}<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (98,042 samples, 74.67%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::polltokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}}tokio::runtime::scheduler::multi_thread::worker::run (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::runtokio::runtime::context::runtime::enter_runtime (98,042 samples, 74.67%)tokio::runtime::context::runtime::enter_runtimetokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}tokio::runtime::context::set_scheduler (98,042 samples, 74.67%)tokio::runtime::context::set_schedulerstd::thread::local::LocalKey<T>::with (98,042 samples, 74.67%)std::thread::local::LocalKey<T>::withstd::thread::local::LocalKey<T>::try_with (98,042 samples, 
74.67%)std::thread::local::LocalKey<T>::try_withtokio::runtime::context::set_scheduler::{{closure}} (98,042 samples, 74.67%)tokio::runtime::context::set_scheduler::{{closure}}tokio::runtime::context::scoped::Scoped<T>::set (98,042 samples, 74.67%)tokio::runtime::context::scoped::Scoped<T>::settokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}}tokio::runtime::scheduler::multi_thread::worker::Context::run (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::Context::runstd::panic::catch_unwind (98,137 samples, 74.74%)std::panic::catch_unwindstd::panicking::try (98,137 samples, 74.74%)std::panicking::trystd::panicking::try::do_call (98,137 samples, 74.74%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (98,137 samples, 74.74%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_oncestd::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}} (98,137 samples, 74.74%)std::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}}<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once (98,139 samples, 74.74%)<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once (98,139 samples, 74.74%)<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_oncecore::ops::function::FnOnce::call_once{{vtable.shim}} (98,139 samples, 74.74%)core::ops::function::FnOnce::call_once{{vtable.shim}}std::thread::Builder::spawn_unchecked_::{{closure}} (98,139 samples, 74.74%)std::thread::Builder::spawn_unchecked_::{{closure}}clone3 (98,205 samples, 74.79%)clone3start_thread (98,205 samples, 74.79%)start_threadstd::sys::pal::unix::thread::Thread::new::thread_start (98,158 samples, 
74.76%)std::sys::pal::unix::thread::Thread::new::thread_startcore::ptr::drop_in_place<std::sys::pal::unix::stack_overflow::Handler> (19 samples, 0.01%)<std::sys::pal::unix::stack_overflow::Handler as core::ops::drop::Drop>::drop (19 samples, 0.01%)std::sys::pal::unix::stack_overflow::imp::drop_handler (19 samples, 0.01%)__GI_munmap (19 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (17 samples, 0.01%)[unknown] (16 samples, 0.01%)core::fmt::Formatter::pad_integral (112 samples, 0.09%)core::fmt::Formatter::pad_integral::write_prefix (59 samples, 0.04%)core::fmt::Formatter::pad_integral (16 samples, 0.01%)core::fmt::write (20 samples, 0.02%)core::ptr::drop_in_place<aquatic_udp_protocol::response::Response> (19 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (51 samples, 0.04%)rand_chacha::guts::round (18 samples, 0.01%)rand_chacha::guts::refill_wide::impl_avx2 (26 samples, 0.02%)rand_chacha::guts::refill_wide::fn_impl (26 samples, 0.02%)rand_chacha::guts::refill_wide_impl (26 samples, 0.02%)rand_chacha::guts::refill_wide (14 samples, 0.01%)std_detect::detect::arch::x86::__is_feature_detected::avx2 (14 samples, 0.01%)std_detect::detect::check_for (14 samples, 0.01%)std_detect::detect::cache::test (14 samples, 0.01%)std_detect::detect::cache::Cache::test (14 samples, 0.01%)core::sync::atomic::AtomicUsize::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)core::cell::RefCell<T>::borrow_mut (81 samples, 0.06%)core::cell::RefCell<T>::try_borrow_mut (81 samples, 0.06%)core::cell::BorrowRefMut::new (81 samples, 0.06%)std::sys::pal::unix::time::Timespec::now (164 samples, 0.12%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (106 samples, 0.08%)tokio::runtime::coop::budget (105 
samples, 0.08%)tokio::runtime::coop::with_budget (105 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (96 samples, 0.07%)std::sys::pal::unix::time::Timespec::sub_timespec (35 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::lock_contended (15 samples, 0.01%)syscall (90 samples, 0.07%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (21 samples, 0.02%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run (61 samples, 0.05%)tokio::runtime::context::runtime::enter_runtime (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (61 samples, 0.05%)tokio::runtime::context::set_scheduler (61 samples, 0.05%)std::thread::local::LocalKey<T>::with (61 samples, 0.05%)std::thread::local::LocalKey<T>::try_with (61 samples, 0.05%)tokio::runtime::context::set_scheduler::{{closure}} (61 samples, 0.05%)tokio::runtime::context::scoped::Scoped<T>::set (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Context::run (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (19 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (17 samples, 0.01%)tokio::runtime::context::CONTEXT::__getit (14 samples, 0.01%)core::cell::Cell<T>::get (14 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::TaskIdGuard> (22 samples, 0.02%)<tokio::runtime::task::core::TaskIdGuard 
as core::ops::drop::Drop>::drop (22 samples, 0.02%)tokio::runtime::context::set_current_task_id (22 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (22 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (112 samples, 0.09%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (111 samples, 0.08%)tokio::runtime::task::harness::poll_future (125 samples, 0.10%)std::panic::catch_unwind (125 samples, 0.10%)std::panicking::try (125 samples, 0.10%)std::panicking::try::do_call (125 samples, 0.10%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (125 samples, 0.10%)tokio::runtime::task::harness::poll_future::{{closure}} (125 samples, 0.10%)tokio::runtime::task::core::Core<T,S>::poll (125 samples, 0.10%)tokio::runtime::task::raw::poll (157 samples, 0.12%)tokio::runtime::task::harness::Harness<T,S>::poll (135 samples, 0.10%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (135 samples, 0.10%)tokio::runtime::time::Driver::park_internal (15 samples, 0.01%)torrust_tracker::bootstrap::logging::INIT (17 samples, 0.01%)__memcpy_avx512_unaligned_erms (397 samples, 0.30%)_int_free (24 samples, 0.02%)_int_malloc (132 samples, 0.10%)torrust_tracker::servers::udp::logging::log_request::__CALLSITE::META (570 samples, 0.43%)__GI___lll_lock_wait_private (22 samples, 0.02%)futex_wait (14 samples, 0.01%)__memcpy_avx512_unaligned_erms (299 samples, 0.23%)_int_free (16 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request::__CALLSITE (361 samples, 0.27%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (41 samples, 0.03%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (23 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (53 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (14 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (63 
samples, 0.05%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (21 samples, 0.02%)__GI___libc_malloc (18 samples, 0.01%)alloc::vec::Vec<T>::with_capacity (116 samples, 0.09%)alloc::vec::Vec<T,A>::with_capacity_in (116 samples, 0.09%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (116 samples, 0.09%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (116 samples, 0.09%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (116 samples, 0.09%)alloc::alloc::Global::alloc_impl (116 samples, 0.09%)alloc::alloc::alloc (116 samples, 0.09%)__rdl_alloc (116 samples, 0.09%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (116 samples, 0.09%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (53 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (53 samples, 0.04%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (53 samples, 0.04%)_int_malloc (21 samples, 0.02%)[unknown] (36 samples, 0.03%)[unknown] (16 samples, 0.01%)core::mem::zeroed (27 samples, 0.02%)core::mem::maybe_uninit::MaybeUninit<T>::zeroed (27 samples, 0.02%)core::ptr::mut_ptr::<impl *mut T>::write_bytes (27 samples, 0.02%)core::intrinsics::write_bytes (27 samples, 0.02%)[unknown] (27 samples, 0.02%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}}::{{closure}} (64 samples, 0.05%)mio::net::udp::UdpSocket::recv_from (49 samples, 0.04%)mio::io_source::IoSource<T>::do_io (49 samples, 0.04%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (49 samples, 0.04%)mio::net::udp::UdpSocket::recv_from::{{closure}} (49 samples, 0.04%)std::net::udp::UdpSocket::recv_from (49 samples, 0.04%)std::sys_common::net::UdpSocket::recv_from (49 samples, 0.04%)std::sys::pal::unix::net::Socket::recv_from (49 samples, 0.04%)std::sys::pal::unix::net::Socket::recv_from_with_flags (49 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (271 
samples, 0.21%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (143 samples, 0.11%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (141 samples, 0.11%)tokio::runtime::io::registration::Registration::clear_readiness (15 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::clear_readiness (15 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (15 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (359 samples, 0.27%)torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (346 samples, 0.26%)torrust_tracker::servers::udp::server::Udp::spawn_request_processor (39 samples, 0.03%)tokio::task::spawn::spawn (39 samples, 0.03%)tokio::task::spawn::spawn_inner (39 samples, 0.03%)tokio::runtime::context::current::with_current (39 samples, 0.03%)std::thread::local::LocalKey<T>::try_with (39 samples, 0.03%)tokio::runtime::context::current::with_current::{{closure}} (39 samples, 0.03%)core::option::Option<T>::map (39 samples, 0.03%)tokio::task::spawn::spawn_inner::{{closure}} (39 samples, 0.03%)tokio::runtime::scheduler::Handle::spawn (39 samples, 0.03%)tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (39 samples, 0.03%)tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (39 samples, 0.03%)tokio::runtime::task::list::OwnedTasks<S>::bind (34 samples, 0.03%)all (131,301 samples, 100%)tokio-runtime-w (131,061 samples, 99.82%)tokio-runtime-w \ No newline at end of file diff --git a/docs/media/kcachegrind-screenshot.png b/docs/media/kcachegrind-screenshot.png new file mode 100644 index 000000000..a10eb5ad6 Binary files /dev/null and b/docs/media/kcachegrind-screenshot.png differ diff --git a/docs/media/mandelbrot_2048x2048_infohash_v1.png.torrent b/docs/media/mandelbrot_2048x2048_infohash_v1.png.torrent new file mode 100644 index 000000000..1a08a811b Binary files /dev/null and 
b/docs/media/mandelbrot_2048x2048_infohash_v1.png.torrent differ diff --git a/docs/media/mandelbrot_2048x2048_infohash_v1.png.torrent.json b/docs/media/mandelbrot_2048x2048_infohash_v1.png.torrent.json new file mode 100644 index 000000000..caaa1a417 --- /dev/null +++ b/docs/media/mandelbrot_2048x2048_infohash_v1.png.torrent.json @@ -0,0 +1,10 @@ +{ + "created by": "qBittorrent v4.4.1", + "creation date": 1679674628, + "info": { + "length": 172204, + "name": "mandelbrot_2048x2048.png", + "piece length": 16384, + "pieces": "7D 91 71 0D 9D 4D BA 88 9B 54 20 54 D5 26 72 8D 5A 86 3F E1 21 DF 77 C7 F7 BB 6C 77 96 21 66 25 38 C5 D9 CD AB 8B 08 EF 8C 24 9B B2 F5 C4 CD 2A DF 0B C0 0C F0 AD DF 72 90 E5 B6 41 4C 23 6C 47 9B 8E 9F 46 AA 0C 0D 8E D1 97 FF EE 68 8B 5F 34 A3 87 D7 71 C5 A6 F9 8E 2E A6 31 7C BD F0 F9 E2 23 F9 CC 80 AF 54 00 04 F9 85 69 1C 77 89 C1 76 4E D6 AA BF 61 A6 C2 80 99 AB B6 5F 60 2F 40 A8 25 BE 32 A3 3D 9D 07 0C 79 68 98 D4 9D 63 49 AF 20 58 66 26 6F 98 6B 6D 32 34 CD 7D 08 15 5E 1A D0 00 09 57 AB 30 3B 20 60 C1 DC 12 87 D6 F3 E7 45 4F 70 67 09 36 31 55 F2 20 F6 6C A5 15 6F 2C 89 95 69 16 53 81 7D 31 F1 B6 BD 37 42 CC 11 0B B2 FC 2B 49 A5 85 B6 FC 76 74 44 93" + } +} \ No newline at end of file diff --git a/docs/media/torrent-repository-implementations-benchmarking-report.png b/docs/media/torrent-repository-implementations-benchmarking-report.png new file mode 100644 index 000000000..ee87c6d42 Binary files /dev/null and b/docs/media/torrent-repository-implementations-benchmarking-report.png differ diff --git a/docs/media/torrust-tracker-components.png b/docs/media/torrust-tracker-components.png new file mode 100644 index 000000000..19fe3c0b8 Binary files /dev/null and b/docs/media/torrust-tracker-components.png differ diff --git a/docs/profiling.md b/docs/profiling.md new file mode 100644 index 000000000..8038f9e77 --- /dev/null +++ b/docs/profiling.md @@ -0,0 +1,132 @@ +# Profiling + +## Using flamegraph + +### Requirements + +You need to install some 
dependencies. For Ubuntu you can run: + +```console +sudo apt-get install clang lld +``` + +You also need to uncomment these lines in the cargo [config.toml](./../.cargo/config.toml) file. + +```toml +[target.x86_64-unknown-linux-gnu] +linker = "/usr/bin/clang" +rustflags = ["-Clink-arg=-fuse-ld=lld", "-Clink-arg=-Wl,--no-rosegment"] +``` + +Follow the [flamegraph](https://github.com/flamegraph-rs/flamegraph) instructions for installation. + +Apart from running the tracker you will need to run some request if you want to profile services while they are processing requests. + +You can use the aquatic [UDP load test](https://github.com/greatest-ape/aquatic/tree/master/crates/udp_load_test) script. + +### Generate flamegraph + +To generate the graph you will need to: + +1. Build the tracker for profiling. +2. Run the aquatic UDP load test. +3. Run the tracker with flamegraph and profiling configuration. + +```console +cargo build --profile=release-debug --bin=profiling +./target/release/aquatic_udp_load_test -c "load-test-config.toml" +sudo TORRUST_TRACKER_CONFIG_TOML_PATH="./share/default/config/tracker.udp.benchmarking.toml" /home/USER/.cargo/bin/flamegraph -- ./target/release-debug/profiling 60 +``` + +__NOTICE__: You need to install the `aquatic_udp_load_test` program. + +The output should be like the following: + +```output +Loading configuration file: `./share/default/config/tracker.udp.benchmarking.toml` ... +Torrust successfully shutdown. +[ perf record: Woken up 23377 times to write data ] +Warning: +Processed 533730 events and lost 3 chunks! + +Check IO/CPU overload! + +[ perf record: Captured and wrote 5899.806 MB perf.data (373239 samples) ] +writing flamegraph to "flamegraph.svg" +``` + +![flamegraph](./media/flamegraph.svg) + +__NOTICE__: You need to provide the absolute path for the installed `flamegraph` app if you use sudo. Replace `/home/USER/.cargo/bin/flamegraph` with the location of your installed `flamegraph` app. 
You can run it without sudo but you can get a warning message like the following: + +```output +WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted, +check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid. + +Samples in kernel functions may not be resolved if a suitable vmlinux +file is not found in the buildid cache or in the vmlinux path. + +Samples in kernel modules won't be resolved at all. + +If some relocation was applied (e.g. kexec) symbols may be misresolved +even with a suitable vmlinux or kallsyms file. + +Couldn't record kernel reference relocation symbol +Symbol resolution may be skewed if relocation was used (e.g. kexec). +Check /proc/kallsyms permission or run as root. +Loading configuration file: `./share/default/config/tracker.udp.benchmarking.toml` ... +``` + +And some bars in the graph will have the `unknown` label. + +![flamegraph generated without sudo](./media/flamegraph_generated_without_sudo.svg) + +## Using valgrind and kcachegrind + +You need to: + +1. Build an run the tracker for profiling. +2. Make requests to the tracker while it's running. + +Build and the binary for profiling: + +```console +RUSTFLAGS='-g' cargo build --release --bin profiling \ + && export TORRUST_TRACKER_CONFIG_TOML_PATH="./share/default/config/tracker.udp.benchmarking.toml" \ + && valgrind \ + --tool=callgrind \ + --callgrind-out-file=callgrind.out \ + --collect-jumps=yes \ + --simulate-cache=yes \ + ./target/release/profiling 60 +``` + +> NOTICE: You should make requests to the services you want to profile. For example, using the [UDP load test](./benchmarking.md#run-udp-load-test). 
+ +After running the tracker with ` **The `[semantic version]` is bumped according to releases, new features, and breaking changes.** +> +> *The `develop` branch uses the (semantic version) suffix `-develop`.* + +## Process: + +**Note**: this guide assumes that the your git `torrust` remote is like this: + +```sh +git remote show torrust +``` + +```s +* remote torrust + Fetch URL: git@github.com:torrust/torrust-tracker.git + Push URL: git@github.com:torrust/torrust-tracker.git +... +``` + + +### 1. The `develop` branch is ready for a release. +The `develop` branch should have the version `[semantic version]-develop` that is ready to be released. + +### 2. Stage `develop` HEAD for merging into the `main` branch: + +```sh +git fetch --all +git push --force torrust develop:staging/main +``` + +### 3. Create Release Commit: + +```sh +git stash +git switch staging/main +git reset --hard torrust/staging/main +# change `[semantic version]-develop` to `[semantic version]`. +git add -A +git commit -m "release: version [semantic version]" +git push torrust +``` + +### 4. Create and Merge Pull Request from `staging/main` into `main` branch. + +Pull request title format: "Release Version `[semantic version]`". + +This pull request merges the new version into the `main` branch. + +### 5. Push new version from `main` HEAD to `releases/v[semantic version]` branch: + +```sh +git fetch --all +git push torrust main:releases/v[semantic version] +``` + +> **Check that the deployment is successful!** + +### 6. Create Release Tag: + +```sh +git switch releases/v[semantic version] +git tag --sign v[semantic version] +git push --tags torrust +``` + +### 7. Create Release on Github from Tag. +This is for those who wish to download the source code. + +### 8. Stage `main` HEAD for merging into the `develop` branch: +Merge release back into the develop branch. + +```sh +git fetch --all +git push --force torrust main:staging/develop +``` +### 9. 
Create Comment that bumps next development version: + +```sh +git stash +git switch staging/develop +git reset --hard torrust/staging/develop +# change `[semantic version]` to `(next)[semantic version]-develop`. +git add -A +git commit -m "develop: bump to version (next)[semantic version]-develop" +git push torrust +``` + +### 10. Create and Merge Pull Request from `staging/develop` into `develop` branch. + +Pull request title format: "Version `[semantic version]` was Released". + +This pull request merges the new release into the `develop` branch and bumps the version number. diff --git a/migrations/README.md b/migrations/README.md new file mode 100644 index 000000000..090c46ccb --- /dev/null +++ b/migrations/README.md @@ -0,0 +1,5 @@ +# Database Migrations + +We don't support automatic migrations yet. The tracker creates all the needed tables when it starts. The SQL sentences are hardcoded in each database driver. + +The migrations in this folder were introduced to add some new changes (permanent keys) and to allow users to migrate to the new version. In the future, we will remove the hardcoded SQL and start using a Rust crate for database migrations. For the time being, if you are using the initial schema described in the migration `20240730183000_torrust_tracker_create_all_tables.sql` you will need to run all the subsequent migrations manually. 
diff --git a/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql b/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql new file mode 100644 index 000000000..407ae4dd1 --- /dev/null +++ b/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql @@ -0,0 +1,21 @@ +CREATE TABLE + IF NOT EXISTS whitelist ( + id integer PRIMARY KEY AUTO_INCREMENT, + info_hash VARCHAR(40) NOT NULL UNIQUE + ); + +CREATE TABLE + IF NOT EXISTS torrents ( + id integer PRIMARY KEY AUTO_INCREMENT, + info_hash VARCHAR(40) NOT NULL UNIQUE, + completed INTEGER DEFAULT 0 NOT NULL + ); + +CREATE TABLE + IF NOT EXISTS `keys` ( + `id` INT NOT NULL AUTO_INCREMENT, + `key` VARCHAR(32) NOT NULL, + `valid_until` INT (10) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE (`key`) + ); \ No newline at end of file diff --git a/migrations/mysql/20240730183500_torrust_tracker_keys_valid_until_nullable.sql b/migrations/mysql/20240730183500_torrust_tracker_keys_valid_until_nullable.sql new file mode 100644 index 000000000..2602797d6 --- /dev/null +++ b/migrations/mysql/20240730183500_torrust_tracker_keys_valid_until_nullable.sql @@ -0,0 +1 @@ +ALTER TABLE `keys` CHANGE `valid_until` `valid_until` INT (10); \ No newline at end of file diff --git a/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql b/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql new file mode 100644 index 000000000..bd451bf8b --- /dev/null +++ b/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql @@ -0,0 +1,19 @@ +CREATE TABLE + IF NOT EXISTS whitelist ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + info_hash TEXT NOT NULL UNIQUE + ); + +CREATE TABLE + IF NOT EXISTS torrents ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + info_hash TEXT NOT NULL UNIQUE, + completed INTEGER DEFAULT 0 NOT NULL + ); + +CREATE TABLE + IF NOT EXISTS keys ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + key TEXT NOT NULL UNIQUE, + valid_until INTEGER NOT NULL + ); \ No newline at 
end of file diff --git a/migrations/sqlite/20240730183500_torrust_tracker_keys_valid_until_nullable.sql b/migrations/sqlite/20240730183500_torrust_tracker_keys_valid_until_nullable.sql new file mode 100644 index 000000000..c6746e3ee --- /dev/null +++ b/migrations/sqlite/20240730183500_torrust_tracker_keys_valid_until_nullable.sql @@ -0,0 +1,12 @@ +CREATE TABLE + IF NOT EXISTS keys_new ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + key TEXT NOT NULL UNIQUE, + valid_until INTEGER + ); + +INSERT INTO keys_new SELECT * FROM `keys`; + +DROP TABLE `keys`; + +ALTER TABLE keys_new RENAME TO `keys`; \ No newline at end of file diff --git a/packages/clock/Cargo.toml b/packages/clock/Cargo.toml new file mode 100644 index 000000000..ffb75fb7c --- /dev/null +++ b/packages/clock/Cargo.toml @@ -0,0 +1,24 @@ +[package] +description = "A library to a clock for the torrust tracker." +keywords = ["clock", "library", "torrents"] +name = "torrust-tracker-clock" +readme = "README.md" + +authors.workspace = true +categories.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +chrono = { version = "0", default-features = false, features = ["clock"] } +lazy_static = "1" + +torrust-tracker-primitives = { version = "3.0.0", path = "../primitives" } + +[dev-dependencies] diff --git a/packages/clock/README.md b/packages/clock/README.md new file mode 100644 index 000000000..bfdd7808f --- /dev/null +++ b/packages/clock/README.md @@ -0,0 +1,11 @@ +# Torrust Tracker Clock + +A library to provide a working and mockable clock for the [Torrust Tracker](https://github.com/torrust/torrust-tracker). + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-torrent-clock). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). 
diff --git a/packages/clock/src/clock/mod.rs b/packages/clock/src/clock/mod.rs new file mode 100644 index 000000000..50afbc9db --- /dev/null +++ b/packages/clock/src/clock/mod.rs @@ -0,0 +1,72 @@ +use std::time::Duration; + +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use self::stopped::StoppedClock; +use self::working::WorkingClock; + +pub mod stopped; +pub mod working; + +/// A generic structure that represents a clock. +/// +/// It can be either the working clock (production) or the stopped clock +/// (testing). It implements the `Time` trait, which gives you the current time. +#[derive(Debug)] +pub struct Clock { + clock: std::marker::PhantomData, +} + +/// The working clock. It returns the current time. +pub type Working = Clock; +/// The stopped clock. It returns always the same fixed time. +pub type Stopped = Clock; + +/// Trait for types that can be used as a timestamp clock. +pub trait Time: Sized { + fn now() -> DurationSinceUnixEpoch; + + fn dbg_clock_type() -> String; + + #[must_use] + fn now_add(add_time: &Duration) -> Option { + Self::now().checked_add(*add_time) + } + #[must_use] + fn now_sub(sub_time: &Duration) -> Option { + Self::now().checked_sub(*sub_time) + } +} + +#[cfg(test)] +mod tests { + use std::any::TypeId; + use std::time::Duration; + + use crate::clock::{self, Stopped, Time, Working}; + use crate::CurrentClock; + + #[test] + fn it_should_be_the_stopped_clock_as_default_when_testing() { + // We are testing, so we should default to the fixed time. 
+ assert_eq!(TypeId::of::(), TypeId::of::()); + assert_eq!(Stopped::now(), CurrentClock::now()); + } + + #[test] + fn it_should_have_different_times() { + assert_ne!(TypeId::of::(), TypeId::of::()); + assert_ne!(Stopped::now(), Working::now()); + } + + #[test] + fn it_should_use_stopped_time_for_testing() { + assert_eq!(CurrentClock::dbg_clock_type(), "Stopped".to_owned()); + + let time = CurrentClock::now(); + std::thread::sleep(Duration::from_millis(50)); + let time_2 = CurrentClock::now(); + + assert_eq!(time, time_2); + } +} diff --git a/packages/clock/src/clock/stopped/mod.rs b/packages/clock/src/clock/stopped/mod.rs new file mode 100644 index 000000000..5d0b2ec4e --- /dev/null +++ b/packages/clock/src/clock/stopped/mod.rs @@ -0,0 +1,209 @@ +/// Trait for types that can be used as a timestamp clock stopped +/// at a given time. +#[allow(clippy::module_name_repetitions)] +pub struct StoppedClock {} + +#[allow(clippy::module_name_repetitions)] +pub trait Stopped: clock::Time { + /// It sets the clock to a given time. + fn local_set(unix_time: &DurationSinceUnixEpoch); + + /// It sets the clock to the Unix Epoch. + fn local_set_to_unix_epoch() { + Self::local_set(&DurationSinceUnixEpoch::ZERO); + } + + /// It sets the clock to the time the application started. + fn local_set_to_app_start_time(); + + /// It sets the clock to the current system time. + fn local_set_to_system_time_now(); + + /// It adds a `Duration` to the clock. + /// + /// # Errors + /// + /// Will return `IntErrorKind` if `duration` would overflow the internal `Duration`. + fn local_add(duration: &Duration) -> Result<(), IntErrorKind>; + + /// It subtracts a `Duration` from the clock. + /// # Errors + /// + /// Will return `IntErrorKind` if `duration` would underflow the internal `Duration`. + fn local_sub(duration: &Duration) -> Result<(), IntErrorKind>; + + /// It resets the clock to default fixed time that is application start time (or the unix epoch when testing). 
+ fn local_reset(); +} + +use std::num::IntErrorKind; +use std::time::Duration; + +use super::{DurationSinceUnixEpoch, Time}; +use crate::clock; + +impl Time for clock::Stopped { + fn now() -> DurationSinceUnixEpoch { + detail::FIXED_TIME.with(|time| { + return *time.borrow(); + }) + } + + fn dbg_clock_type() -> String { + "Stopped".to_owned() + } +} + +impl Stopped for clock::Stopped { + fn local_set(unix_time: &DurationSinceUnixEpoch) { + detail::FIXED_TIME.with(|time| { + *time.borrow_mut() = *unix_time; + }); + } + + fn local_set_to_app_start_time() { + Self::local_set(&detail::get_app_start_time()); + } + + fn local_set_to_system_time_now() { + Self::local_set(&detail::get_app_start_time()); + } + + fn local_add(duration: &Duration) -> Result<(), IntErrorKind> { + detail::FIXED_TIME.with(|time| { + let time_borrowed = *time.borrow(); + *time.borrow_mut() = match time_borrowed.checked_add(*duration) { + Some(time) => time, + None => { + return Err(IntErrorKind::PosOverflow); + } + }; + Ok(()) + }) + } + + fn local_sub(duration: &Duration) -> Result<(), IntErrorKind> { + detail::FIXED_TIME.with(|time| { + let time_borrowed = *time.borrow(); + *time.borrow_mut() = match time_borrowed.checked_sub(*duration) { + Some(time) => time, + None => { + return Err(IntErrorKind::NegOverflow); + } + }; + Ok(()) + }) + } + + fn local_reset() { + Self::local_set(&detail::get_default_fixed_time()); + } +} + +#[cfg(test)] +mod tests { + use std::thread; + use std::time::Duration; + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::clock::stopped::Stopped as _; + use crate::clock::{Stopped, Time, Working}; + + #[test] + fn it_should_default_to_zero_when_testing() { + assert_eq!(Stopped::now(), DurationSinceUnixEpoch::ZERO); + } + + #[test] + fn it_should_possible_to_set_the_time() { + // Check we start with ZERO. 
+ assert_eq!(Stopped::now(), Duration::ZERO); + + // Set to Current Time and Check + let timestamp = Working::now(); + Stopped::local_set(×tamp); + assert_eq!(Stopped::now(), timestamp); + + // Elapse the Current Time and Check + Stopped::local_add(×tamp).unwrap(); + assert_eq!(Stopped::now(), timestamp + timestamp); + + // Reset to ZERO and Check + Stopped::local_reset(); + assert_eq!(Stopped::now(), Duration::ZERO); + } + + #[test] + fn it_should_default_to_zero_on_thread_exit() { + assert_eq!(Stopped::now(), Duration::ZERO); + let after5 = Working::now_add(&Duration::from_secs(5)).unwrap(); + Stopped::local_set(&after5); + assert_eq!(Stopped::now(), after5); + + let t = thread::spawn(move || { + // each thread starts out with the initial value of ZERO + assert_eq!(Stopped::now(), Duration::ZERO); + + // and gets set to the current time. + let timestamp = Working::now(); + Stopped::local_set(×tamp); + assert_eq!(Stopped::now(), timestamp); + }); + + // wait for the thread to complete and bail out on panic + t.join().unwrap(); + + // we retain our original value of current time + 5sec despite the child thread + assert_eq!(Stopped::now(), after5); + + // Reset to ZERO and Check + Stopped::local_reset(); + assert_eq!(Stopped::now(), Duration::ZERO); + } +} + +mod detail { + use std::cell::RefCell; + use std::time::SystemTime; + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::static_time; + + thread_local!(pub static FIXED_TIME: RefCell = RefCell::new(get_default_fixed_time())); + + pub fn get_app_start_time() -> DurationSinceUnixEpoch { + (*static_time::TIME_AT_APP_START) + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + } + + #[cfg(not(test))] + pub fn get_default_fixed_time() -> DurationSinceUnixEpoch { + get_app_start_time() + } + + #[cfg(test)] + pub fn get_default_fixed_time() -> DurationSinceUnixEpoch { + DurationSinceUnixEpoch::ZERO + } + + #[cfg(test)] + mod tests { + use std::time::Duration; + + use 
crate::clock::stopped::detail::{get_app_start_time, get_default_fixed_time}; + + #[test] + fn it_should_get_the_zero_start_time_when_testing() { + assert_eq!(get_default_fixed_time(), Duration::ZERO); + } + + #[test] + fn it_should_get_app_start_time() { + const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1_662_983_731, 22312); + assert!(get_app_start_time() > TIME_AT_WRITING_THIS_TEST); + } + } +} diff --git a/packages/clock/src/clock/working/mod.rs b/packages/clock/src/clock/working/mod.rs new file mode 100644 index 000000000..6d0b4dcf7 --- /dev/null +++ b/packages/clock/src/clock/working/mod.rs @@ -0,0 +1,18 @@ +use std::time::SystemTime; + +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::clock; + +#[allow(clippy::module_name_repetitions)] +pub struct WorkingClock; + +impl clock::Time for clock::Working { + fn now() -> DurationSinceUnixEpoch { + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap() + } + + fn dbg_clock_type() -> String { + "Working".to_owned() + } +} diff --git a/packages/clock/src/conv/mod.rs b/packages/clock/src/conv/mod.rs new file mode 100644 index 000000000..0ac278171 --- /dev/null +++ b/packages/clock/src/conv/mod.rs @@ -0,0 +1,82 @@ +use std::str::FromStr; + +use chrono::{DateTime, Utc}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +/// It converts a string in ISO 8601 format to a timestamp. +/// +/// For example, the string `1970-01-01T00:00:00.000Z` which is the Unix Epoch +/// will be converted to a timestamp of 0: `DurationSinceUnixEpoch::ZERO`. +/// +/// # Panics +/// +/// Will panic if the input time cannot be converted to `DateTime::`, internally using the `i64` type. +/// (this will naturally happen in 292.5 billion years) +#[must_use] +pub fn convert_from_iso_8601_to_timestamp(iso_8601: &str) -> DurationSinceUnixEpoch { + convert_from_datetime_utc_to_timestamp(&DateTime::::from_str(iso_8601).unwrap()) +} + +/// It converts a `DateTime::` to a timestamp. 
+/// For example, the `DateTime::` of the Unix Epoch will be converted to a +/// timestamp of 0: `DurationSinceUnixEpoch::ZERO`. +/// +/// # Panics +/// +/// Will panic if the input time overflows the `u64` type. +/// (this will naturally happen in 584.9 billion years) +#[must_use] +pub fn convert_from_datetime_utc_to_timestamp(datetime_utc: &DateTime) -> DurationSinceUnixEpoch { + DurationSinceUnixEpoch::from_secs(u64::try_from(datetime_utc.timestamp()).expect("Overflow of u64 seconds, very future!")) +} + +/// It converts a timestamp to a `DateTime::`. +/// For example, the timestamp of 0: `DurationSinceUnixEpoch::ZERO` will be +/// converted to the `DateTime::` of the Unix Epoch. +/// +/// # Panics +/// +/// Will panic if the input time overflows the `u64` seconds overflows the `i64` type. +/// (this will naturally happen in 292.5 billion years) +#[must_use] +pub fn convert_from_timestamp_to_datetime_utc(duration: DurationSinceUnixEpoch) -> DateTime { + DateTime::from_timestamp( + i64::try_from(duration.as_secs()).expect("Overflow of i64 seconds, very future!"), + duration.subsec_nanos(), + ) + .unwrap() +} + +#[cfg(test)] +mod tests { + use chrono::DateTime; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::conv::{ + convert_from_datetime_utc_to_timestamp, convert_from_iso_8601_to_timestamp, convert_from_timestamp_to_datetime_utc, + }; + + #[test] + fn should_be_converted_to_datetime_utc() { + let timestamp = DurationSinceUnixEpoch::ZERO; + assert_eq!( + convert_from_timestamp_to_datetime_utc(timestamp), + DateTime::from_timestamp(0, 0).unwrap() + ); + } + + #[test] + fn should_be_converted_from_datetime_utc() { + let datetime = DateTime::from_timestamp(0, 0).unwrap(); + assert_eq!( + convert_from_datetime_utc_to_timestamp(&datetime), + DurationSinceUnixEpoch::ZERO + ); + } + + #[test] + fn should_be_converted_from_datetime_utc_in_iso_8601() { + let iso_8601 = "1970-01-01T00:00:00.000Z".to_string(); + 
assert_eq!(convert_from_iso_8601_to_timestamp(&iso_8601), DurationSinceUnixEpoch::ZERO); + } +} diff --git a/packages/clock/src/lib.rs b/packages/clock/src/lib.rs new file mode 100644 index 000000000..295d22c16 --- /dev/null +++ b/packages/clock/src/lib.rs @@ -0,0 +1,53 @@ +//! Time related functions and types. +//! +//! It's usually a good idea to control where the time comes from +//! in an application so that it can be mocked for testing and it can be +//! controlled in production so we get the intended behavior without +//! relying on the specific time zone for the underlying system. +//! +//! Clocks use the type `DurationSinceUnixEpoch` which is a +//! `std::time::Duration` since the Unix Epoch (timestamp). +//! +//! ```text +//! Local time: lun 2023-03-27 16:12:00 WEST +//! Universal time: lun 2023-03-27 15:12:00 UTC +//! Time zone: Atlantic/Canary (WEST, +0100) +//! Timestamp: 1679929914 +//! Duration: 1679929914.10167426 +//! ``` +//! +//! > **NOTICE**: internally the `Duration` is stores it's main unit as seconds in a `u64` and it will +//! > overflow in 584.9 billion years. +//! +//! > **NOTICE**: the timestamp does not depend on the time zone. That gives you +//! > the ability to use the clock regardless of the underlying system time zone +//! > configuration. See [Unix time Wikipedia entry](https://en.wikipedia.org/wiki/Unix_time). + +pub mod clock; +pub mod conv; +pub mod static_time; +pub mod time_extent; + +#[macro_use] +extern crate lazy_static; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; + +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type DefaultTimeExtentMaker = time_extent::WorkingTimeExtentMaker; + +/// Stopped version, for testing. 
+#[cfg(test)] +#[allow(dead_code)] +pub(crate) type DefaultTimeExtentMaker = time_extent::StoppedTimeExtentMaker; diff --git a/packages/clock/src/static_time/mod.rs b/packages/clock/src/static_time/mod.rs new file mode 100644 index 000000000..79557b3c4 --- /dev/null +++ b/packages/clock/src/static_time/mod.rs @@ -0,0 +1,8 @@ +//! It contains a static variable that is set to the time at which +//! the application started. +use std::time::SystemTime; + +lazy_static! { + /// The time at which the application started. + pub static ref TIME_AT_APP_START: SystemTime = SystemTime::now(); +} diff --git a/packages/clock/src/time_extent/mod.rs b/packages/clock/src/time_extent/mod.rs new file mode 100644 index 000000000..c51849f21 --- /dev/null +++ b/packages/clock/src/time_extent/mod.rs @@ -0,0 +1,665 @@ +//! It includes functionality to handle time extents. +//! +//! Time extents are used to represent a duration of time which contains +//! N times intervals of the same duration. +//! +//! Given a duration of: 60 seconds. +//! +//! ```text +//! |------------------------------------------------------------| +//! ``` +//! +//! If we define a **base** duration of `10` seconds, we would have `6` intervals. +//! +//! ```text +//! |----------|----------|----------|----------|----------|----------| +//! ^--- 10 seconds +//! ``` +//! +//! Then, You can represent half of the duration (`30` seconds) as: +//! +//! ```text +//! |----------|----------|----------|----------|----------|----------| +//! ^--- 30 seconds +//! ``` +//! +//! `3` times (**multiplier**) the **base** interval (3*10 = 30 seconds): +//! +//! ```text +//! |----------|----------|----------|----------|----------|----------| +//! ^--- 30 seconds (3 units of 10 seconds) +//! ``` +//! +//! Time extents are a way to measure time duration using only one unit of time +//! (**base** duration) repeated `N` times (**multiplier**). +//! +//! Time extents are not clocks in a sense that they do not have a start time. +//! 
They are not synchronized with the real time. In order to measure time, +//! you need to define a start time for the intervals. +//! +//! For example, we could measure time is "lustrums" (5 years) since the start +//! of the 21st century. The time extent would contains a base 5-year duration +//! and the multiplier. The current "lustrum" (2023) would be 5th one if we +//! start counting "lustrums" at 1. +//! +//! ```text +//! Lustrum 1: 2000-2004 +//! Lustrum 2: 2005-2009 +//! Lustrum 3: 2010-2014 +//! Lustrum 4: 2015-2019 +//! Lustrum 5: 2020-2024 +//! ``` +//! +//! More practically time extents are used to represent number of time intervals +//! since the Unix Epoch. Each interval is typically an amount of seconds. +//! It's specially useful to check expiring dates. For example, you can have an +//! authentication token that expires after 120 seconds. If you divide the +//! current timestamp by 120 you get the number of 2-minute intervals since the +//! Unix Epoch, you can hash that value with a secret key and send it to a +//! client. The client can authenticate by sending the hashed value back to the +//! server. The server can build the same hash and compare it with the one sent +//! by the client. The hash would be the same during the 2-minute interval, but +//! it would change after that. This method is one of the methods used by UDP +//! trackers to generate and verify a connection ID, which a a token sent to +//! the client to identify the connection. +use std::num::{IntErrorKind, TryFromIntError}; +use std::time::Duration; + +use crate::clock::{self, Stopped, Working}; + +/// This trait defines the operations that can be performed on a `TimeExtent`. +pub trait Extent: Sized + Default { + type Base; + type Multiplier; + type Product; + + /// It creates a new `TimeExtent`. + fn new(unit: &Self::Base, count: &Self::Multiplier) -> Self; + + /// It increases the `TimeExtent` by a multiplier. 
+ /// + /// # Errors + /// + /// Will return `IntErrorKind` if `add` would overflow the internal `Duration`. + fn increase(&self, add: Self::Multiplier) -> Result; + + /// It decreases the `TimeExtent` by a multiplier. + /// + /// # Errors + /// + /// Will return `IntErrorKind` if `sub` would underflow the internal `Duration`. + fn decrease(&self, sub: Self::Multiplier) -> Result; + + /// It returns the total `Duration` of the `TimeExtent`. + fn total(&self) -> Option>; + + /// It returns the total `Duration` of the `TimeExtent` plus one increment. + fn total_next(&self) -> Option>; +} + +/// The `TimeExtent` base `Duration`, which is the duration of a single interval. +pub type Base = Duration; +/// The `TimeExtent` `Multiplier`, which is the number of `Base` duration intervals. +pub type Multiplier = u64; +/// The `TimeExtent` product, which is the total duration of the `TimeExtent`. +pub type Product = Base; + +/// A `TimeExtent` is a duration of time which contains N times intervals +/// of the same duration. +#[derive(Debug, Default, Hash, PartialEq, Eq)] +pub struct TimeExtent { + pub increment: Base, + pub amount: Multiplier, +} + +/// A zero time extent. It's the additive identity for a `TimeExtent`. +pub const ZERO: TimeExtent = TimeExtent { + increment: Base::ZERO, + amount: Multiplier::MIN, +}; + +/// The maximum value for a `TimeExtent`. 
+pub const MAX: TimeExtent = TimeExtent { + increment: Base::MAX, + amount: Multiplier::MAX, +}; + +impl TimeExtent { + #[must_use] + pub const fn from_sec(seconds: u64, amount: &Multiplier) -> Self { + Self { + increment: Base::from_secs(seconds), + amount: *amount, + } + } +} + +fn checked_duration_from_nanos(time: u128) -> Result { + const NANOS_PER_SEC: u32 = 1_000_000_000; + + let secs = time.div_euclid(u128::from(NANOS_PER_SEC)); + let nanos = time.rem_euclid(u128::from(NANOS_PER_SEC)); + + assert!(nanos < u128::from(NANOS_PER_SEC)); + + match u64::try_from(secs) { + Err(error) => Err(error), + Ok(secs) => Ok(Duration::new(secs, nanos.try_into().unwrap())), + } +} + +impl Extent for TimeExtent { + type Base = Base; + type Multiplier = Multiplier; + type Product = Product; + + fn new(increment: &Self::Base, amount: &Self::Multiplier) -> Self { + Self { + increment: *increment, + amount: *amount, + } + } + + fn increase(&self, add: Self::Multiplier) -> Result { + match self.amount.checked_add(add) { + None => Err(IntErrorKind::PosOverflow), + Some(amount) => Ok(Self { + increment: self.increment, + amount, + }), + } + } + + fn decrease(&self, sub: Self::Multiplier) -> Result { + match self.amount.checked_sub(sub) { + None => Err(IntErrorKind::NegOverflow), + Some(amount) => Ok(Self { + increment: self.increment, + amount, + }), + } + } + + fn total(&self) -> Option> { + self.increment + .as_nanos() + .checked_mul(u128::from(self.amount)) + .map(checked_duration_from_nanos) + } + + fn total_next(&self) -> Option> { + self.increment + .as_nanos() + .checked_mul(u128::from(self.amount) + 1) + .map(checked_duration_from_nanos) + } +} + +/// A `TimeExtent` maker. It's a clock base on time extents. +/// It gives you the time in time extents. +pub trait Make: Sized +where + Clock: clock::Time, +{ + /// It gives you the current time extent (with a certain increment) for + /// the current time. It gets the current timestamp front the `Clock`. 
+ /// + /// For example: + /// + /// - If the base increment is `1` second, it will return a time extent + /// whose duration is `1 second` and whose multiplier is the the number + /// of seconds since the Unix Epoch (time extent). + /// - If the base increment is `1` minute, it will return a time extent + /// whose duration is `60 seconds` and whose multiplier is the number of + /// minutes since the Unix Epoch (time extent). + #[must_use] + fn now(increment: &Base) -> Option> { + Clock::now() + .as_nanos() + .checked_div((*increment).as_nanos()) + .map(|amount| match Multiplier::try_from(amount) { + Err(error) => Err(error), + Ok(amount) => Ok(TimeExtent::new(increment, &amount)), + }) + } + + /// Same as [`now`](crate::time_extent::Make::now), but it + /// will add an extra duration to the current time before calculating the + /// time extent. It gives you a time extent for a time in the future. + #[must_use] + fn now_after(increment: &Base, add_time: &Duration) -> Option> { + match Clock::now_add(add_time) { + None => None, + Some(time) => time + .as_nanos() + .checked_div(increment.as_nanos()) + .map(|amount| match Multiplier::try_from(amount) { + Err(error) => Err(error), + Ok(amount) => Ok(TimeExtent::new(increment, &amount)), + }), + } + } + + /// Same as [`now`](crate::time_extent::Make::now), but it + /// will subtract a duration to the current time before calculating the + /// time extent. It gives you a time extent for a time in the past. + #[must_use] + fn now_before(increment: &Base, sub_time: &Duration) -> Option> { + match Clock::now_sub(sub_time) { + None => None, + Some(time) => time + .as_nanos() + .checked_div(increment.as_nanos()) + .map(|amount| match Multiplier::try_from(amount) { + Err(error) => Err(error), + Ok(amount) => Ok(TimeExtent::new(increment, &amount)), + }), + } + } +} + +/// A `TimeExtent` maker which makes `TimeExtents`. +/// +/// It's a clock which measures time in `TimeExtents`. 
+#[derive(Debug)]
+pub struct Maker<Clock: clock::Time> {
+    clock: std::marker::PhantomData<Clock>,
+}
+
+/// A `TimeExtent` maker which makes `TimeExtents` from the `Working` clock.
+pub type WorkingTimeExtentMaker = Maker<clock::Working>;
+
+/// A `TimeExtent` maker which makes `TimeExtents` from the `Stopped` clock.
+pub type StoppedTimeExtentMaker = Maker<clock::Stopped>;
+
+impl Make<clock::Working> for WorkingTimeExtentMaker {}
+impl Make<clock::Stopped> for StoppedTimeExtentMaker {}
#[test] + fn it_should_make_empty_for_zero() { + assert_eq!(TimeExtent::from_sec(u64::MIN, &Multiplier::MIN), ZERO); + } + #[test] + fn it_should_make_from_seconds() { + assert_eq!( + TimeExtent::from_sec(TIME_EXTENT_VAL.increment.as_secs(), &TIME_EXTENT_VAL.amount), + TIME_EXTENT_VAL + ); + } + } + + mod fn_new { + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Base, Extent, Multiplier, TimeExtent, ZERO}; + + #[test] + fn it_should_make_empty_for_zero() { + assert_eq!(TimeExtent::new(&Base::ZERO, &Multiplier::MIN), ZERO); + } + + #[test] + fn it_should_make_new() { + assert_eq!( + TimeExtent::new(&Base::from_millis(2), &TIME_EXTENT_VAL.amount), + TimeExtent { + increment: Base::from_millis(2), + amount: TIME_EXTENT_VAL.amount + } + ); + } + } + + mod fn_increase { + use std::num::IntErrorKind; + + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Extent, TimeExtent, ZERO}; + + #[test] + fn it_should_not_increase_for_zero() { + assert_eq!(ZERO.increase(0).unwrap(), ZERO); + } + + #[test] + fn it_should_increase() { + assert_eq!( + TIME_EXTENT_VAL.increase(50).unwrap(), + TimeExtent { + increment: TIME_EXTENT_VAL.increment, + amount: TIME_EXTENT_VAL.amount + 50, + } + ); + } + + #[test] + fn it_should_fail_when_attempting_to_increase_beyond_bounds() { + assert_eq!(TIME_EXTENT_VAL.increase(u64::MAX), Err(IntErrorKind::PosOverflow)); + } + } + + mod fn_decrease { + use std::num::IntErrorKind; + + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Extent, TimeExtent, ZERO}; + + #[test] + fn it_should_not_decrease_for_zero() { + assert_eq!(ZERO.decrease(0).unwrap(), ZERO); + } + + #[test] + fn it_should_decrease() { + assert_eq!( + TIME_EXTENT_VAL.decrease(50).unwrap(), + TimeExtent { + increment: TIME_EXTENT_VAL.increment, + amount: TIME_EXTENT_VAL.amount - 50, + } + ); + } + + #[test] + fn it_should_fail_when_attempting_to_decrease_beyond_bounds() { + 
assert_eq!(TIME_EXTENT_VAL.decrease(u64::MAX), Err(IntErrorKind::NegOverflow)); + } + } + + mod fn_total { + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO}; + + #[test] + fn it_should_be_zero_for_zero() { + assert_eq!(ZERO.total().unwrap().unwrap(), Product::ZERO); + } + + #[test] + fn it_should_give_a_total() { + assert_eq!( + TIME_EXTENT_VAL.total().unwrap().unwrap(), + Product::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) + ); + + assert_eq!( + TimeExtent::new(&Base::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) + .total() + .unwrap() + .unwrap(), + Product::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) + ); + + assert_eq!( + TimeExtent::new(&Base::from_secs(1), &(u64::MAX)).total().unwrap().unwrap(), + Product::from_secs(u64::MAX) + ); + } + + #[test] + fn it_should_fail_when_too_large() { + assert_eq!(MAX.total(), None); + } + + #[test] + fn it_should_fail_when_product_is_too_large() { + let time_extent = TimeExtent { + increment: MAX.increment, + amount: 2, + }; + assert_eq!( + time_extent.total().unwrap().unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } + } + + mod fn_total_next { + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO}; + + #[test] + fn it_should_be_zero_for_zero() { + assert_eq!(ZERO.total_next().unwrap().unwrap(), Product::ZERO); + } + + #[test] + fn it_should_give_a_total() { + assert_eq!( + TIME_EXTENT_VAL.total_next().unwrap().unwrap(), + Product::from_secs(TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount + 1)) + ); + + assert_eq!( + TimeExtent::new(&Base::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) + .total_next() + .unwrap() + .unwrap(), + Product::new( + TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount), + Base::from_millis(2).as_nanos().try_into().unwrap() + ) + ); + + assert_eq!( + 
TimeExtent::new(&Base::from_secs(1), &(u64::MAX - 1)) + .total_next() + .unwrap() + .unwrap(), + Product::from_secs(u64::MAX) + ); + } + + #[test] + fn it_should_fail_when_too_large() { + assert_eq!(MAX.total_next(), None); + } + + #[test] + fn it_should_fail_when_product_is_too_large() { + let time_extent = TimeExtent { + increment: MAX.increment, + amount: 2, + }; + assert_eq!( + time_extent.total_next().unwrap().unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } + } + } + + mod make_time_extent { + + mod fn_now { + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::clock::stopped::Stopped as _; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Base, Make, TimeExtent}; + use crate::{CurrentClock, DefaultTimeExtentMaker}; + + #[test] + fn it_should_give_a_time_extent() { + assert_eq!( + DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), + TimeExtent { + increment: TIME_EXTENT_VAL.increment, + amount: 0 + } + ); + + CurrentClock::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); + + assert_eq!( + DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), + TIME_EXTENT_VAL + ); + } + + #[test] + fn it_should_fail_for_zero() { + assert_eq!(DefaultTimeExtentMaker::now(&Base::ZERO), None); + } + + #[test] + fn it_should_fail_if_amount_exceeds_bounds() { + CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!( + DefaultTimeExtentMaker::now(&Base::from_millis(1)).unwrap().unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } + } + + mod fn_now_after { + use std::time::Duration; + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::clock::stopped::Stopped as _; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Base, Make}; + use crate::{CurrentClock, DefaultTimeExtentMaker}; + + #[test] + fn it_should_give_a_time_extent() { + assert_eq!( + DefaultTimeExtentMaker::now_after( + 
&TIME_EXTENT_VAL.increment, + &Duration::from_secs(TIME_EXTENT_VAL.amount * 2) + ) + .unwrap() + .unwrap(), + TIME_EXTENT_VAL + ); + } + + #[test] + fn it_should_fail_for_zero() { + assert_eq!(DefaultTimeExtentMaker::now_after(&Base::ZERO, &Duration::ZERO), None); + + CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!(DefaultTimeExtentMaker::now_after(&Base::ZERO, &Duration::MAX), None); + } + + #[test] + fn it_should_fail_if_amount_exceeds_bounds() { + CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!( + DefaultTimeExtentMaker::now_after(&Base::from_millis(1), &Duration::ZERO) + .unwrap() + .unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } + } + mod fn_now_before { + use std::time::Duration; + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::clock::stopped::Stopped as _; + use crate::time_extent::{Base, Make, TimeExtent}; + use crate::{CurrentClock, DefaultTimeExtentMaker}; + + #[test] + fn it_should_give_a_time_extent() { + CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); + + assert_eq!( + DefaultTimeExtentMaker::now_before( + &Base::from_secs(u64::from(u32::MAX)), + &Duration::from_secs(u64::from(u32::MAX)) + ) + .unwrap() + .unwrap(), + TimeExtent { + increment: Base::from_secs(u64::from(u32::MAX)), + amount: 4_294_967_296 + } + ); + } + + #[test] + fn it_should_fail_for_zero() { + assert_eq!(DefaultTimeExtentMaker::now_before(&Base::ZERO, &Duration::ZERO), None); + + assert_eq!(DefaultTimeExtentMaker::now_before(&Base::ZERO, &Duration::MAX), None); + } + + #[test] + fn it_should_fail_if_amount_exceeds_bounds() { + CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!( + DefaultTimeExtentMaker::now_before(&Base::from_millis(1), &Duration::ZERO) + .unwrap() + .unwrap_err(), + u64::try_from(u128::MAX).unwrap_err() + ); + } + } + } +} diff --git a/packages/clock/tests/clock/mod.rs b/packages/clock/tests/clock/mod.rs new file mode 100644 index 000000000..5d94bb83d --- 
/dev/null +++ b/packages/clock/tests/clock/mod.rs @@ -0,0 +1,16 @@ +use std::time::Duration; + +use torrust_tracker_clock::clock::Time; + +use crate::CurrentClock; + +#[test] +fn it_should_use_stopped_time_for_testing() { + assert_eq!(CurrentClock::dbg_clock_type(), "Stopped".to_owned()); + + let time = CurrentClock::now(); + std::thread::sleep(Duration::from_millis(50)); + let time_2 = CurrentClock::now(); + + assert_eq!(time, time_2); +} diff --git a/packages/clock/tests/integration.rs b/packages/clock/tests/integration.rs new file mode 100644 index 000000000..fa500227a --- /dev/null +++ b/packages/clock/tests/integration.rs @@ -0,0 +1,19 @@ +//! Integration tests. +//! +//! ```text +//! cargo test --test integration +//! ``` + +//mod common; +mod clock; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = torrust_tracker_clock::clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = torrust_tracker_clock::clock::Stopped; diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml new file mode 100644 index 000000000..1ba4830ca --- /dev/null +++ b/packages/configuration/Cargo.toml @@ -0,0 +1,30 @@ +[package] +description = "A library to provide configuration to the Torrust Tracker." 
+keywords = ["config", "library", "settings"] +name = "torrust-tracker-configuration" +readme = "README.md" + +authors.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +camino = { version = "1", features = ["serde", "serde1"] } +derive_more = { version = "1", features = ["constructor", "display"] } +figment = { version = "0", features = ["env", "test", "toml"] } +serde = { version = "1", features = ["derive"] } +serde_json = { version = "1", features = ["preserve_order"] } +serde_with = "3" +thiserror = "1" +toml = "0" +torrust-tracker-located-error = { version = "3.0.0", path = "../located-error" } +url = "2" + +[dev-dependencies] +uuid = { version = "1", features = ["v4"] } diff --git a/packages/configuration/LICENSE b/packages/configuration/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/configuration/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. 
+ + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/packages/configuration/README.md b/packages/configuration/README.md new file mode 100644 index 000000000..ccae51d70 --- /dev/null +++ b/packages/configuration/README.md @@ -0,0 +1,11 @@ +# Torrust Tracker Configuration + +A library to provide configuration to the [Torrust Tracker](https://github.com/torrust/torrust-tracker). + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-configuration). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs new file mode 100644 index 000000000..1ab3479fa --- /dev/null +++ b/packages/configuration/src/lib.rs @@ -0,0 +1,342 @@ +//! Configuration data structures for [Torrust Tracker](https://docs.rs/torrust-tracker). +//! +//! This module contains the configuration data structures for the +//! Torrust Tracker, which is a `BitTorrent` tracker server. +//! +//! The current version for configuration is [`v2`]. +pub mod v2_0_0; +pub mod validator; + +use std::collections::HashMap; +use std::env; +use std::sync::Arc; +use std::time::Duration; + +use camino::Utf8PathBuf; +use derive_more::{Constructor, Display}; +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; +use thiserror::Error; +use torrust_tracker_located_error::{DynError, LocatedError}; + +/// The maximum number of returned peers for a torrent. +pub const TORRENT_PEERS_LIMIT: usize = 74; + +/// Default timeout for sending and receiving packets. And waiting for sockets +/// to be readable and writable. 
+pub const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5); + +// Environment variables + +/// The whole `tracker.toml` file content. It has priority over the config file. +/// Even if the file is not on the default path. +const ENV_VAR_CONFIG_TOML: &str = "TORRUST_TRACKER_CONFIG_TOML"; + +/// The `tracker.toml` file location. +pub const ENV_VAR_CONFIG_TOML_PATH: &str = "TORRUST_TRACKER_CONFIG_TOML_PATH"; + +pub type Configuration = v2_0_0::Configuration; +pub type Core = v2_0_0::core::Core; +pub type HealthCheckApi = v2_0_0::health_check_api::HealthCheckApi; +pub type HttpApi = v2_0_0::tracker_api::HttpApi; +pub type HttpTracker = v2_0_0::http_tracker::HttpTracker; +pub type UdpTracker = v2_0_0::udp_tracker::UdpTracker; +pub type Database = v2_0_0::database::Database; +pub type Driver = v2_0_0::database::Driver; +pub type Threshold = v2_0_0::logging::Threshold; + +pub type AccessTokens = HashMap; + +pub const LATEST_VERSION: &str = "2.0.0"; + +/// Info about the configuration specification. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Display, Clone)] +#[display("Metadata(app: {app}, purpose: {purpose}, schema_version: {schema_version})")] +pub struct Metadata { + /// The application this configuration is valid for. + #[serde(default = "Metadata::default_app")] + app: App, + + /// The purpose of this parsed file. + #[serde(default = "Metadata::default_purpose")] + purpose: Purpose, + + /// The schema version for the configuration. 
+ #[serde(default = "Metadata::default_schema_version")] + #[serde(flatten)] + schema_version: Version, +} + +impl Default for Metadata { + fn default() -> Self { + Self { + app: Self::default_app(), + purpose: Self::default_purpose(), + schema_version: Self::default_schema_version(), + } + } +} + +impl Metadata { + fn default_app() -> App { + App::TorrustTracker + } + + fn default_purpose() -> Purpose { + Purpose::Configuration + } + + fn default_schema_version() -> Version { + Version::latest() + } +} + +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Display, Clone)] +#[serde(rename_all = "kebab-case")] +pub enum App { + TorrustTracker, +} + +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Display, Clone)] +#[serde(rename_all = "lowercase")] +pub enum Purpose { + Configuration, +} + +/// The configuration version. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Display, Clone)] +#[serde(rename_all = "lowercase")] +pub struct Version { + #[serde(default = "Version::default_semver")] + schema_version: String, +} + +impl Default for Version { + fn default() -> Self { + Self { + schema_version: Self::default_semver(), + } + } +} + +impl Version { + fn new(semver: &str) -> Self { + Self { + schema_version: semver.to_owned(), + } + } + + fn latest() -> Self { + Self { + schema_version: LATEST_VERSION.to_string(), + } + } + + fn default_semver() -> String { + LATEST_VERSION.to_string() + } +} + +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Constructor)] +pub struct TrackerPolicy { + // Cleanup job configuration + /// Maximum time in seconds that a peer can be inactive before being + /// considered an inactive peer. If a peer is inactive for more than this + /// time, it will be removed from the torrent peer list. + #[serde(default = "TrackerPolicy::default_max_peer_timeout")] + pub max_peer_timeout: u32, + + /// If enabled the tracker will persist the number of completed downloads. 
+ /// That's how many times a torrent has been downloaded completely. + #[serde(default = "TrackerPolicy::default_persistent_torrent_completed_stat")] + pub persistent_torrent_completed_stat: bool, + + /// If enabled, the tracker will remove torrents that have no peers. + /// The clean up torrent job runs every `inactive_peer_cleanup_interval` + /// seconds and it removes inactive peers. Eventually, the peer list of a + /// torrent could be empty and the torrent will be removed if this option is + /// enabled. + #[serde(default = "TrackerPolicy::default_remove_peerless_torrents")] + pub remove_peerless_torrents: bool, +} + +impl Default for TrackerPolicy { + fn default() -> Self { + Self { + max_peer_timeout: Self::default_max_peer_timeout(), + persistent_torrent_completed_stat: Self::default_persistent_torrent_completed_stat(), + remove_peerless_torrents: Self::default_remove_peerless_torrents(), + } + } +} + +impl TrackerPolicy { + fn default_max_peer_timeout() -> u32 { + 900 + } + + fn default_persistent_torrent_completed_stat() -> bool { + false + } + + fn default_remove_peerless_torrents() -> bool { + true + } +} + +/// Information required for loading config +#[derive(Debug, Default, Clone)] +pub struct Info { + config_toml: Option, + config_toml_path: String, +} + +impl Info { + /// Build Configuration Info + /// + /// # Errors + /// + /// Will return `Err` if unable to obtain a configuration. 
+ /// + #[allow(clippy::needless_pass_by_value)] + pub fn new(default_config_toml_path: String) -> Result { + let env_var_config_toml = ENV_VAR_CONFIG_TOML.to_string(); + let env_var_config_toml_path = ENV_VAR_CONFIG_TOML_PATH.to_string(); + + let config_toml = if let Ok(config_toml) = env::var(env_var_config_toml) { + println!("Loading extra configuration from environment variable:\n {config_toml}"); + Some(config_toml) + } else { + None + }; + + let config_toml_path = if let Ok(config_toml_path) = env::var(env_var_config_toml_path) { + println!("Loading extra configuration from file: `{config_toml_path}` ..."); + config_toml_path + } else { + println!("Loading extra configuration from default configuration file: `{default_config_toml_path}` ..."); + default_config_toml_path + }; + + Ok(Self { + config_toml, + config_toml_path, + }) + } +} + +/// Announce policy +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Copy, Constructor)] +pub struct AnnouncePolicy { + /// Interval in seconds that the client should wait between sending regular + /// announce requests to the tracker. + /// + /// It's a **recommended** wait time between announcements. + /// + /// This is the standard amount of time that clients should wait between + /// sending consecutive announcements to the tracker. This value is set by + /// the tracker and is typically provided in the tracker's response to a + /// client's initial request. It serves as a guideline for clients to know + /// how often they should contact the tracker for updates on the peer list, + /// while ensuring that the tracker is not overwhelmed with requests. + #[serde(default = "AnnouncePolicy::default_interval")] + pub interval: u32, + + /// Minimum announce interval. Clients must not reannounce more frequently + /// than this. + /// + /// It establishes the shortest allowed wait time. + /// + /// This is an optional parameter in the protocol that the tracker may + /// provide in its response. 
It sets a lower limit on the frequency at which + /// clients are allowed to send announcements. Clients should respect this + /// value to prevent sending too many requests in a short period, which + /// could lead to excessive load on the tracker or even getting banned by + /// the tracker for not adhering to the rules. + #[serde(default = "AnnouncePolicy::default_interval_min")] + pub interval_min: u32, +} + +impl Default for AnnouncePolicy { + fn default() -> Self { + Self { + interval: Self::default_interval(), + interval_min: Self::default_interval_min(), + } + } +} + +impl AnnouncePolicy { + fn default_interval() -> u32 { + 120 + } + + fn default_interval_min() -> u32 { + 120 + } +} + +/// Errors that can occur when loading the configuration. +#[derive(Error, Debug)] +pub enum Error { + /// Unable to load the configuration from the environment variable. + /// This error only occurs if there is no configuration file and the + /// `TORRUST_TRACKER_CONFIG_TOML` environment variable is not set. + #[error("Unable to load from Environmental Variable: {source}")] + UnableToLoadFromEnvironmentVariable { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + + #[error("Unable to load from Config File: {source}")] + UnableToLoadFromConfigFile { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + + /// Unable to load the configuration from the configuration file. + #[error("Failed processing the configuration: {source}")] + ConfigError { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + + #[error("The error for errors that can never happen.")] + Infallible, + + #[error("Unsupported configuration version: {version}")] + UnsupportedVersion { version: Version }, + + #[error("Missing mandatory configuration option. 
Option path: {path}")] + MissingMandatoryOption { path: String }, +} + +impl From for Error { + #[track_caller] + fn from(err: figment::Error) -> Self { + Self::ConfigError { + source: (Arc::new(err) as DynError).into(), + } + } +} + +#[serde_as] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Default)] +pub struct TslConfig { + /// Path to the SSL certificate file. + #[serde(default = "TslConfig::default_ssl_cert_path")] + pub ssl_cert_path: Utf8PathBuf, + + /// Path to the SSL key file. + #[serde(default = "TslConfig::default_ssl_key_path")] + pub ssl_key_path: Utf8PathBuf, +} + +impl TslConfig { + #[allow(clippy::unnecessary_wraps)] + fn default_ssl_cert_path() -> Utf8PathBuf { + Utf8PathBuf::new() + } + + #[allow(clippy::unnecessary_wraps)] + fn default_ssl_key_path() -> Utf8PathBuf { + Utf8PathBuf::new() + } +} diff --git a/packages/configuration/src/v2_0_0/core.rs b/packages/configuration/src/v2_0_0/core.rs new file mode 100644 index 000000000..ed3e6aeb7 --- /dev/null +++ b/packages/configuration/src/v2_0_0/core.rs @@ -0,0 +1,144 @@ +use derive_more::{Constructor, Display}; +use serde::{Deserialize, Serialize}; + +use super::network::Network; +use crate::v2_0_0::database::Database; +use crate::validator::{SemanticValidationError, Validator}; +use crate::{AnnouncePolicy, TrackerPolicy}; + +#[allow(clippy::struct_excessive_bools)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct Core { + /// Announce policy configuration. + #[serde(default = "Core::default_announce_policy")] + pub announce_policy: AnnouncePolicy, + + /// Database configuration. + #[serde(default = "Core::default_database")] + pub database: Database, + + /// Interval in seconds that the cleanup job will run to remove inactive + /// peers from the torrent peer list. + #[serde(default = "Core::default_inactive_peer_cleanup_interval")] + pub inactive_peer_cleanup_interval: u64, + + /// When `true` only approved torrents can be announced in the tracker. 
+ #[serde(default = "Core::default_listed")] + pub listed: bool, + + /// Network configuration. + #[serde(default = "Core::default_network")] + pub net: Network, + + /// When `true` clients require a key to connect and use the tracker. + #[serde(default = "Core::default_private")] + pub private: bool, + + /// Configuration specific when the tracker is running in private mode. + #[serde(default = "Core::default_private_mode")] + pub private_mode: Option, + + /// Tracker policy configuration. + #[serde(default = "Core::default_tracker_policy")] + pub tracker_policy: TrackerPolicy, + + /// Whether the tracker should collect statistics about tracker usage. + /// If enabled, the tracker will collect statistics like the number of + /// connections handled, the number of announce requests handled, etc. + /// Refer to the [`Tracker`](https://docs.rs/torrust-tracker) for more + /// information about the collected metrics. + #[serde(default = "Core::default_tracker_usage_statistics")] + pub tracker_usage_statistics: bool, +} + +impl Default for Core { + fn default() -> Self { + Self { + announce_policy: Self::default_announce_policy(), + database: Self::default_database(), + inactive_peer_cleanup_interval: Self::default_inactive_peer_cleanup_interval(), + listed: Self::default_listed(), + net: Self::default_network(), + private: Self::default_private(), + private_mode: Self::default_private_mode(), + tracker_policy: Self::default_tracker_policy(), + tracker_usage_statistics: Self::default_tracker_usage_statistics(), + } + } +} + +impl Core { + fn default_announce_policy() -> AnnouncePolicy { + AnnouncePolicy::default() + } + + fn default_database() -> Database { + Database::default() + } + + fn default_inactive_peer_cleanup_interval() -> u64 { + 600 + } + + fn default_listed() -> bool { + false + } + + fn default_network() -> Network { + Network::default() + } + + fn default_private() -> bool { + false + } + + fn default_private_mode() -> Option { + if 
Self::default_private() { + Some(PrivateMode::default()) + } else { + None + } + } + + fn default_tracker_policy() -> TrackerPolicy { + TrackerPolicy::default() + } + fn default_tracker_usage_statistics() -> bool { + true + } +} + +/// Configuration specific when the tracker is running in private mode. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Copy, Constructor, Display)] +pub struct PrivateMode { + /// A flag to disable expiration date for peer keys. + /// + /// When true, if the keys is not permanent the expiration date will be + /// ignored. The key will be accepted even if it has expired. + #[serde(default = "PrivateMode::default_check_keys_expiration")] + pub check_keys_expiration: bool, +} + +impl Default for PrivateMode { + fn default() -> Self { + Self { + check_keys_expiration: Self::default_check_keys_expiration(), + } + } +} + +impl PrivateMode { + fn default_check_keys_expiration() -> bool { + true + } +} + +impl Validator for Core { + fn validate(&self) -> Result<(), SemanticValidationError> { + if self.private_mode.is_some() && !self.private { + return Err(SemanticValidationError::UselessPrivateModeSection); + } + + Ok(()) + } +} diff --git a/packages/configuration/src/v2_0_0/database.rs b/packages/configuration/src/v2_0_0/database.rs new file mode 100644 index 000000000..c2b24d809 --- /dev/null +++ b/packages/configuration/src/v2_0_0/database.rs @@ -0,0 +1,84 @@ +use serde::{Deserialize, Serialize}; +use url::Url; + +#[allow(clippy::struct_excessive_bools)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct Database { + // Database configuration + /// Database driver. Possible values are: `sqlite3`, and `mysql`. + #[serde(default = "Database::default_driver")] + pub driver: Driver, + + /// Database connection string. The format depends on the database driver. + /// For `sqlite3`, the format is `path/to/database.db`, for example: + /// `./storage/tracker/lib/database/sqlite3.db`. 
+ /// For `MySQL`, the format is `mysql://db_user:db_user_password@host:port/db_name`, for + /// example: `mysql://root:password@localhost:3306/torrust`. + #[serde(default = "Database::default_path")] + pub path: String, +} + +impl Default for Database { + fn default() -> Self { + Self { + driver: Self::default_driver(), + path: Self::default_path(), + } + } +} + +impl Database { + fn default_driver() -> Driver { + Driver::Sqlite3 + } + + fn default_path() -> String { + String::from("./storage/tracker/lib/database/sqlite3.db") + } + + /// Masks secrets in the configuration. + /// + /// # Panics + /// + /// Will panic if the database path for `MySQL` is not a valid URL. + pub fn mask_secrets(&mut self) { + match self.driver { + Driver::Sqlite3 => { + // Nothing to mask + } + Driver::MySQL => { + let mut url = Url::parse(&self.path).expect("path for MySQL driver should be a valid URL"); + url.set_password(Some("***")).expect("url password should be changed"); + self.path = url.to_string(); + } + } + } +} + +/// The database management system used by the tracker. +#[derive(Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Debug, Hash, Clone)] +#[serde(rename_all = "lowercase")] +pub enum Driver { + /// The `Sqlite3` database driver. + Sqlite3, + /// The `MySQL` database driver. 
+ MySQL, +} + +#[cfg(test)] +mod tests { + + use super::{Database, Driver}; + + #[test] + fn it_should_allow_masking_the_mysql_user_password() { + let mut database = Database { + driver: Driver::MySQL, + path: "mysql://root:password@localhost:3306/torrust".to_string(), + }; + + database.mask_secrets(); + + assert_eq!(database.path, "mysql://root:***@localhost:3306/torrust".to_string()); + } +} diff --git a/packages/configuration/src/v2_0_0/health_check_api.rs b/packages/configuration/src/v2_0_0/health_check_api.rs new file mode 100644 index 000000000..61178fa80 --- /dev/null +++ b/packages/configuration/src/v2_0_0/health_check_api.rs @@ -0,0 +1,30 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; + +/// Configuration for the Health Check API. +#[serde_as] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct HealthCheckApi { + /// The address the API will bind to. + /// The format is `ip:port`, for example `127.0.0.1:1313`. If you want to + /// listen to all interfaces, use `0.0.0.0`. If you want the operating + /// system to choose a random port, use port `0`. + #[serde(default = "HealthCheckApi::default_bind_address")] + pub bind_address: SocketAddr, +} + +impl Default for HealthCheckApi { + fn default() -> Self { + Self { + bind_address: Self::default_bind_address(), + } + } +} + +impl HealthCheckApi { + fn default_bind_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 1313) + } +} diff --git a/packages/configuration/src/v2_0_0/http_tracker.rs b/packages/configuration/src/v2_0_0/http_tracker.rs new file mode 100644 index 000000000..42ec02bf2 --- /dev/null +++ b/packages/configuration/src/v2_0_0/http_tracker.rs @@ -0,0 +1,41 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; + +use crate::TslConfig; + +/// Configuration for each HTTP tracker. 
+#[serde_as] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct HttpTracker { + /// The address the tracker will bind to. + /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to + /// listen to all interfaces, use `0.0.0.0`. If you want the operating + /// system to choose a random port, use port `0`. + #[serde(default = "HttpTracker::default_bind_address")] + pub bind_address: SocketAddr, + + /// TSL config. + #[serde(default = "HttpTracker::default_tsl_config")] + pub tsl_config: Option, +} + +impl Default for HttpTracker { + fn default() -> Self { + Self { + bind_address: Self::default_bind_address(), + tsl_config: Self::default_tsl_config(), + } + } +} + +impl HttpTracker { + fn default_bind_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 7070) + } + + fn default_tsl_config() -> Option { + None + } +} diff --git a/packages/configuration/src/v2_0_0/logging.rs b/packages/configuration/src/v2_0_0/logging.rs new file mode 100644 index 000000000..e7dbe146c --- /dev/null +++ b/packages/configuration/src/v2_0_0/logging.rs @@ -0,0 +1,41 @@ +use serde::{Deserialize, Serialize}; + +#[allow(clippy::struct_excessive_bools)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct Logging { + /// Logging level. Possible values are: `Off`, `Error`, `Warn`, `Info`, + /// `Debug` and `Trace`. Default is `Info`. + #[serde(default = "Logging::default_threshold")] + pub threshold: Threshold, +} + +impl Default for Logging { + fn default() -> Self { + Self { + threshold: Self::default_threshold(), + } + } +} + +impl Logging { + fn default_threshold() -> Threshold { + Threshold::Info + } +} + +#[derive(Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Debug, Hash, Clone)] +#[serde(rename_all = "lowercase")] +pub enum Threshold { + /// A threshold lower than all security levels. + Off, + /// Corresponds to the `Error` security level. 
+ Error, + /// Corresponds to the `Warn` security level. + Warn, + /// Corresponds to the `Info` security level. + Info, + /// Corresponds to the `Debug` security level. + Debug, + /// Corresponds to the `Trace` security level. + Trace, +} diff --git a/packages/configuration/src/v2_0_0/mod.rs b/packages/configuration/src/v2_0_0/mod.rs new file mode 100644 index 000000000..5067210bb --- /dev/null +++ b/packages/configuration/src/v2_0_0/mod.rs @@ -0,0 +1,672 @@ +//! Version `1` for [Torrust Tracker](https://docs.rs/torrust-tracker) +//! configuration data structures. +//! +//! This module contains the configuration data structures for the +//! Torrust Tracker, which is a `BitTorrent` tracker server. +//! +//! The configuration is loaded from a [TOML](https://toml.io/en/) file +//! `tracker.toml` in the project root folder or from an environment variable +//! with the same content as the file. +//! +//! Configuration can not only be loaded from a file, but also from an +//! environment variable `TORRUST_TRACKER_CONFIG_TOML`. This is useful when running +//! the tracker in a Docker container or environments where you do not have a +//! persistent storage or you cannot inject a configuration file. Refer to +//! [`Torrust Tracker documentation`](https://docs.rs/torrust-tracker) for more +//! information about how to pass configuration to the tracker. +//! +//! When you run the tracker without providing the configuration via a file or +//! env var, the default configuration is used. +//! +//! # Table of contents +//! +//! - [Sections](#sections) +//! - [Port binding](#port-binding) +//! - [TSL support](#tsl-support) +//! - [Generating self-signed certificates](#generating-self-signed-certificates) +//! - [Default configuration](#default-configuration) +//! +//! ## Sections +//! +//! Each section in the toml structure is mapped to a data structure. For +//! example, the `[http_api]` section (configuration for the tracker HTTP API) +//! 
is mapped to the [`HttpApi`] structure. +//! +//! > **NOTICE**: some sections are arrays of structures. For example, the +//! > `[[udp_trackers]]` section is an array of [`UdpTracker`] since +//! > you can have multiple running UDP trackers bound to different ports. +//! +//! Please refer to the documentation of each structure for more information +//! about each section. +//! +//! - [`Core configuration`](crate::v2::Configuration) +//! - [`HTTP API configuration`](crate::v2::tracker_api::HttpApi) +//! - [`HTTP Tracker configuration`](crate::v2::http_tracker::HttpTracker) +//! - [`UDP Tracker configuration`](crate::v2::udp_tracker::UdpTracker) +//! - [`Health Check API configuration`](crate::v2::health_check_api::HealthCheckApi) +//! +//! ## Port binding +//! +//! For the API, HTTP and UDP trackers you can bind to a random port by using +//! port `0`. For example, if you want to bind to a random port on all +//! interfaces, use `0.0.0.0:0`. The OS will choose a random free port. +//! +//! ## TSL support +//! +//! For the API and HTTP tracker you can enable TSL by setting `ssl_enabled` to +//! `true` and setting the paths to the certificate and key files. +//! +//! Typically, you will have a `storage` directory like the following: +//! +//! ```text +//! storage/ +//! ├── config.toml +//! └── tracker +//! ├── etc +//! │ └── tracker.toml +//! ├── lib +//! │ ├── database +//! │ │ ├── sqlite3.db +//! │ │ └── sqlite.db +//! │ └── tls +//! │ ├── localhost.crt +//! │ └── localhost.key +//! └── log +//! ``` +//! +//! where the application stores all the persistent data. +//! +//! Alternatively, you could setup a reverse proxy like Nginx or Apache to +//! handle the SSL/TLS part and forward the requests to the tracker. If you do +//! that, you should set [`on_reverse_proxy`](crate::v2::network::Network::on_reverse_proxy) +//! to `true` in the configuration file. It's out of scope for this +//! documentation to explain in detail how to setup a reverse proxy, but the +//! 
configuration file should be something like this: +//! +//! For [NGINX](https://docs.nginx.com/nginx/admin-guide/web-server/reverse-proxy/): +//! +//! ```text +//! # HTTPS only (with SSL - force redirect to HTTPS) +//! +//! server { +//! listen 80; +//! server_name tracker.torrust.com; +//! +//! return 301 https://$host$request_uri; +//! } +//! +//! server { +//! listen 443; +//! server_name tracker.torrust.com; +//! +//! ssl_certificate CERT_PATH +//! ssl_certificate_key CERT_KEY_PATH; +//! +//! location / { +//! proxy_set_header X-Forwarded-For $remote_addr; +//! proxy_pass http://127.0.0.1:6969; +//! } +//! } +//! ``` +//! +//! For [Apache](https://httpd.apache.org/docs/2.4/howto/reverse_proxy.html): +//! +//! ```text +//! # HTTPS only (with SSL - force redirect to HTTPS) +//! +//! +//! ServerAdmin webmaster@tracker.torrust.com +//! ServerName tracker.torrust.com +//! +//! +//! RewriteEngine on +//! RewriteCond %{HTTPS} off +//! RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [END,NE,R=permanent] +//! +//! +//! +//! +//! +//! ServerAdmin webmaster@tracker.torrust.com +//! ServerName tracker.torrust.com +//! +//! +//! Order allow,deny +//! Allow from all +//! +//! +//! ProxyPreserveHost On +//! ProxyRequests Off +//! AllowEncodedSlashes NoDecode +//! +//! ProxyPass / http://localhost:3000/ +//! ProxyPassReverse / http://localhost:3000/ +//! ProxyPassReverse / http://tracker.torrust.com/ +//! +//! RequestHeader set X-Forwarded-Proto "https" +//! RequestHeader set X-Forwarded-Port "443" +//! +//! ErrorLog ${APACHE_LOG_DIR}/tracker.torrust.com-error.log +//! CustomLog ${APACHE_LOG_DIR}/tracker.torrust.com-access.log combined +//! +//! SSLCertificateFile CERT_PATH +//! SSLCertificateKeyFile CERT_KEY_PATH +//! +//! +//! ``` +//! +//! ## Generating self-signed certificates +//! +//! For testing purposes, you can use self-signed certificates. +//! +//! Refer to [Let's Encrypt - Certificates for localhost](https://letsencrypt.org/docs/certificates-for-localhost/) +//! 
for more information. +//! +//! Running the following command will generate a certificate (`localhost.crt`) +//! and key (`localhost.key`) file in your current directory: +//! +//! ```s +//! openssl req -x509 -out localhost.crt -keyout localhost.key \ +//! -newkey rsa:2048 -nodes -sha256 \ +//! -subj '/CN=localhost' -extensions EXT -config <( \ +//! printf "[dn]\nCN=localhost\n[req]\ndistinguished_name = dn\n[EXT]\nsubjectAltName=DNS:localhost\nkeyUsage=digitalSignature\nextendedKeyUsage=serverAuth") +//! ``` +//! +//! You can then use the generated files in the configuration file: +//! +//! ```s +//! [[http_trackers]] +//! ... +//! +//! [http_trackers.tsl_config] +//! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" +//! ssl_key_path = "./storage/tracker/lib/tls/localhost.key" +//! +//! [http_api] +//! ... +//! +//! [http_api.tsl_config] +//! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" +//! ssl_key_path = "./storage/tracker/lib/tls/localhost.key" +//! ``` +//! +//! ## Default configuration +//! +//! The default configuration is: +//! +//! ```toml +//! [logging] +//! threshold = "info" +//! +//! [core] +//! inactive_peer_cleanup_interval = 600 +//! listed = false +//! private = false +//! tracker_usage_statistics = true +//! +//! [core.announce_policy] +//! interval = 120 +//! interval_min = 120 +//! +//! [core.database] +//! driver = "sqlite3" +//! path = "./storage/tracker/lib/database/sqlite3.db" +//! +//! [core.net] +//! external_ip = "0.0.0.0" +//! on_reverse_proxy = false +//! +//! [core.tracker_policy] +//! max_peer_timeout = 900 +//! persistent_torrent_completed_stat = false +//! remove_peerless_torrents = true +//! +//! [http_api] +//! bind_address = "127.0.0.1:1212" +//! +//! [http_api.access_tokens] +//! admin = "MyAccessToken" +//! [health_check_api] +//! 
bind_address = "127.0.0.1:1313" +//!``` +pub mod core; +pub mod database; +pub mod health_check_api; +pub mod http_tracker; +pub mod logging; +pub mod network; +pub mod tracker_api; +pub mod udp_tracker; + +use std::fs; +use std::net::IpAddr; + +use figment::providers::{Env, Format, Serialized, Toml}; +use figment::Figment; +use logging::Logging; +use serde::{Deserialize, Serialize}; + +use self::core::Core; +use self::health_check_api::HealthCheckApi; +use self::http_tracker::HttpTracker; +use self::tracker_api::HttpApi; +use self::udp_tracker::UdpTracker; +use crate::validator::{SemanticValidationError, Validator}; +use crate::{Error, Info, Metadata, Version}; + +/// This configuration version +const VERSION_2_0_0: &str = "2.0.0"; + +/// Prefix for env vars that overwrite configuration options. +const CONFIG_OVERRIDE_PREFIX: &str = "TORRUST_TRACKER_CONFIG_OVERRIDE_"; + +/// Path separator in env var names for nested values in configuration. +const CONFIG_OVERRIDE_SEPARATOR: &str = "__"; + +/// Core configuration for the tracker. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Default, Clone)] +pub struct Configuration { + /// Configuration metadata. + pub metadata: Metadata, + + /// Logging configuration + pub logging: Logging, + + /// Core configuration. + pub core: Core, + + /// The list of UDP trackers the tracker is running. Each UDP tracker + /// represents a UDP server that the tracker is running and it has its own + /// configuration. + pub udp_trackers: Option>, + + /// The list of HTTP trackers the tracker is running. Each HTTP tracker + /// represents a HTTP server that the tracker is running and it has its own + /// configuration. + pub http_trackers: Option>, + + /// The HTTP API configuration. + pub http_api: Option, + + /// The Health Check API configuration. + pub health_check_api: HealthCheckApi, +} + +impl Configuration { + /// Returns the tracker public IP address id defined in the configuration, + /// and `None` otherwise. 
+ #[must_use] + pub fn get_ext_ip(&self) -> Option { + self.core.net.external_ip.as_ref().map(|external_ip| *external_ip) + } + + /// Saves the default configuration at the given path. + /// + /// # Errors + /// + /// Will return `Err` if `path` is not a valid path or the configuration + /// file cannot be created. + pub fn create_default_configuration_file(path: &str) -> Result { + let config = Configuration::default(); + config.save_to_file(path)?; + Ok(config) + } + + /// Loads the configuration from the `Info` struct. The whole + /// configuration in toml format is included in the `info.tracker_toml` + /// string. + /// + /// Configuration provided via env var has priority over config file path. + /// + /// # Errors + /// + /// Will return `Err` if the environment variable does not exist or has a bad configuration. + pub fn load(info: &Info) -> Result { + // Load configuration provided by the user, prioritizing env vars + let figment = if let Some(config_toml) = &info.config_toml { + Figment::from(Toml::string(config_toml)).merge(Env::prefixed(CONFIG_OVERRIDE_PREFIX).split(CONFIG_OVERRIDE_SEPARATOR)) + } else { + Figment::from(Toml::file(&info.config_toml_path)) + .merge(Env::prefixed(CONFIG_OVERRIDE_PREFIX).split(CONFIG_OVERRIDE_SEPARATOR)) + }; + + // Make sure user has provided the mandatory options. + Self::check_mandatory_options(&figment)?; + + // Fill missing options with default values. + let figment = figment.join(Serialized::defaults(Configuration::default())); + + // Build final configuration. + let config: Configuration = figment.extract()?; + + // Make sure the provided schema version matches this version. + if config.metadata.schema_version != Version::new(VERSION_2_0_0) { + return Err(Error::UnsupportedVersion { + version: config.metadata.schema_version, + }); + } + + Ok(config) + } + + /// Some configuration options are mandatory. 
The tracker will panic if + /// the user doesn't provide an explicit value for them from one of the + /// configuration sources: TOML or ENV VARS. + /// + /// # Errors + /// + /// Will return an error if a mandatory configuration option is only + /// obtained by default value (code), meaning the user hasn't overridden it. + fn check_mandatory_options(figment: &Figment) -> Result<(), Error> { + let mandatory_options = ["metadata.schema_version", "logging.threshold", "core.private", "core.listed"]; + + for mandatory_option in mandatory_options { + figment + .find_value(mandatory_option) + .map_err(|_err| Error::MissingMandatoryOption { + path: mandatory_option.to_owned(), + })?; + } + + Ok(()) + } + + /// Saves the configuration to the configuration file. + /// + /// # Errors + /// + /// Will return `Err` if `filename` does not exist or the user does not have + /// permission to read it. Will also return `Err` if the configuration is + /// not valid or cannot be encoded to TOML. + /// + /// # Panics + /// + /// Will panic if the configuration cannot be written into the file. + pub fn save_to_file(&self, path: &str) -> Result<(), Error> { + fs::write(path, self.to_toml()).expect("Could not write to file!"); + Ok(()) + } + + /// Encodes the configuration to TOML. + /// + /// # Panics + /// + /// Will panic if it can't be converted to TOML. + #[must_use] + fn to_toml(&self) -> String { + // code-review: do we need to use Figment also to serialize into toml? + toml::to_string(self).expect("Could not encode TOML value") + } + + /// Encodes the configuration to JSON. + /// + /// # Panics + /// + /// Will panic if it can't be converted to JSON. + #[must_use] + pub fn to_json(&self) -> String { + // code-review: do we need to use Figment also to serialize into json? + serde_json::to_string_pretty(self).expect("Could not encode JSON value") + } + + /// Masks secrets in the configuration. 
+ #[must_use] + pub fn mask_secrets(mut self) -> Self { + self.core.database.mask_secrets(); + + if let Some(ref mut api) = self.http_api { + api.mask_secrets(); + } + + self + } +} + +impl Validator for Configuration { + fn validate(&self) -> Result<(), SemanticValidationError> { + self.core.validate() + } +} + +#[cfg(test)] +mod tests { + + use std::net::{IpAddr, Ipv4Addr}; + + use crate::v2_0_0::Configuration; + use crate::Info; + + #[cfg(test)] + fn default_config_toml() -> String { + let config = r#"[metadata] + app = "torrust-tracker" + purpose = "configuration" + schema_version = "2.0.0" + + [logging] + threshold = "info" + + [core] + inactive_peer_cleanup_interval = 600 + listed = false + private = false + tracker_usage_statistics = true + + [core.announce_policy] + interval = 120 + interval_min = 120 + + [core.database] + driver = "sqlite3" + path = "./storage/tracker/lib/database/sqlite3.db" + + [core.net] + external_ip = "0.0.0.0" + on_reverse_proxy = false + + [core.tracker_policy] + max_peer_timeout = 900 + persistent_torrent_completed_stat = false + remove_peerless_torrents = true + + [health_check_api] + bind_address = "127.0.0.1:1313" + "# + .lines() + .map(str::trim_start) + .collect::>() + .join("\n"); + config + } + + #[test] + fn configuration_should_have_default_values() { + let configuration = Configuration::default(); + + let toml = toml::to_string(&configuration).expect("Could not encode TOML value"); + + assert_eq!(toml, default_config_toml()); + } + + #[test] + fn configuration_should_contain_the_external_ip() { + let configuration = Configuration::default(); + + assert_eq!( + configuration.core.net.external_ip, + Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) + ); + } + + #[test] + fn configuration_should_be_saved_in_a_toml_config_file() { + use std::{env, fs}; + + use uuid::Uuid; + + // Build temp config file path + let temp_directory = env::temp_dir(); + let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); + 
+ // Convert to argument type for Configuration::save_to_file + let config_file_path = temp_file; + let path = config_file_path.to_string_lossy().to_string(); + + let default_configuration = Configuration::default(); + + default_configuration + .save_to_file(&path) + .expect("Could not save configuration to file"); + + let contents = fs::read_to_string(&path).expect("Something went wrong reading the file"); + + assert_eq!(contents, default_config_toml()); + } + + #[test] + fn configuration_should_use_the_default_values_when_only_the_mandatory_options_are_provided_by_the_user_via_toml_file() { + figment::Jail::expect_with(|jail| { + jail.create_file( + "tracker.toml", + r#" + [metadata] + schema_version = "2.0.0" + + [logging] + threshold = "info" + + [core] + listed = false + private = false + "#, + )?; + + let info = Info { + config_toml: None, + config_toml_path: "tracker.toml".to_string(), + }; + + let configuration = Configuration::load(&info).expect("Could not load configuration from file"); + + assert_eq!(configuration, Configuration::default()); + + Ok(()) + }); + } + + #[test] + fn configuration_should_use_the_default_values_when_only_the_mandatory_options_are_provided_by_the_user_via_toml_content() { + figment::Jail::expect_with(|_jail| { + let config_toml = r#" + [metadata] + schema_version = "2.0.0" + + [logging] + threshold = "info" + + [core] + listed = false + private = false + "# + .to_string(); + + let info = Info { + config_toml: Some(config_toml), + config_toml_path: String::new(), + }; + + let configuration = Configuration::load(&info).expect("Could not load configuration from file"); + + assert_eq!(configuration, Configuration::default()); + + Ok(()) + }); + } + + #[test] + fn default_configuration_could_be_overwritten_from_a_single_env_var_with_toml_contents() { + figment::Jail::expect_with(|_jail| { + let config_toml = r#" + [metadata] + schema_version = "2.0.0" + + [logging] + threshold = "info" + + [core] + listed = false + private = false + 
+ [core.database] + path = "OVERWRITTEN DEFAULT DB PATH" + "# + .to_string(); + + let info = Info { + config_toml: Some(config_toml), + config_toml_path: String::new(), + }; + + let configuration = Configuration::load(&info).expect("Could not load configuration from file"); + + assert_eq!(configuration.core.database.path, "OVERWRITTEN DEFAULT DB PATH".to_string()); + + Ok(()) + }); + } + + #[test] + fn default_configuration_could_be_overwritten_from_a_toml_config_file() { + figment::Jail::expect_with(|jail| { + jail.create_file( + "tracker.toml", + r#" + [metadata] + schema_version = "2.0.0" + + [logging] + threshold = "info" + + [core] + listed = false + private = false + + [core.database] + path = "OVERWRITTEN DEFAULT DB PATH" + "#, + )?; + + let info = Info { + config_toml: None, + config_toml_path: "tracker.toml".to_string(), + }; + + let configuration = Configuration::load(&info).expect("Could not load configuration from file"); + + assert_eq!(configuration.core.database.path, "OVERWRITTEN DEFAULT DB PATH".to_string()); + + Ok(()) + }); + } + + #[test] + fn configuration_should_allow_to_overwrite_the_default_tracker_api_token_for_admin_with_an_env_var() { + figment::Jail::expect_with(|jail| { + jail.set_env("TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN", "NewToken"); + + let info = Info { + config_toml: Some(default_config_toml()), + config_toml_path: String::new(), + }; + + let configuration = Configuration::load(&info).expect("Could not load configuration from file"); + + assert_eq!( + configuration.http_api.unwrap().access_tokens.get("admin"), + Some("NewToken".to_owned()).as_ref() + ); + + Ok(()) + }); + } +} diff --git a/packages/configuration/src/v2_0_0/network.rs b/packages/configuration/src/v2_0_0/network.rs new file mode 100644 index 000000000..8e53d419c --- /dev/null +++ b/packages/configuration/src/v2_0_0/network.rs @@ -0,0 +1,41 @@ +use std::net::{IpAddr, Ipv4Addr}; + +use serde::{Deserialize, Serialize}; + 
+#[allow(clippy::struct_excessive_bools)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct Network { + /// The external IP address of the tracker. If the client is using a + /// loopback IP address, this IP address will be used instead. If the peer + /// is using a loopback IP address, the tracker assumes that the peer is + /// in the same network as the tracker and will use the tracker's IP + /// address instead. + #[serde(default = "Network::default_external_ip")] + pub external_ip: Option, + + /// Weather the tracker is behind a reverse proxy or not. + /// If the tracker is behind a reverse proxy, the `X-Forwarded-For` header + /// sent from the proxy will be used to get the client's IP address. + #[serde(default = "Network::default_on_reverse_proxy")] + pub on_reverse_proxy: bool, +} + +impl Default for Network { + fn default() -> Self { + Self { + external_ip: Self::default_external_ip(), + on_reverse_proxy: Self::default_on_reverse_proxy(), + } + } +} + +impl Network { + #[allow(clippy::unnecessary_wraps)] + fn default_external_ip() -> Option { + Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) + } + + fn default_on_reverse_proxy() -> bool { + false + } +} diff --git a/packages/configuration/src/v2_0_0/tracker_api.rs b/packages/configuration/src/v2_0_0/tracker_api.rs new file mode 100644 index 000000000..2da21758b --- /dev/null +++ b/packages/configuration/src/v2_0_0/tracker_api.rs @@ -0,0 +1,88 @@ +use std::collections::HashMap; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; + +use crate::TslConfig; + +pub type AccessTokens = HashMap; + +/// Configuration for the HTTP API. +#[serde_as] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct HttpApi { + /// The address the tracker will bind to. + /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to + /// listen to all interfaces, use `0.0.0.0`. 
If you want the operating + /// system to choose a random port, use port `0`. + #[serde(default = "HttpApi::default_bind_address")] + pub bind_address: SocketAddr, + + /// TSL config. Only used if `ssl_enabled` is true. + #[serde(default = "HttpApi::default_tsl_config")] + pub tsl_config: Option, + + /// Access tokens for the HTTP API. The key is a label identifying the + /// token and the value is the token itself. The token is used to + /// authenticate the user. All tokens are valid for all endpoints and have + /// all permissions. + #[serde(default = "HttpApi::default_access_tokens")] + pub access_tokens: AccessTokens, +} + +impl Default for HttpApi { + fn default() -> Self { + Self { + bind_address: Self::default_bind_address(), + tsl_config: Self::default_tsl_config(), + access_tokens: Self::default_access_tokens(), + } + } +} + +impl HttpApi { + fn default_bind_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 1212) + } + + #[allow(clippy::unnecessary_wraps)] + fn default_tsl_config() -> Option { + None + } + + fn default_access_tokens() -> AccessTokens { + [].iter().cloned().collect() + } + + pub fn add_token(&mut self, key: &str, token: &str) { + self.access_tokens.insert(key.to_string(), token.to_string()); + } + + pub fn mask_secrets(&mut self) { + for token in self.access_tokens.values_mut() { + *token = "***".to_string(); + } + } +} + +#[cfg(test)] +mod tests { + use crate::v2_0_0::tracker_api::HttpApi; + + #[test] + fn default_http_api_configuration_should_not_contains_any_token() { + let configuration = HttpApi::default(); + + assert_eq!(configuration.access_tokens.values().len(), 0); + } + + #[test] + fn http_api_configuration_should_allow_adding_tokens() { + let mut configuration = HttpApi::default(); + + configuration.add_token("admin", "MyAccessToken"); + + assert!(configuration.access_tokens.values().any(|t| t == "MyAccessToken")); + } +} diff --git a/packages/configuration/src/v2_0_0/udp_tracker.rs 
b/packages/configuration/src/v2_0_0/udp_tracker.rs new file mode 100644 index 000000000..b3d420d72 --- /dev/null +++ b/packages/configuration/src/v2_0_0/udp_tracker.rs @@ -0,0 +1,26 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct UdpTracker { + /// The address the tracker will bind to. + /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to + /// listen to all interfaces, use `0.0.0.0`. If you want the operating + /// system to choose a random port, use port `0`. + #[serde(default = "UdpTracker::default_bind_address")] + pub bind_address: SocketAddr, +} +impl Default for UdpTracker { + fn default() -> Self { + Self { + bind_address: Self::default_bind_address(), + } + } +} + +impl UdpTracker { + fn default_bind_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 6969) + } +} diff --git a/packages/configuration/src/validator.rs b/packages/configuration/src/validator.rs new file mode 100644 index 000000000..4555b88dd --- /dev/null +++ b/packages/configuration/src/validator.rs @@ -0,0 +1,19 @@ +//! Trait to validate semantic errors. +//! +//! Errors could involve more than one configuration option. Some configuration +//! combinations can be incompatible. +use thiserror::Error; + +/// Errors that can occur validating the configuration. +#[derive(Error, Debug)] +pub enum SemanticValidationError { + #[error("Private mode section in configuration can only be included when the tracker is running in private mode.")] + UselessPrivateModeSection, +} + +pub trait Validator { + /// # Errors + /// + /// Will return an error if the configuration is invalid. 
+ fn validate(&self) -> Result<(), SemanticValidationError>; +} diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml new file mode 100644 index 000000000..637ea3055 --- /dev/null +++ b/packages/located-error/Cargo.toml @@ -0,0 +1,21 @@ +[package] +description = "A library to provide error decorator with the location and the source of the original error." +keywords = ["errors", "helper", "library"] +name = "torrust-tracker-located-error" +readme = "README.md" + +authors.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +tracing = "0" + +[dev-dependencies] +thiserror = "1" diff --git a/packages/located-error/LICENSE b/packages/located-error/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/located-error/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. 
+ + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/packages/located-error/README.md b/packages/located-error/README.md new file mode 100644 index 000000000..c3c18fa49 --- /dev/null +++ b/packages/located-error/README.md @@ -0,0 +1,11 @@ +# Torrust Tracker Located Error + +A library to provide an error decorator with the location and the source of the original error. + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-located-error). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/located-error/src/lib.rs b/packages/located-error/src/lib.rs new file mode 100644 index 000000000..3cba6042d --- /dev/null +++ b/packages/located-error/src/lib.rs @@ -0,0 +1,138 @@ +//! This crate provides a wrapper around an error that includes the location of +//! the error. +//! +//! ```rust +//! use std::error::Error; +//! use std::panic::Location; +//! use std::sync::Arc; +//! use torrust_tracker_located_error::{Located, LocatedError}; +//! +//! #[derive(thiserror::Error, Debug)] +//! enum TestError { +//! #[error("Test")] +//! Test, +//! } +//! +//! #[track_caller] +//! fn get_caller_location() -> Location<'static> { +//! *Location::caller() +//! } +//! +//! let e = TestError::Test; +//! +//! let b: LocatedError = Located(e).into(); +//! let l = get_caller_location(); +//! +//! assert!(b.to_string().contains("Test, src/lib.rs")); +//! ``` +//! +//! # Credits +//! +//! +use std::error::Error; +use std::panic::Location; +use std::sync::Arc; + +pub type DynError = Arc; + +/// A generic wrapper around an error. 
+/// +/// Where `E` is the inner error (source error). +pub struct Located(pub E); + +/// A wrapper around an error that includes the location of the error. +#[derive(Debug)] +pub struct LocatedError<'a, E> +where + E: Error + ?Sized + Send + Sync, +{ + source: Arc, + location: Box>, +} + +impl<'a, E> std::fmt::Display for LocatedError<'a, E> +where + E: Error + ?Sized + Send + Sync, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}, {}", self.source, self.location) + } +} + +impl<'a, E> Error for LocatedError<'a, E> +where + E: Error + ?Sized + Send + Sync + 'static, +{ + fn source(&self) -> Option<&(dyn Error + 'static)> { + Some(&self.source) + } +} + +impl<'a, E> Clone for LocatedError<'a, E> +where + E: Error + ?Sized + Send + Sync, +{ + fn clone(&self) -> Self { + LocatedError { + source: self.source.clone(), + location: self.location.clone(), + } + } +} + +#[allow(clippy::from_over_into)] +impl<'a, E> Into> for Located +where + E: Error + Send + Sync, + Arc: Clone, +{ + #[track_caller] + fn into(self) -> LocatedError<'a, E> { + let e = LocatedError { + source: Arc::new(self.0), + location: Box::new(*std::panic::Location::caller()), + }; + tracing::debug!("{e}"); + e + } +} + +#[allow(clippy::from_over_into)] +impl<'a> Into> for DynError { + #[track_caller] + fn into(self) -> LocatedError<'a, dyn std::error::Error + Send + Sync> { + LocatedError { + source: self, + location: Box::new(*std::panic::Location::caller()), + } + } +} + +#[cfg(test)] +mod tests { + use std::panic::Location; + + use super::LocatedError; + use crate::Located; + + #[derive(thiserror::Error, Debug)] + enum TestError { + #[error("Test")] + Test, + } + + #[track_caller] + fn get_caller_location() -> Location<'static> { + *Location::caller() + } + + #[test] + fn error_should_include_location() { + let e = TestError::Test; + + let b: LocatedError<'_, TestError> = Located(e).into(); + let l = get_caller_location(); + + assert_eq!(b.location.file(), 
l.file()); + } +} diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml new file mode 100644 index 000000000..02a53e3b7 --- /dev/null +++ b/packages/primitives/Cargo.toml @@ -0,0 +1,25 @@ +[package] +description = "A library with the primitive types shared by the Torrust tracker packages." +keywords = ["api", "library", "primitives"] +name = "torrust-tracker-primitives" +readme = "README.md" + +authors.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +aquatic_udp_protocol = "0" +binascii = "0" +derive_more = { version = "1", features = ["constructor"] } +serde = { version = "1", features = ["derive"] } +tdyne-peer-id = "1" +tdyne-peer-id-registry = "0" +thiserror = "1" +zerocopy = "0" diff --git a/packages/primitives/LICENSE b/packages/primitives/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/primitives/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. 
+ + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/packages/primitives/README.md b/packages/primitives/README.md new file mode 100644 index 000000000..791955859 --- /dev/null +++ b/packages/primitives/README.md @@ -0,0 +1,11 @@ +# Torrust Tracker Primitives + +A library with the primitive types shared by the [Torrust Tracker](https://github.com/torrust/torrust-tracker) packages. + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-primitives). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/primitives/src/info_hash.rs b/packages/primitives/src/info_hash.rs new file mode 100644 index 000000000..61b40a746 --- /dev/null +++ b/packages/primitives/src/info_hash.rs @@ -0,0 +1,220 @@ +use std::hash::{DefaultHasher, Hash, Hasher}; +use std::ops::{Deref, DerefMut}; +use std::panic::Location; + +use thiserror::Error; +use zerocopy::FromBytes; + +/// `BitTorrent` Info Hash v1 +#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] +pub struct InfoHash { + data: aquatic_udp_protocol::InfoHash, +} + +pub const INFO_HASH_BYTES_LEN: usize = 20; + +impl InfoHash { + /// Create a new `InfoHash` from a byte slice. + /// + /// # Panics + /// + /// Will panic if byte slice does not contains the exact amount of bytes need for the `InfoHash`. + #[must_use] + pub fn from_bytes(bytes: &[u8]) -> Self { + let data = aquatic_udp_protocol::InfoHash::read_from(bytes).expect("it should have the exact amount of bytes"); + + Self { data } + } + + /// Returns the `InfoHash` internal byte array. 
+ #[must_use] + pub fn bytes(&self) -> [u8; 20] { + self.0 + } + + /// Returns the `InfoHash` as a hex string. + #[must_use] + pub fn to_hex_string(&self) -> String { + self.to_string() + } +} + +impl Default for InfoHash { + fn default() -> Self { + Self { + data: aquatic_udp_protocol::InfoHash(Default::default()), + } + } +} + +impl From for InfoHash { + fn from(data: aquatic_udp_protocol::InfoHash) -> Self { + Self { data } + } +} + +impl Deref for InfoHash { + type Target = aquatic_udp_protocol::InfoHash; + + fn deref(&self) -> &Self::Target { + &self.data + } +} + +impl DerefMut for InfoHash { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.data + } +} + +impl Ord for InfoHash { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl PartialOrd for InfoHash { + fn partial_cmp(&self, other: &InfoHash) -> Option { + Some(self.cmp(other)) + } +} + +impl std::fmt::Display for InfoHash { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut chars = [0u8; 40]; + binascii::bin2hex(&self.0, &mut chars).expect("failed to hexlify"); + write!(f, "{}", std::str::from_utf8(&chars).unwrap()) + } +} + +impl std::str::FromStr for InfoHash { + type Err = binascii::ConvertError; + + fn from_str(s: &str) -> Result { + let mut i = Self::default(); + if s.len() != 40 { + return Err(binascii::ConvertError::InvalidInputLength); + } + binascii::hex2bin(s.as_bytes(), &mut i.0)?; + Ok(i) + } +} + +impl std::convert::From<&[u8]> for InfoHash { + fn from(data: &[u8]) -> InfoHash { + assert_eq!(data.len(), 20); + let mut ret = Self::default(); + ret.0.clone_from_slice(data); + ret + } +} + +/// for testing +impl std::convert::From<&DefaultHasher> for InfoHash { + fn from(data: &DefaultHasher) -> InfoHash { + let n = data.finish().to_le_bytes(); + let bytes = [ + n[0], n[1], n[2], n[3], n[4], n[5], n[6], n[7], n[0], n[1], n[2], n[3], n[4], n[5], n[6], n[7], n[0], n[1], n[2], + n[3], + ]; + let data = 
aquatic_udp_protocol::InfoHash(bytes); + Self { data } + } +} + +impl std::convert::From<&i32> for InfoHash { + fn from(n: &i32) -> InfoHash { + let n = n.to_le_bytes(); + let bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, n[0], n[1], n[2], n[3]]; + let data = aquatic_udp_protocol::InfoHash(bytes); + Self { data } + } +} + +impl std::convert::From<[u8; 20]> for InfoHash { + fn from(bytes: [u8; 20]) -> Self { + let data = aquatic_udp_protocol::InfoHash(bytes); + Self { data } + } +} + +/// Errors that can occur when converting from a `Vec` to an `InfoHash`. +#[derive(Error, Debug)] +pub enum ConversionError { + /// Not enough bytes for infohash. An infohash is 20 bytes. + #[error("not enough bytes for infohash: {message} {location}")] + NotEnoughBytes { + location: &'static Location<'static>, + message: String, + }, + /// Too many bytes for infohash. An infohash is 20 bytes. + #[error("too many bytes for infohash: {message} {location}")] + TooManyBytes { + location: &'static Location<'static>, + message: String, + }, +} + +impl TryFrom> for InfoHash { + type Error = ConversionError; + + fn try_from(bytes: Vec) -> Result { + if bytes.len() < INFO_HASH_BYTES_LEN { + return Err(ConversionError::NotEnoughBytes { + location: Location::caller(), + message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, + }); + } + if bytes.len() > INFO_HASH_BYTES_LEN { + return Err(ConversionError::TooManyBytes { + location: Location::caller(), + message: format! 
{"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, + }); + } + Ok(Self::from_bytes(&bytes)) + } +} + +impl serde::ser::Serialize for InfoHash { + fn serialize(&self, serializer: S) -> Result { + let mut buffer = [0u8; 40]; + let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); + let str_out = std::str::from_utf8(bytes_out).unwrap(); + serializer.serialize_str(str_out) + } +} + +impl<'de> serde::de::Deserialize<'de> for InfoHash { + fn deserialize>(des: D) -> Result { + des.deserialize_str(InfoHashVisitor) + } +} + +struct InfoHashVisitor; + +impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { + type Value = InfoHash; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "a 40 character long hash") + } + + fn visit_str(self, v: &str) -> Result { + if v.len() != 40 { + return Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(v), + &"a 40 character long string", + )); + } + + let mut res = InfoHash::default(); + + if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { + return Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(v), + &"a hexadecimal string", + )); + }; + Ok(res) + } +} diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs new file mode 100644 index 000000000..08fc58976 --- /dev/null +++ b/packages/primitives/src/lib.rs @@ -0,0 +1,21 @@ +//! Primitive types for [Torrust Tracker](https://docs.rs/torrust-tracker). +//! +//! This module contains the basic data structures for the [Torrust Tracker](https://docs.rs/torrust-tracker), +//! which is a `BitTorrent` tracker server. These structures are used not only +//! by the tracker server crate, but also by other crates in the Torrust +//! ecosystem. 
+use std::collections::BTreeMap; +use std::time::Duration; + +use info_hash::InfoHash; + +pub mod info_hash; +pub mod pagination; +pub mod peer; +pub mod swarm_metadata; +pub mod torrent_metrics; + +/// Duration since the Unix Epoch. +pub type DurationSinceUnixEpoch = Duration; + +pub type PersistentTorrents = BTreeMap; diff --git a/packages/primitives/src/pagination.rs b/packages/primitives/src/pagination.rs new file mode 100644 index 000000000..96b5ad662 --- /dev/null +++ b/packages/primitives/src/pagination.rs @@ -0,0 +1,46 @@ +use derive_more::Constructor; +use serde::Deserialize; + +/// A struct to keep information about the page when results are being paginated +#[derive(Deserialize, Copy, Clone, Debug, PartialEq, Constructor)] +pub struct Pagination { + /// The page number, starting at 0 + pub offset: u32, + /// Page size. The number of results per page + pub limit: u32, +} + +impl Pagination { + #[must_use] + pub fn new_with_options(offset_option: Option, limit_option: Option) -> Self { + let offset = match offset_option { + Some(offset) => offset, + None => Pagination::default_offset(), + }; + let limit = match limit_option { + Some(offset) => offset, + None => Pagination::default_limit(), + }; + + Self { offset, limit } + } + + #[must_use] + pub fn default_offset() -> u32 { + 0 + } + + #[must_use] + pub fn default_limit() -> u32 { + 4000 + } +} + +impl Default for Pagination { + fn default() -> Self { + Self { + offset: Self::default_offset(), + limit: Self::default_limit(), + } + } +} diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs new file mode 100644 index 000000000..c8ff1791d --- /dev/null +++ b/packages/primitives/src/peer.rs @@ -0,0 +1,563 @@ +//! Peer struct used by the core `Tracker`. +//! +//! A sample peer: +//! +//! ```rust,no_run +//! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +//! use torrust_tracker_primitives::peer; +//! use std::net::SocketAddr; +//! use std::net::IpAddr; +//! 
use std::net::Ipv4Addr; +//! use torrust_tracker_primitives::DurationSinceUnixEpoch; +//! +//! +//! peer::Peer { +//! peer_id: PeerId(*b"-qB00000000000000000"), +//! peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), +//! updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), +//! uploaded: NumberOfBytes::new(0), +//! downloaded: NumberOfBytes::new(0), +//! left: NumberOfBytes::new(0), +//! event: AnnounceEvent::Started, +//! }; +//! ``` + +use std::net::{IpAddr, SocketAddr}; +use std::ops::{Deref, DerefMut}; +use std::sync::Arc; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +use serde::Serialize; +use zerocopy::FromBytes as _; + +use crate::DurationSinceUnixEpoch; + +/// Peer struct used by the core `Tracker`. +/// +/// A sample peer: +/// +/// ```rust,no_run +/// use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +/// use torrust_tracker_primitives::peer; +/// use std::net::SocketAddr; +/// use std::net::IpAddr; +/// use std::net::Ipv4Addr; +/// use torrust_tracker_primitives::DurationSinceUnixEpoch; +/// +/// +/// peer::Peer { +/// peer_id: PeerId(*b"-qB00000000000000000"), +/// peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), +/// updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), +/// uploaded: NumberOfBytes::new(0), +/// downloaded: NumberOfBytes::new(0), +/// left: NumberOfBytes::new(0), +/// event: AnnounceEvent::Started, +/// }; +/// ``` +#[derive(Debug, Clone, Serialize, Copy, PartialEq, Eq, Hash)] +pub struct Peer { + /// ID used by the downloader peer + #[serde(serialize_with = "ser_peer_id")] + pub peer_id: PeerId, + /// The IP and port this peer is listening on + pub peer_addr: SocketAddr, + /// The last time the the tracker receive an announce request from this peer (timestamp) + #[serde(serialize_with = "ser_unix_time_value")] + pub updated: DurationSinceUnixEpoch, + /// The total amount of bytes uploaded by this peer so far + #[serde(serialize_with 
= "ser_number_of_bytes")] + pub uploaded: NumberOfBytes, + /// The total amount of bytes downloaded by this peer so far + #[serde(serialize_with = "ser_number_of_bytes")] + pub downloaded: NumberOfBytes, + /// The number of bytes this peer still has to download + #[serde(serialize_with = "ser_number_of_bytes")] + pub left: NumberOfBytes, + /// This is an optional key which maps to started, completed, or stopped (or empty, which is the same as not being present). + #[serde(serialize_with = "ser_announce_event")] + pub event: AnnounceEvent, +} + +/// Serializes a `DurationSinceUnixEpoch` as a Unix timestamp in milliseconds. +/// # Errors +/// +/// Will return `serde::Serializer::Error` if unable to serialize the `unix_time_value`. +pub fn ser_unix_time_value(unix_time_value: &DurationSinceUnixEpoch, ser: S) -> Result { + #[allow(clippy::cast_possible_truncation)] + ser.serialize_u64(unix_time_value.as_millis() as u64) +} + +#[derive(Serialize)] +pub enum AnnounceEventSer { + Started, + Stopped, + Completed, + None, +} + +/// Serializes a `Announce Event` as a enum. +/// +/// # Errors +/// +/// If will return an error if the internal serializer was to fail. +pub fn ser_announce_event(announce_event: &AnnounceEvent, ser: S) -> Result { + let event_ser = match announce_event { + AnnounceEvent::Started => AnnounceEventSer::Started, + AnnounceEvent::Stopped => AnnounceEventSer::Stopped, + AnnounceEvent::Completed => AnnounceEventSer::Completed, + AnnounceEvent::None => AnnounceEventSer::None, + }; + + ser.serialize_some(&event_ser) +} + +/// Serializes a `Announce Event` as a i64. +/// +/// # Errors +/// +/// If will return an error if the internal serializer was to fail. +pub fn ser_number_of_bytes(number_of_bytes: &NumberOfBytes, ser: S) -> Result { + ser.serialize_i64(number_of_bytes.0.get()) +} + +/// Serializes a `PeerId` as a `peer::Id`. +/// +/// # Errors +/// +/// If will return an error if the internal serializer was to fail. 
+pub fn ser_peer_id(peer_id: &PeerId, ser: S) -> Result { + let id = Id { data: *peer_id }; + ser.serialize_some(&id) +} + +impl Ord for Peer { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.peer_id.cmp(&other.peer_id) + } +} + +impl PartialOrd for Peer { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.peer_id.cmp(&other.peer_id)) + } +} + +pub trait ReadInfo { + fn is_seeder(&self) -> bool; + fn get_event(&self) -> AnnounceEvent; + fn get_id(&self) -> PeerId; + fn get_updated(&self) -> DurationSinceUnixEpoch; + fn get_address(&self) -> SocketAddr; +} + +impl ReadInfo for Peer { + fn is_seeder(&self) -> bool { + self.left.0.get() <= 0 && self.event != AnnounceEvent::Stopped + } + + fn get_event(&self) -> AnnounceEvent { + self.event + } + + fn get_id(&self) -> PeerId { + self.peer_id + } + + fn get_updated(&self) -> DurationSinceUnixEpoch { + self.updated + } + + fn get_address(&self) -> SocketAddr { + self.peer_addr + } +} + +impl ReadInfo for Arc { + fn is_seeder(&self) -> bool { + self.left.0.get() <= 0 && self.event != AnnounceEvent::Stopped + } + + fn get_event(&self) -> AnnounceEvent { + self.event + } + + fn get_id(&self) -> PeerId { + self.peer_id + } + + fn get_updated(&self) -> DurationSinceUnixEpoch { + self.updated + } + + fn get_address(&self) -> SocketAddr { + self.peer_addr + } +} + +impl Peer { + #[must_use] + pub fn is_seeder(&self) -> bool { + self.left.0.get() <= 0 && self.event != AnnounceEvent::Stopped + } + + pub fn ip(&mut self) -> IpAddr { + self.peer_addr.ip() + } + + pub fn change_ip(&mut self, new_ip: &IpAddr) { + self.peer_addr = SocketAddr::new(*new_ip, self.peer_addr.port()); + } +} + +use std::panic::Location; + +use thiserror::Error; + +/// Error returned when trying to convert an invalid peer id from another type. +/// +/// Usually because the source format does not contain 20 bytes. 
+#[derive(Error, Debug)] +pub enum IdConversionError { + #[error("not enough bytes for peer id: {message} {location}")] + NotEnoughBytes { + location: &'static Location<'static>, + message: String, + }, + #[error("too many bytes for peer id: {message} {location}")] + TooManyBytes { + location: &'static Location<'static>, + message: String, + }, +} + +pub struct Id { + data: PeerId, +} + +impl From for Id { + fn from(id: PeerId) -> Self { + Self { data: id } + } +} + +impl Deref for Id { + type Target = PeerId; + + fn deref(&self) -> &Self::Target { + &self.data + } +} + +impl DerefMut for Id { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.data + } +} + +impl Id { + #[must_use] + pub fn new(number: T) -> Self + where + T: Into, + { + let number: i128 = number.into(); + let number = number.to_le_bytes(); + let bytes = [ + 0u8, 0u8, 0u8, 0u8, number[0], number[1], number[2], number[3], number[4], number[5], number[6], number[7], + number[8], number[9], number[10], number[11], number[12], number[13], number[14], number[15], + ]; + + let data = PeerId(bytes); + Id { data } + } +} + +impl TryFrom> for Id { + type Error = IdConversionError; + + fn try_from(bytes: Vec) -> Result { + if bytes.len() < PEER_ID_BYTES_LEN { + return Err(IdConversionError::NotEnoughBytes { + location: Location::caller(), + message: format! {"got {} bytes, expected {}", bytes.len(), PEER_ID_BYTES_LEN}, + }); + } + if bytes.len() > PEER_ID_BYTES_LEN { + return Err(IdConversionError::TooManyBytes { + location: Location::caller(), + message: format! 
{"got {} bytes, expected {}", bytes.len(), PEER_ID_BYTES_LEN}, + }); + } + + let data = PeerId::read_from(&bytes).expect("it should have the correct amount of bytes"); + Ok(Self { data }) + } +} + +impl std::fmt::Display for Id { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self.to_hex_string() { + Some(hex) => write!(f, "{hex}"), + None => write!(f, ""), + } + } +} + +pub const PEER_ID_BYTES_LEN: usize = 20; + +impl Id { + #[must_use] + /// Converts to hex string. + /// + /// For the `PeerId` `-qB00000000000000000` it returns `2d71423030303030303030303030303030303030` + /// + /// For example: + /// + ///```text + /// Bytes = Hex + /// -qB00000000000000000 = 2d71423030303030303030303030303030303030 + /// -qB00000000000000000 = 2d 71 42 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 + /// + /// ------------- + /// |Char | Hex | + /// ------------- + /// | - | 2D | + /// | q | 71 | + /// | B | 42 | + /// | 0 | 30 | + /// ------------- + /// ``` + /// + /// Return `None` is some of the bytes are invalid UTF8 values. + /// + /// # Panics + /// + /// It will panic if the `binascii::bin2hex` from a too-small output buffer. 
+ pub fn to_hex_string(&self) -> Option { + let buff_size = self.0.len() * 2; + let mut tmp: Vec = vec![0; buff_size]; + + binascii::bin2hex(&self.0, &mut tmp).unwrap(); + + match std::str::from_utf8(&tmp) { + Ok(hex) => Some(format!("0x{hex}")), + Err(_) => None, + } + } + + #[must_use] + pub fn get_client_name(&self) -> Option { + let peer_id = tdyne_peer_id::PeerId::from(self.0); + tdyne_peer_id_registry::parse(peer_id).ok().map(|parsed| parsed.client) + } +} + +impl Serialize for Id { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + #[derive(Serialize)] + struct PeerIdInfo { + id: Option, + client: Option, + } + + let obj = PeerIdInfo { + id: self.to_hex_string(), + client: self.get_client_name(), + }; + obj.serialize(serializer) + } +} + +/// Marker Trait for Peer Vectors +pub trait Encoding: From + PartialEq {} + +impl FromIterator for Vec

{ + fn from_iter>(iter: T) -> Self { + let mut peers: Vec

= vec![]; + + for peer in iter { + peers.push(peer.into()); + } + + peers + } +} + +pub mod fixture { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + + use super::{Id, Peer, PeerId}; + use crate::DurationSinceUnixEpoch; + + #[derive(PartialEq, Debug)] + + pub struct PeerBuilder { + peer: Peer, + } + + #[allow(clippy::derivable_impls)] + impl Default for PeerBuilder { + fn default() -> Self { + Self { peer: Peer::default() } + } + } + + impl PeerBuilder { + #[allow(dead_code)] + #[must_use] + pub fn seeder() -> Self { + let peer = Peer { + peer_id: PeerId(*b"-qB00000000000000001"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), + event: AnnounceEvent::Completed, + }; + + Self { peer } + } + + #[allow(dead_code)] + #[must_use] + pub fn leecher() -> Self { + let peer = Peer { + peer_id: PeerId(*b"-qB00000000000000002"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(10), + event: AnnounceEvent::Started, + }; + + Self { peer } + } + + #[allow(dead_code)] + #[must_use] + pub fn with_peer_id(mut self, peer_id: &PeerId) -> Self { + self.peer.peer_id = *peer_id; + self + } + + #[allow(dead_code)] + #[must_use] + pub fn with_peer_addr(mut self, peer_addr: &SocketAddr) -> Self { + self.peer.peer_addr = *peer_addr; + self + } + + #[allow(dead_code)] + #[must_use] + pub fn with_bytes_pending_to_download(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes::new(left); + self + } + + #[allow(dead_code)] + #[must_use] + pub fn with_no_bytes_pending_to_download(mut self) -> Self { + self.peer.left = NumberOfBytes::new(0); + self 
+ } + + #[allow(dead_code)] + #[must_use] + pub fn last_updated_on(mut self, updated: DurationSinceUnixEpoch) -> Self { + self.peer.updated = updated; + self + } + + #[allow(dead_code)] + #[must_use] + pub fn build(self) -> Peer { + self.into() + } + + #[allow(dead_code)] + #[must_use] + pub fn into(self) -> Peer { + self.peer + } + } + + impl Default for Peer { + fn default() -> Self { + Self { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), + event: AnnounceEvent::Started, + } + } + } + + impl Default for Id { + fn default() -> Self { + let data = PeerId(*b"-qB00000000000000000"); + Self { data } + } + } +} + +#[cfg(test)] +pub mod test { + mod torrent_peer_id { + use aquatic_udp_protocol::PeerId; + + use crate::peer; + + #[test] + #[should_panic = "NotEnoughBytes"] + fn should_fail_trying_to_convert_from_a_byte_vector_with_less_than_20_bytes() { + let _ = peer::Id::try_from([0; 19].to_vec()).unwrap(); + } + + #[test] + #[should_panic = "TooManyBytes"] + fn should_fail_trying_to_convert_from_a_byte_vector_with_more_than_20_bytes() { + let _ = peer::Id::try_from([0; 21].to_vec()).unwrap(); + } + + #[test] + fn should_be_converted_to_hex_string() { + let id = peer::Id { + data: PeerId(*b"-qB00000000000000000"), + }; + assert_eq!(id.to_hex_string().unwrap(), "0x2d71423030303030303030303030303030303030"); + + let id = peer::Id { + data: PeerId([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]), + }; + assert_eq!(id.to_hex_string().unwrap(), "0x009f9296009f9296009f9296009f9296009f9296"); + } + + #[test] + fn should_be_converted_into_string_type_using_the_hex_string_format() { + let id = peer::Id { + data: PeerId(*b"-qB00000000000000000"), + }; + assert_eq!(id.to_string(), 
"0x2d71423030303030303030303030303030303030"); + + let id = peer::Id { + data: PeerId([ + 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, 0, 159, 146, 150, + ]), + }; + assert_eq!(id.to_string(), "0x009f9296009f9296009f9296009f9296009f9296"); + } + } +} diff --git a/packages/primitives/src/swarm_metadata.rs b/packages/primitives/src/swarm_metadata.rs new file mode 100644 index 000000000..ca880b54d --- /dev/null +++ b/packages/primitives/src/swarm_metadata.rs @@ -0,0 +1,22 @@ +use derive_more::Constructor; + +/// Swarm statistics for one torrent. +/// Swarm metadata dictionary in the scrape response. +/// +/// See [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) +#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] +pub struct SwarmMetadata { + /// (i.e `completed`): The number of peers that have ever completed downloading + pub downloaded: u32, // + /// (i.e `seeders`): The number of active peers that have completed downloading (seeders) + pub complete: u32, //seeders + /// (i.e `leechers`): The number of active peers that have not completed downloading (leechers) + pub incomplete: u32, +} + +impl SwarmMetadata { + #[must_use] + pub fn zeroed() -> Self { + Self::default() + } +} diff --git a/packages/primitives/src/torrent_metrics.rs b/packages/primitives/src/torrent_metrics.rs new file mode 100644 index 000000000..02de02954 --- /dev/null +++ b/packages/primitives/src/torrent_metrics.rs @@ -0,0 +1,25 @@ +use std::ops::AddAssign; + +/// Structure that holds general `Tracker` torrents metrics. +/// +/// Metrics are aggregate values for all torrents. +#[derive(Copy, Clone, Debug, PartialEq, Default)] +pub struct TorrentsMetrics { + /// Total number of seeders for all torrents + pub complete: u64, + /// Total number of peers that have ever completed downloading for all torrents. + pub downloaded: u64, + /// Total number of leechers for all torrents. 
+ pub incomplete: u64, + /// Total number of torrents. + pub torrents: u64, +} + +impl AddAssign for TorrentsMetrics { + fn add_assign(&mut self, rhs: Self) { + self.complete += rhs.complete; + self.downloaded += rhs.downloaded; + self.incomplete += rhs.incomplete; + self.torrents += rhs.torrents; + } +} diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml new file mode 100644 index 000000000..b080c19da --- /dev/null +++ b/packages/test-helpers/Cargo.toml @@ -0,0 +1,19 @@ +[package] +description = "A library providing helpers for testing the Torrust tracker." +keywords = ["helper", "library", "testing"] +name = "torrust-tracker-test-helpers" +readme = "README.md" + +authors.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +rand = "0" +torrust-tracker-configuration = { version = "3.0.0", path = "../configuration" } diff --git a/packages/test-helpers/LICENSE b/packages/test-helpers/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/test-helpers/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. 
By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. 
This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. 
However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. 
+ + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.
diff --git a/packages/test-helpers/README.md b/packages/test-helpers/README.md
new file mode 100644
index 000000000..7389dce11
--- /dev/null
+++ b/packages/test-helpers/README.md
@@ -0,0 +1,11 @@
+# Torrust Tracker Test Helpers
+
+A library providing helpers for testing the [Torrust Tracker](https://github.com/torrust/torrust-tracker).
+
+## Documentation
+
+[Crate documentation](https://docs.rs/torrust-tracker-test-helpers).
+
+## License
+
+The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE).
diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs
new file mode 100644
index 000000000..dbd8eef9e
--- /dev/null
+++ b/packages/test-helpers/src/configuration.rs
@@ -0,0 +1,172 @@
+//! Tracker configuration factories for testing.
+use std::env;
+use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
+
+use torrust_tracker_configuration::{Configuration, HttpApi, HttpTracker, Threshold, UdpTracker};
+
+use crate::random;
+
+/// This configuration is used for testing. It generates random config values
+/// so they do not collide if you run more than one tracker at the same time.
+///
+/// > **NOTICE**: This configuration is not meant to be used in production.
+///
+/// > **NOTICE**: Port 0 is used for ephemeral ports, which means that the OS
+/// > will assign a random free port for the tracker to use.
+///
+/// > **NOTICE**: You can change the log threshold to `debug` to see the logs of the
+/// > tracker while running the tests. That can be particularly useful when
+/// > debugging tests.
+///
+/// # Panics
+///
+/// Will panic if the temporary database file path cannot be converted to a
+/// UTF-8 string.
+#[must_use]
+pub fn ephemeral() -> Configuration {
+    // todo: disable services that are not needed.
+    // For example: a test for the UDP tracker should disable the API and HTTP tracker.
+
+    // Builds a loopback (127.0.0.1) socket address. Port 0 asks the OS for an
+    // ephemeral free port, so concurrently running test trackers never collide.
+    fn loopback(port: u16) -> SocketAddr {
+        SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port)
+    }
+
+    let mut config = Configuration::default();
+
+    // It should always be off here; the tests manage their own logging.
+    config.logging.threshold = Threshold::Off;
+
+    // Ephemeral socket address for the API, with a default admin token so
+    // tests can authenticate against it.
+    let mut http_api = HttpApi {
+        bind_address: loopback(0),
+        ..Default::default()
+    };
+    http_api.add_token("admin", "MyAccessToken");
+    config.http_api = Some(http_api);
+
+    // Ephemeral socket address for the Health Check API.
+    config.health_check_api.bind_address = loopback(0);
+
+    // Ephemeral socket address for the UDP tracker.
+    config.udp_trackers = Some(vec![UdpTracker {
+        bind_address: loopback(0),
+    }]);
+
+    // Ephemeral socket address for the HTTP tracker (plain HTTP, no TLS).
+    config.http_trackers = Some(vec![HttpTracker {
+        bind_address: loopback(0),
+        tsl_config: None,
+    }]);
+
+    // Ephemeral sqlite database: a uniquely named file in the OS temp
+    // directory, so parallel test runs never share state.
+    let temp_directory = env::temp_dir();
+    let random_db_id = random::string(16);
+    let temp_file = temp_directory.join(format!("data_{random_db_id}.db"));
+    temp_file.to_str().unwrap().clone_into(&mut config.core.database.path);
+
+    config
+}
+
+/// Ephemeral configuration with reverse proxy enabled.
+#[must_use]
+pub fn ephemeral_with_reverse_proxy() -> Configuration {
+    let mut cfg = ephemeral();
+
+    cfg.core.net.on_reverse_proxy = true;
+
+    cfg
+}
+
+/// Ephemeral configuration with reverse proxy disabled.
+#[must_use] +pub fn ephemeral_without_reverse_proxy() -> Configuration { + let mut cfg = ephemeral(); + + cfg.core.net.on_reverse_proxy = false; + + cfg +} + +/// Ephemeral configuration with `public` mode. +#[must_use] +pub fn ephemeral_public() -> Configuration { + let mut cfg = ephemeral(); + + cfg.core.private = false; + + cfg +} + +/// Ephemeral configuration with `private` mode. +#[must_use] +pub fn ephemeral_private() -> Configuration { + let mut cfg = ephemeral(); + + cfg.core.private = true; + + cfg +} + +/// Ephemeral configuration with `listed` mode. +#[must_use] +pub fn ephemeral_listed() -> Configuration { + let mut cfg = ephemeral(); + + cfg.core.listed = true; + + cfg +} + +/// Ephemeral configuration with `private_listed` mode. +#[must_use] +pub fn ephemeral_private_and_listed() -> Configuration { + let mut cfg = ephemeral(); + + cfg.core.private = true; + cfg.core.listed = true; + + cfg +} + +/// Ephemeral configuration with a custom external (public) IP for the tracker. +#[must_use] +pub fn ephemeral_with_external_ip(ip: IpAddr) -> Configuration { + let mut cfg = ephemeral(); + + cfg.core.net.external_ip = Some(ip); + + cfg +} + +/// Ephemeral configuration using a wildcard IPv6 for the UDP, HTTP and API +/// services. +#[must_use] +pub fn ephemeral_ipv6() -> Configuration { + let mut cfg = ephemeral(); + + let ipv6 = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), 0); + + if let Some(ref mut http_api) = cfg.http_api { + http_api.bind_address.clone_from(&ipv6); + }; + + if let Some(ref mut http_trackers) = cfg.http_trackers { + http_trackers[0].bind_address.clone_from(&ipv6); + } + + if let Some(ref mut udp_trackers) = cfg.udp_trackers { + udp_trackers[0].bind_address.clone_from(&ipv6); + } + + cfg +} + +/// Ephemeral without running any services. 
+#[must_use] +pub fn ephemeral_with_no_services() -> Configuration { + let mut cfg = ephemeral(); + + cfg.http_api = None; + cfg.http_trackers = None; + cfg.udp_trackers = None; + + cfg +} diff --git a/packages/test-helpers/src/lib.rs b/packages/test-helpers/src/lib.rs new file mode 100644 index 000000000..e66ea2adc --- /dev/null +++ b/packages/test-helpers/src/lib.rs @@ -0,0 +1,5 @@ +//! Testing helpers for [Torrust Tracker](https://docs.rs/torrust-tracker). +//! +//! A collection of functions and types to help with testing the tracker server. +pub mod configuration; +pub mod random; diff --git a/packages/test-helpers/src/random.rs b/packages/test-helpers/src/random.rs new file mode 100644 index 000000000..2133dcd29 --- /dev/null +++ b/packages/test-helpers/src/random.rs @@ -0,0 +1,10 @@ +//! Random data generators for testing. +use rand::distributions::Alphanumeric; +use rand::{thread_rng, Rng}; + +/// Returns a random alphanumeric string of a certain size. +/// +/// It is useful for generating random names, IDs, etc for testing. +pub fn string(size: usize) -> String { + thread_rng().sample_iter(&Alphanumeric).take(size).map(char::from).collect() +} diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml new file mode 100644 index 000000000..ba6a76f29 --- /dev/null +++ b/packages/torrent-repository/Cargo.toml @@ -0,0 +1,37 @@ +[package] +description = "A library that provides a repository of torrents files and their peers." 
+keywords = ["library", "repository", "torrents"] +name = "torrust-tracker-torrent-repository" +readme = "README.md" + +authors.workspace = true +categories.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +aquatic_udp_protocol = "0" +crossbeam-skiplist = "0" +dashmap = "6" +futures = "0" +parking_lot = "0" +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-tracker-clock = { version = "3.0.0", path = "../clock" } +torrust-tracker-configuration = { version = "3.0.0", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0", path = "../primitives" } +zerocopy = "0" + +[dev-dependencies] +async-std = { version = "1", features = ["attributes", "tokio1"] } +criterion = { version = "0", features = ["async_tokio"] } +rstest = "0" + +[[bench]] +harness = false +name = "repository_benchmark" diff --git a/packages/torrent-repository/README.md b/packages/torrent-repository/README.md new file mode 100644 index 000000000..ffc71f1d7 --- /dev/null +++ b/packages/torrent-repository/README.md @@ -0,0 +1,32 @@ +# Torrust Tracker Torrent Repository + +A library to provide a torrent repository to the [Torrust Tracker](https://github.com/torrust/torrust-tracker). 
+ +## Benchmarking + +```console +cargo bench -p torrust-tracker-torrent-repository +``` + +Example partial output: + +```output + Running benches/repository_benchmark.rs (target/release/deps/repository_benchmark-a9b0013c8d09c3c3) +add_one_torrent/RwLockStd + time: [63.057 ns 63.242 ns 63.506 ns] +Found 12 outliers among 100 measurements (12.00%) + 2 (2.00%) low severe + 2 (2.00%) low mild + 2 (2.00%) high mild + 6 (6.00%) high severe +add_one_torrent/RwLockStdMutexStd + time: [62.505 ns 63.077 ns 63.817 ns] +``` + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-torrent-repository). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/torrent-repository/benches/helpers/asyn.rs b/packages/torrent-repository/benches/helpers/asyn.rs new file mode 100644 index 000000000..08862abc8 --- /dev/null +++ b/packages/torrent-repository/benches/helpers/asyn.rs @@ -0,0 +1,153 @@ +use std::sync::Arc; +use std::time::{Duration, Instant}; + +use futures::stream::FuturesUnordered; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_torrent_repository::repository::RepositoryAsync; + +use super::utils::{generate_unique_info_hashes, DEFAULT_PEER}; + +pub async fn add_one_torrent(samples: u64) -> Duration +where + V: RepositoryAsync + Default, +{ + let start = Instant::now(); + + for _ in 0..samples { + let torrent_repository = V::default(); + + let info_hash = InfoHash::default(); + + torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER).await; + + torrent_repository.get_swarm_metadata(&info_hash).await; + } + + start.elapsed() +} + +// Add one torrent ten thousand times in parallel (depending on the set worker threads) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: u64, sleep: Option) -> Duration +where + V: RepositoryAsync + Default, + Arc: Clone + Send + Sync + 'static, +{ + let torrent_repository = 
Arc::::default(); + let info_hash = InfoHash::default(); + let handles = FuturesUnordered::new(); + + // Add the torrent/peer to the torrent repository + torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER).await; + + torrent_repository.get_swarm_metadata(&info_hash).await; + + let start = Instant::now(); + + for _ in 0..samples { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER).await; + + torrent_repository_clone.get_swarm_metadata(&info_hash).await; + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +} + +// Add ten thousand torrents in parallel (depending on the set worker threads) +pub async fn add_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option, +) -> Duration +where + V: RepositoryAsync + Default, + Arc: Clone + Send + Sync + 'static, +{ + let torrent_repository = Arc::::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in a usize")); + let handles = FuturesUnordered::new(); + + let start = Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER).await; + + torrent_repository_clone.get_swarm_metadata(&info_hash).await; + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +} + +// Async update ten thousand torrents in 
parallel (depending on the set worker threads) +pub async fn update_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option, +) -> Duration +where + V: RepositoryAsync + Default, + Arc: Clone + Send + Sync + 'static, +{ + let torrent_repository = Arc::::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in usize")); + let handles = FuturesUnordered::new(); + + // Add the torrents/peers to the torrent repository + for info_hash in &info_hashes { + torrent_repository.upsert_peer(info_hash, &DEFAULT_PEER).await; + torrent_repository.get_swarm_metadata(info_hash).await; + } + + let start = Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER).await; + torrent_repository_clone.get_swarm_metadata(&info_hash).await; + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +} diff --git a/packages/torrent-repository/benches/helpers/mod.rs b/packages/torrent-repository/benches/helpers/mod.rs new file mode 100644 index 000000000..1026aa4bf --- /dev/null +++ b/packages/torrent-repository/benches/helpers/mod.rs @@ -0,0 +1,3 @@ +pub mod asyn; +pub mod sync; +pub mod utils; diff --git a/packages/torrent-repository/benches/helpers/sync.rs b/packages/torrent-repository/benches/helpers/sync.rs new file mode 100644 index 000000000..77055911d --- /dev/null +++ b/packages/torrent-repository/benches/helpers/sync.rs @@ -0,0 +1,155 @@ +use std::sync::Arc; +use std::time::{Duration, Instant}; + +use futures::stream::FuturesUnordered; +use torrust_tracker_primitives::info_hash::InfoHash; +use 
torrust_tracker_torrent_repository::repository::Repository; + +use super::utils::{generate_unique_info_hashes, DEFAULT_PEER}; + +// Simply add one torrent +#[must_use] +pub fn add_one_torrent(samples: u64) -> Duration +where + V: Repository + Default, +{ + let start = Instant::now(); + + for _ in 0..samples { + let torrent_repository = V::default(); + + let info_hash = InfoHash::default(); + + torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER); + + torrent_repository.get_swarm_metadata(&info_hash); + } + + start.elapsed() +} + +// Add one torrent ten thousand times in parallel (depending on the set worker threads) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: u64, sleep: Option) -> Duration +where + V: Repository + Default, + Arc: Clone + Send + Sync + 'static, +{ + let torrent_repository = Arc::::default(); + let info_hash = InfoHash::default(); + let handles = FuturesUnordered::new(); + + // Add the torrent/peer to the torrent repository + torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER); + + torrent_repository.get_swarm_metadata(&info_hash); + + let start = Instant::now(); + + for _ in 0..samples { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER); + + torrent_repository_clone.get_swarm_metadata(&info_hash); + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +} + +// Add ten thousand torrents in parallel (depending on the set worker threads) +pub async fn add_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option, +) -> Duration +where + V: Repository + Default, + Arc: Clone + Send + Sync + 'static, +{ + let 
torrent_repository = Arc::::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in a usize")); + let handles = FuturesUnordered::new(); + + let start = Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER); + + torrent_repository_clone.get_swarm_metadata(&info_hash); + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +} + +// Update ten thousand torrents in parallel (depending on the set worker threads) +pub async fn update_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option, +) -> Duration +where + V: Repository + Default, + Arc: Clone + Send + Sync + 'static, +{ + let torrent_repository = Arc::::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in usize")); + let handles = FuturesUnordered::new(); + + // Add the torrents/peers to the torrent repository + for info_hash in &info_hashes { + torrent_repository.upsert_peer(info_hash, &DEFAULT_PEER); + torrent_repository.get_swarm_metadata(info_hash); + } + + let start = Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER); + torrent_repository_clone.get_swarm_metadata(&info_hash); + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + 
futures::future::join_all(handles).await; + + start.elapsed() +} diff --git a/packages/torrent-repository/benches/helpers/utils.rs b/packages/torrent-repository/benches/helpers/utils.rs new file mode 100644 index 000000000..e21ac7332 --- /dev/null +++ b/packages/torrent-repository/benches/helpers/utils.rs @@ -0,0 +1,41 @@ +use std::collections::HashSet; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use zerocopy::I64; + +pub const DEFAULT_PEER: Peer = Peer { + peer_id: PeerId([0; 20]), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::from_secs(0), + uploaded: NumberOfBytes(I64::ZERO), + downloaded: NumberOfBytes(I64::ZERO), + left: NumberOfBytes(I64::ZERO), + event: AnnounceEvent::Started, +}; + +#[must_use] +#[allow(clippy::missing_panics_doc)] +pub fn generate_unique_info_hashes(size: usize) -> Vec { + let mut result = HashSet::new(); + + let mut bytes = [0u8; 20]; + + #[allow(clippy::cast_possible_truncation)] + for i in 0..size { + bytes[0] = (i & 0xFF) as u8; + bytes[1] = ((i >> 8) & 0xFF) as u8; + bytes[2] = ((i >> 16) & 0xFF) as u8; + bytes[3] = ((i >> 24) & 0xFF) as u8; + + let info_hash = InfoHash::from_bytes(&bytes); + result.insert(info_hash); + } + + assert_eq!(result.len(), size); + + result.into_iter().collect() +} diff --git a/packages/torrent-repository/benches/repository_benchmark.rs b/packages/torrent-repository/benches/repository_benchmark.rs new file mode 100644 index 000000000..4e50f1454 --- /dev/null +++ b/packages/torrent-repository/benches/repository_benchmark.rs @@ -0,0 +1,270 @@ +use std::time::Duration; + +mod helpers; + +use criterion::{criterion_group, criterion_main, Criterion}; +use torrust_tracker_torrent_repository::{ + TorrentsDashMapMutexStd, 
TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, + TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexParkingLot, TorrentsSkipMapMutexStd, + TorrentsSkipMapRwLockParkingLot, +}; + +use crate::helpers::{asyn, sync}; + +fn add_one_torrent(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("add_one_torrent"); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(asyn::add_one_torrent::); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt).iter_custom(asyn::add_one_torrent::); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(asyn::add_one_torrent::); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(asyn::add_one_torrent::); + }); + + group.bench_function("SkipMapMutexStd", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.bench_function("SkipMapMutexParkingLot", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.bench_function("SkipMapRwLockParkingLot", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.bench_function("DashMapMutexStd", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.finish(); +} + +fn add_multiple_torrents_in_parallel(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("add_multiple_torrents_in_parallel"); + + //group.sampling_mode(criterion::SamplingMode::Flat); + //group.sample_size(10); + + 
group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapMutexParkingLot", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapRwLockParkingLot", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("DashMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.finish(); +} + +fn update_one_torrent_in_parallel(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("update_one_torrent_in_parallel"); + + 
//group.sampling_mode(criterion::SamplingMode::Flat); + //group.sample_size(10); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapMutexParkingLot", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapRwLockParkingLot", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("DashMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.finish(); +} + +fn update_multiple_torrents_in_parallel(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = 
c.benchmark_group("update_multiple_torrents_in_parallel"); + + //group.sampling_mode(criterion::SamplingMode::Flat); + //group.sample_size(10); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt).iter_custom(|iters| { + asyn::update_multiple_torrents_in_parallel::(&rt, iters, None) + }); + }); + + group.bench_function("SkipMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapMutexParkingLot", |b| { + b.to_async(&rt).iter_custom(|iters| { + sync::update_multiple_torrents_in_parallel::(&rt, iters, None) + }); + }); + + group.bench_function("SkipMapRwLockParkingLot", |b| { + b.to_async(&rt).iter_custom(|iters| { + sync::update_multiple_torrents_in_parallel::(&rt, iters, None) + }); + }); + + group.bench_function("DashMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.finish(); +} + +criterion_group!( + benches, + add_one_torrent, + 
add_multiple_torrents_in_parallel, + update_one_torrent_in_parallel, + update_multiple_torrents_in_parallel +); +criterion_main!(benches); diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs new file mode 100644 index 000000000..b920839d9 --- /dev/null +++ b/packages/torrent-repository/src/entry/mod.rs @@ -0,0 +1,92 @@ +use std::fmt::Debug; +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use self::peer_list::PeerList; + +pub mod mutex_parking_lot; +pub mod mutex_std; +pub mod mutex_tokio; +pub mod peer_list; +pub mod rw_lock_parking_lot; +pub mod single; + +pub trait Entry { + /// It returns the swarm metadata (statistics) as a struct: + /// + /// `(seeders, completed, leechers)` + fn get_swarm_metadata(&self) -> SwarmMetadata; + + /// Returns True if Still a Valid Entry according to the Tracker Policy + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool; + + /// Returns True if the Peers is Empty + fn peers_is_empty(&self) -> bool; + + /// Returns the number of Peers + fn get_peers_len(&self) -> usize; + + /// Get all swarm peers, optionally limiting the result. + fn get_peers(&self, limit: Option) -> Vec>; + + /// It returns the list of peers for a given peer client, optionally limiting the + /// result. + /// + /// It filters out the input peer, typically because we want to return this + /// list of peers to that client peer. + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec>; + + /// It updates a peer and returns true if the number of complete downloads have increased. + /// + /// The number of peers that have complete downloading is synchronously updated when peers are updated. + /// That's the total torrent downloads counter. 
+ fn upsert_peer(&mut self, peer: &peer::Peer) -> bool; + + /// It removes peer from the swarm that have not been updated for more than `current_cutoff` seconds + fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch); +} + +#[allow(clippy::module_name_repetitions)] +pub trait EntrySync { + fn get_swarm_metadata(&self) -> SwarmMetadata; + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool; + fn peers_is_empty(&self) -> bool; + fn get_peers_len(&self) -> usize; + fn get_peers(&self, limit: Option) -> Vec>; + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec>; + fn upsert_peer(&self, peer: &peer::Peer) -> bool; + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); +} + +#[allow(clippy::module_name_repetitions)] +pub trait EntryAsync { + fn get_swarm_metadata(&self) -> impl std::future::Future + Send; + fn meets_retaining_policy(self, policy: &TrackerPolicy) -> impl std::future::Future + Send; + fn peers_is_empty(&self) -> impl std::future::Future + Send; + fn get_peers_len(&self) -> impl std::future::Future + Send; + fn get_peers(&self, limit: Option) -> impl std::future::Future>> + Send; + fn get_peers_for_client( + &self, + client: &SocketAddr, + limit: Option, + ) -> impl std::future::Future>> + Send; + fn upsert_peer(self, peer: &peer::Peer) -> impl std::future::Future + Send; + fn remove_inactive_peers(self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future + Send; +} + +/// A data structure containing all the information about a torrent in the tracker. +/// +/// This is the tracker entry for a given torrent and contains the swarm data, +/// that's the list of all the peers trying to download the same torrent. +/// The tracker keeps one entry like this for every torrent. 
+#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Torrent { + /// A network of peers that are all trying to download the torrent associated to this entry + pub(crate) swarm: PeerList, + /// The number of peers that have ever completed downloading the torrent associated to this entry + pub(crate) downloaded: u32, +} diff --git a/packages/torrent-repository/src/entry/mutex_parking_lot.rs b/packages/torrent-repository/src/entry/mutex_parking_lot.rs new file mode 100644 index 000000000..738c3ff9d --- /dev/null +++ b/packages/torrent-repository/src/entry/mutex_parking_lot.rs @@ -0,0 +1,49 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use super::{Entry, EntrySync}; +use crate::{EntryMutexParkingLot, EntrySingle}; + +impl EntrySync for EntryMutexParkingLot { + fn get_swarm_metadata(&self) -> SwarmMetadata { + self.lock().get_swarm_metadata() + } + + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + self.lock().meets_retaining_policy(policy) + } + + fn peers_is_empty(&self) -> bool { + self.lock().peers_is_empty() + } + + fn get_peers_len(&self) -> usize { + self.lock().get_peers_len() + } + + fn get_peers(&self, limit: Option) -> Vec> { + self.lock().get_peers(limit) + } + + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.lock().get_peers_for_client(client, limit) + } + + fn upsert_peer(&self, peer: &peer::Peer) -> bool { + self.lock().upsert_peer(peer) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + self.lock().remove_inactive_peers(current_cutoff); + } +} + +impl From for EntryMutexParkingLot { + fn from(entry: EntrySingle) -> Self { + Arc::new(parking_lot::Mutex::new(entry)) + } +} diff --git a/packages/torrent-repository/src/entry/mutex_std.rs 
b/packages/torrent-repository/src/entry/mutex_std.rs new file mode 100644 index 000000000..0ab70a96f --- /dev/null +++ b/packages/torrent-repository/src/entry/mutex_std.rs @@ -0,0 +1,51 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use super::{Entry, EntrySync}; +use crate::{EntryMutexStd, EntrySingle}; + +impl EntrySync for EntryMutexStd { + fn get_swarm_metadata(&self) -> SwarmMetadata { + self.lock().expect("it should get a lock").get_swarm_metadata() + } + + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + self.lock().expect("it should get a lock").meets_retaining_policy(policy) + } + + fn peers_is_empty(&self) -> bool { + self.lock().expect("it should get a lock").peers_is_empty() + } + + fn get_peers_len(&self) -> usize { + self.lock().expect("it should get a lock").get_peers_len() + } + + fn get_peers(&self, limit: Option) -> Vec> { + self.lock().expect("it should get lock").get_peers(limit) + } + + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.lock().expect("it should get lock").get_peers_for_client(client, limit) + } + + fn upsert_peer(&self, peer: &peer::Peer) -> bool { + self.lock().expect("it should lock the entry").upsert_peer(peer) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + self.lock() + .expect("it should lock the entry") + .remove_inactive_peers(current_cutoff); + } +} + +impl From for EntryMutexStd { + fn from(entry: EntrySingle) -> Self { + Arc::new(std::sync::Mutex::new(entry)) + } +} diff --git a/packages/torrent-repository/src/entry/mutex_tokio.rs b/packages/torrent-repository/src/entry/mutex_tokio.rs new file mode 100644 index 000000000..6db789a72 --- /dev/null +++ b/packages/torrent-repository/src/entry/mutex_tokio.rs @@ -0,0 +1,49 @@ +use 
std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use super::{Entry, EntryAsync}; +use crate::{EntryMutexTokio, EntrySingle}; + +impl EntryAsync for EntryMutexTokio { + async fn get_swarm_metadata(&self) -> SwarmMetadata { + self.lock().await.get_swarm_metadata() + } + + async fn meets_retaining_policy(self, policy: &TrackerPolicy) -> bool { + self.lock().await.meets_retaining_policy(policy) + } + + async fn peers_is_empty(&self) -> bool { + self.lock().await.peers_is_empty() + } + + async fn get_peers_len(&self) -> usize { + self.lock().await.get_peers_len() + } + + async fn get_peers(&self, limit: Option) -> Vec> { + self.lock().await.get_peers(limit) + } + + async fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.lock().await.get_peers_for_client(client, limit) + } + + async fn upsert_peer(self, peer: &peer::Peer) -> bool { + self.lock().await.upsert_peer(peer) + } + + async fn remove_inactive_peers(self, current_cutoff: DurationSinceUnixEpoch) { + self.lock().await.remove_inactive_peers(current_cutoff); + } +} + +impl From for EntryMutexTokio { + fn from(entry: EntrySingle) -> Self { + Arc::new(tokio::sync::Mutex::new(entry)) + } +} diff --git a/packages/torrent-repository/src/entry/peer_list.rs b/packages/torrent-repository/src/entry/peer_list.rs new file mode 100644 index 000000000..33270cf27 --- /dev/null +++ b/packages/torrent-repository/src/entry/peer_list.rs @@ -0,0 +1,286 @@ +//! A peer list. +use std::net::SocketAddr; +use std::sync::Arc; + +use aquatic_udp_protocol::PeerId; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +// code-review: the current implementation uses the peer Id as the ``BTreeMap`` +// key. That would allow adding two identical peers except for the Id. 
+// For example, two peers with the same socket address but a different peer Id +// would be allowed. That would lead to duplicated peers in the tracker responses. + +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct PeerList { + peers: std::collections::BTreeMap>, +} + +impl PeerList { + #[must_use] + pub fn len(&self) -> usize { + self.peers.len() + } + + #[must_use] + pub fn is_empty(&self) -> bool { + self.peers.is_empty() + } + + pub fn upsert(&mut self, value: Arc) -> Option> { + self.peers.insert(value.peer_id, value) + } + + pub fn remove(&mut self, key: &PeerId) -> Option> { + self.peers.remove(key) + } + + pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { + self.peers + .retain(|_, peer| peer::ReadInfo::get_updated(peer) > current_cutoff); + } + + #[must_use] + pub fn get(&self, peer_id: &PeerId) -> Option<&Arc> { + self.peers.get(peer_id) + } + + #[must_use] + pub fn get_all(&self, limit: Option) -> Vec> { + match limit { + Some(limit) => self.peers.values().take(limit).cloned().collect(), + None => self.peers.values().cloned().collect(), + } + } + + #[must_use] + pub fn seeders_and_leechers(&self) -> (usize, usize) { + let seeders = self.peers.values().filter(|peer| peer.is_seeder()).count(); + let leechers = self.len() - seeders; + + (seeders, leechers) + } + + #[must_use] + pub fn get_peers_excluding_addr(&self, peer_addr: &SocketAddr, limit: Option) -> Vec> { + match limit { + Some(limit) => self + .peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *peer_addr) + // Limit the number of peers on the result + .take(limit) + .cloned() + .collect(), + None => self + .peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *peer_addr) + .cloned() + .collect(), + } + } +} + +#[cfg(test)] +mod tests { + + mod it_should { + use std::net::{IpAddr, 
Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::PeerId; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::entry::peer_list::PeerList; + + #[test] + fn be_empty_when_no_peers_have_been_inserted() { + let peer_list = PeerList::default(); + + assert!(peer_list.is_empty()); + } + + #[test] + fn have_zero_length_when_no_peers_have_been_inserted() { + let peer_list = PeerList::default(); + + assert_eq!(peer_list.len(), 0); + } + + #[test] + fn allow_inserting_a_new_peer() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + assert_eq!(peer_list.upsert(peer.into()), None); + } + + #[test] + fn allow_updating_a_preexisting_peer() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + assert_eq!(peer_list.upsert(peer.into()), Some(Arc::new(peer))); + } + + #[test] + fn allow_getting_all_peers() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + assert_eq!(peer_list.get_all(None), [Arc::new(peer)]); + } + + #[test] + fn allow_getting_one_peer_by_id() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + assert_eq!(peer_list.get(&peer.peer_id), Some(Arc::new(peer)).as_ref()); + } + + #[test] + fn increase_the_number_of_peers_after_inserting_a_new_one() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + assert_eq!(peer_list.len(), 1); + } + + #[test] + fn decrease_the_number_of_peers_after_removing_one() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + peer_list.remove(&peer.peer_id); + + assert!(peer_list.is_empty()); + } + + #[test] + 
fn allow_removing_an_existing_peer() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + peer_list.remove(&peer.peer_id); + + assert_eq!(peer_list.get(&peer.peer_id), None); + } + + #[test] + fn allow_getting_all_peers_excluding_peers_with_a_given_address() { + let mut peer_list = PeerList::default(); + + let peer1 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .build(); + peer_list.upsert(peer1.into()); + + let peer2 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) + .build(); + peer_list.upsert(peer2.into()); + + assert_eq!(peer_list.get_peers_excluding_addr(&peer2.peer_addr, None), [Arc::new(peer1)]); + } + + #[test] + fn return_the_number_of_seeders_in_the_list() { + let mut peer_list = PeerList::default(); + + let seeder = PeerBuilder::seeder().build(); + let leecher = PeerBuilder::leecher().build(); + + peer_list.upsert(seeder.into()); + peer_list.upsert(leecher.into()); + + let (seeders, _leechers) = peer_list.seeders_and_leechers(); + + assert_eq!(seeders, 1); + } + + #[test] + fn return_the_number_of_leechers_in_the_list() { + let mut peer_list = PeerList::default(); + + let seeder = PeerBuilder::seeder().build(); + let leecher = PeerBuilder::leecher().build(); + + peer_list.upsert(seeder.into()); + peer_list.upsert(leecher.into()); + + let (_seeders, leechers) = peer_list.seeders_and_leechers(); + + assert_eq!(leechers, 1); + } + + #[test] + fn remove_inactive_peers() { + let mut peer_list = PeerList::default(); + let one_second = DurationSinceUnixEpoch::new(1, 0); + + // Insert the peer + let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); + let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); + 
peer_list.upsert(peer.into()); + + // Remove peers not updated since one second after inserting the peer + peer_list.remove_inactive_peers(last_update_time + one_second); + + assert_eq!(peer_list.len(), 0); + } + + #[test] + fn not_remove_active_peers() { + let mut peer_list = PeerList::default(); + let one_second = DurationSinceUnixEpoch::new(1, 0); + + // Insert the peer + let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); + let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); + peer_list.upsert(peer.into()); + + // Remove peers not updated since one second before inserting the peer. + peer_list.remove_inactive_peers(last_update_time - one_second); + + assert_eq!(peer_list.len(), 1); + } + + #[test] + fn allow_inserting_two_identical_peers_except_for_the_id() { + let mut peer_list = PeerList::default(); + + let peer1 = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); + peer_list.upsert(peer1.into()); + + let peer2 = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000002")).build(); + peer_list.upsert(peer2.into()); + + assert_eq!(peer_list.len(), 2); + } + } +} diff --git a/packages/torrent-repository/src/entry/rw_lock_parking_lot.rs b/packages/torrent-repository/src/entry/rw_lock_parking_lot.rs new file mode 100644 index 000000000..ac0dc0b30 --- /dev/null +++ b/packages/torrent-repository/src/entry/rw_lock_parking_lot.rs @@ -0,0 +1,49 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use super::{Entry, EntrySync}; +use crate::{EntryRwLockParkingLot, EntrySingle}; + +impl EntrySync for EntryRwLockParkingLot { + fn get_swarm_metadata(&self) -> SwarmMetadata { + self.read().get_swarm_metadata() + } + + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + 
self.read().meets_retaining_policy(policy) + } + + fn peers_is_empty(&self) -> bool { + self.read().peers_is_empty() + } + + fn get_peers_len(&self) -> usize { + self.read().get_peers_len() + } + + fn get_peers(&self, limit: Option) -> Vec> { + self.read().get_peers(limit) + } + + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.read().get_peers_for_client(client, limit) + } + + fn upsert_peer(&self, peer: &peer::Peer) -> bool { + self.write().upsert_peer(peer) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + self.write().remove_inactive_peers(current_cutoff); + } +} + +impl From for EntryRwLockParkingLot { + fn from(entry: EntrySingle) -> Self { + Arc::new(parking_lot::RwLock::new(entry)) + } +} diff --git a/packages/torrent-repository/src/entry/single.rs b/packages/torrent-repository/src/entry/single.rs new file mode 100644 index 000000000..7f8cfc4e6 --- /dev/null +++ b/packages/torrent-repository/src/entry/single.rs @@ -0,0 +1,79 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use aquatic_udp_protocol::AnnounceEvent; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::peer::{self}; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::Entry; +use crate::EntrySingle; + +impl Entry for EntrySingle { + #[allow(clippy::cast_possible_truncation)] + fn get_swarm_metadata(&self) -> SwarmMetadata { + let (seeders, leechers) = self.swarm.seeders_and_leechers(); + + SwarmMetadata { + downloaded: self.downloaded, + complete: seeders as u32, + incomplete: leechers as u32, + } + } + + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + if policy.persistent_torrent_completed_stat && self.downloaded > 0 { + return true; + } + + if policy.remove_peerless_torrents && self.swarm.is_empty() { + return false; + } + + true + } + + fn peers_is_empty(&self) -> bool { + 
// Only count the download when the peer was already known and had not previously reported `Completed`.
RwLockTokio; +pub type TorrentsRwLockTokioMutexStd = RwLockTokio; +pub type TorrentsRwLockTokioMutexTokio = RwLockTokio; + +pub type TorrentsSkipMapMutexStd = CrossbeamSkipList; +pub type TorrentsSkipMapMutexParkingLot = CrossbeamSkipList; +pub type TorrentsSkipMapRwLockParkingLot = CrossbeamSkipList; + +pub type TorrentsDashMapMutexStd = XacrimonDashMap; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; diff --git a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs new file mode 100644 index 000000000..4354c12ec --- /dev/null +++ b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs @@ -0,0 +1,108 @@ +use std::sync::Arc; + +use dashmap::DashMap; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::Repository; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntrySync}; +use crate::{EntryMutexStd, EntrySingle}; + +#[derive(Default, Debug)] +pub struct XacrimonDashMap { + pub torrents: DashMap, +} + +impl Repository for XacrimonDashMap +where + EntryMutexStd: EntrySync, + EntrySingle: Entry, +{ + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + if let Some(entry) = self.torrents.get(info_hash) { + entry.upsert_peer(peer); + } else { + let _unused = self.torrents.insert(*info_hash, Arc::default()); + if let Some(entry) = self.torrents.get(info_hash) { + 
entry.upsert_peer(peer); + } + } + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) + } + + fn get(&self, key: &InfoHash) -> Option { + let maybe_entry = self.torrents.get(key); + maybe_entry.map(|entry| entry.clone()) + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in &self.torrents { + let stats = entry.value().lock().expect("it should get a lock").get_swarm_metadata(); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + match pagination { + Some(pagination) => self + .torrents + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + None => self + .torrents + .iter() + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + for (info_hash, completed) in persistent_torrents { + if self.torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexStd::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + self.torrents.insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + self.torrents.remove(key).map(|(_key, value)| value.clone()) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + for entry in &self.torrents { + entry.value().remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + self.torrents.retain(|_, entry| entry.meets_retaining_policy(policy)); + } +} diff --git 
a/packages/torrent-repository/src/repository/mod.rs b/packages/torrent-repository/src/repository/mod.rs new file mode 100644 index 000000000..f198288f8 --- /dev/null +++ b/packages/torrent-repository/src/repository/mod.rs @@ -0,0 +1,42 @@ +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +pub mod dash_map_mutex_std; +pub mod rw_lock_std; +pub mod rw_lock_std_mutex_std; +pub mod rw_lock_std_mutex_tokio; +pub mod rw_lock_tokio; +pub mod rw_lock_tokio_mutex_std; +pub mod rw_lock_tokio_mutex_tokio; +pub mod skip_map_mutex_std; + +use std::fmt::Debug; + +pub trait Repository: Debug + Default + Sized + 'static { + fn get(&self, key: &InfoHash) -> Option; + fn get_metrics(&self) -> TorrentsMetrics; + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, T)>; + fn import_persistent(&self, persistent_torrents: &PersistentTorrents); + fn remove(&self, key: &InfoHash) -> Option; + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); + fn remove_peerless_torrents(&self, policy: &TrackerPolicy); + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer); + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option; +} + +#[allow(clippy::module_name_repetitions)] +pub trait RepositoryAsync: Debug + Default + Sized + 'static { + fn get(&self, key: &InfoHash) -> impl std::future::Future> + Send; + fn get_metrics(&self) -> impl std::future::Future + Send; + fn get_paginated(&self, pagination: Option<&Pagination>) -> impl std::future::Future> + Send; + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) -> impl std::future::Future + Send; + fn remove(&self, key: &InfoHash) -> impl 
std::future::Future> + Send; + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future + Send; + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> impl std::future::Future + Send; + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> impl std::future::Future + Send; + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> impl std::future::Future> + Send; +} diff --git a/packages/torrent-repository/src/repository/rw_lock_std.rs b/packages/torrent-repository/src/repository/rw_lock_std.rs new file mode 100644 index 000000000..5439fdd79 --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_std.rs @@ -0,0 +1,131 @@ +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::Repository; +use crate::entry::peer_list::PeerList; +use crate::entry::Entry; +use crate::{EntrySingle, TorrentsRwLockStd}; + +#[derive(Default, Debug)] +pub struct RwLockStd { + pub(crate) torrents: std::sync::RwLock>, +} + +impl RwLockStd { + /// # Panics + /// + /// Panics if unable to get a lock. 
+ pub fn write( + &self, + ) -> std::sync::RwLockWriteGuard<'_, std::collections::BTreeMap> { + self.torrents.write().expect("it should get lock") + } +} + +impl TorrentsRwLockStd { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("it should get the read lock") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("it should get the write lock") + } +} + +impl Repository for TorrentsRwLockStd +where + EntrySingle: Entry, +{ + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + let mut db = self.get_torrents_mut(); + + let entry = db.entry(*info_hash).or_insert(EntrySingle::default()); + + entry.upsert_peer(peer); + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.get(info_hash).map(|entry| entry.get_swarm_metadata()) + } + + fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().values() { + let stats = entry.get_swarm_metadata(); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = 
self.get_torrents_mut(); + + for (info_hash, downloaded) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = EntrySingle { + swarm: PeerList::default(), + downloaded: *downloaded, + }; + + torrents.insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let mut db = self.get_torrents_mut(); + let entries = db.values_mut(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.meets_retaining_policy(policy)); + } +} diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs new file mode 100644 index 000000000..7d58b0b10 --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs @@ -0,0 +1,129 @@ +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::Repository; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntrySync}; +use crate::{EntryMutexStd, EntrySingle, TorrentsRwLockStdMutexStd}; + +impl TorrentsRwLockStdMutexStd { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> 
std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl Repository for TorrentsRwLockStdMutexStd +where + EntryMutexStd: EntrySync, + EntrySingle: Entry, +{ + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + let maybe_entry = self.get_torrents().get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut(); + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.upsert_peer(peer); + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.get_torrents() + .get(info_hash) + .map(super::super::entry::EntrySync::get_swarm_metadata) + } + + fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().values() { + let stats = entry.lock().expect("it should get a lock").get_swarm_metadata(); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + 
+ let entry = EntryMutexStd::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + torrents.insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let db = self.get_torrents(); + let entries = db.values().cloned(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.lock().expect("it should lock entry").meets_retaining_policy(policy)); + } +} diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs new file mode 100644 index 000000000..90451ca9f --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs @@ -0,0 +1,161 @@ +use std::iter::zip; +use std::pin::Pin; +use std::sync::Arc; + +use futures::future::join_all; +use futures::{Future, FutureExt}; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::RepositoryAsync; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntryAsync}; +use crate::{EntryMutexTokio, EntrySingle, TorrentsRwLockStdMutexTokio}; + +impl TorrentsRwLockStdMutexTokio { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a 
self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl RepositoryAsync for TorrentsRwLockStdMutexTokio +where + EntryMutexTokio: EntryAsync, + EntrySingle: Entry, +{ + async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + let maybe_entry = self.get_torrents().get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut(); + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.upsert_peer(peer).await; + } + + async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + let maybe_entry = self.get_torrents().get(info_hash).cloned(); + + match maybe_entry { + Some(entry) => Some(entry.get_swarm_metadata().await), + None => None, + } + } + + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexTokio)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + let entries: Vec<_> = self.get_torrents().values().cloned().collect(); + + for entry in entries { + let stats = entry.lock().await.get_swarm_metadata(); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut db = self.get_torrents_mut(); + 
+ for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if db.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexTokio::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + db.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let handles: Vec + Send>>>; + { + let db = self.get_torrents(); + handles = db + .values() + .cloned() + .map(|e| e.remove_inactive_peers(current_cutoff).boxed()) + .collect(); + } + join_all(handles).await; + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let handles: Vec> + Send>>>; + + { + let db = self.get_torrents(); + + handles = zip(db.keys().copied(), db.values().cloned()) + .map(|(infohash, torrent)| { + torrent + .meets_retaining_policy(policy) + .map(move |should_be_retained| if should_be_retained { None } else { Some(infohash) }) + .boxed() + }) + .collect::>(); + } + + let not_good = join_all(handles).await; + + let mut db = self.get_torrents_mut(); + + for remove in not_good.into_iter().flatten() { + drop(db.remove(&remove)); + } + } +} diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio.rs new file mode 100644 index 000000000..baaa01232 --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_tokio.rs @@ -0,0 +1,135 @@ +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::RepositoryAsync; 
+use crate::entry::peer_list::PeerList; +use crate::entry::Entry; +use crate::{EntrySingle, TorrentsRwLockTokio}; + +#[derive(Default, Debug)] +pub struct RwLockTokio { + pub(crate) torrents: tokio::sync::RwLock>, +} + +impl RwLockTokio { + pub fn write( + &self, + ) -> impl std::future::Future< + Output = tokio::sync::RwLockWriteGuard< + '_, + std::collections::BTreeMap, + >, + > { + self.torrents.write() + } +} + +impl TorrentsRwLockTokio { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl RepositoryAsync for TorrentsRwLockTokio +where + EntrySingle: Entry, +{ + async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + let mut db = self.get_torrents_mut().await; + + let entry = db.entry(*info_hash).or_insert(EntrySingle::default()); + + entry.upsert_peer(peer); + } + + async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.get(info_hash).await.map(|entry| entry.get_swarm_metadata()) + } + + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().await.values() { + let stats = entry.get_swarm_metadata(); + metrics.complete += 
u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + }; + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let mut db = self.get_torrents_mut().await; + let entries = db.values_mut(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.meets_retaining_policy(policy)); + } +} diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs new file mode 100644 index 000000000..1887f70c7 --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs @@ -0,0 +1,129 @@ +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::RepositoryAsync; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntrySync}; +use 
crate::{EntryMutexStd, EntrySingle, TorrentsRwLockTokioMutexStd}; + +impl TorrentsRwLockTokioMutexStd { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl RepositoryAsync for TorrentsRwLockTokioMutexStd +where + EntryMutexStd: EntrySync, + EntrySingle: Entry, +{ + async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut().await; + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.upsert_peer(peer); + } + + async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.get(info_hash).await.map(|entry| entry.get_swarm_metadata()) + } + + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().await.values() { + let stats = entry.get_swarm_metadata(); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 
1; + } + + metrics + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexStd::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let db = self.get_torrents().await; + let entries = db.values().cloned(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.lock().expect("it should lock entry").meets_retaining_policy(policy)); + } +} diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs new file mode 100644 index 000000000..6c9c08a73 --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -0,0 +1,142 @@ +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::RepositoryAsync; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntryAsync}; +use crate::{EntryMutexTokio, EntrySingle, TorrentsRwLockTokioMutexTokio}; + +impl 
TorrentsRwLockTokioMutexTokio { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl RepositoryAsync for TorrentsRwLockTokioMutexTokio +where + EntryMutexTokio: EntryAsync, + EntrySingle: Entry, +{ + async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut().await; + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.upsert_peer(peer).await; + } + + async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + match self.get(info_hash).await { + Some(entry) => Some(entry.get_swarm_metadata().await), + None => None, + } + } + + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexTokio)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().await.values() { + let stats = entry.get_swarm_metadata().await; + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + 
+ metrics + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut db = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if db.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexTokio::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + db.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let db = self.get_torrents().await; + let entries = db.values().cloned(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff).await; + } + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + let mut not_good = Vec::::default(); + + for (&infohash, torrent) in db.iter() { + if !torrent.clone().meets_retaining_policy(policy).await { + not_good.push(infohash); + } + } + + for remove in not_good { + drop(db.remove(&remove)); + } + } +} diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs new file mode 100644 index 000000000..dd0d9c1b1 --- /dev/null +++ b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs @@ -0,0 +1,292 @@ +use std::sync::Arc; + +use crossbeam_skiplist::SkipMap; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::Repository; +use crate::entry::peer_list::PeerList; +use 
crate::entry::{Entry, EntrySync}; +use crate::{EntryMutexParkingLot, EntryMutexStd, EntryRwLockParkingLot, EntrySingle}; + +#[derive(Default, Debug)] +pub struct CrossbeamSkipList { + pub torrents: SkipMap, +} + +impl Repository for CrossbeamSkipList +where + EntryMutexStd: EntrySync, + EntrySingle: Entry, +{ + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); + entry.value().upsert_peer(peer); + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) + } + + fn get(&self, key: &InfoHash) -> Option { + let maybe_entry = self.torrents.get(key); + maybe_entry.map(|entry| entry.value().clone()) + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in &self.torrents { + let stats = entry.value().lock().expect("it should get a lock").get_swarm_metadata(); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + match pagination { + Some(pagination) => self + .torrents + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + None => self + .torrents + .iter() + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + for (info_hash, completed) in persistent_torrents { + if self.torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexStd::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + // Since SkipMap is lock-free the torrent could have been 
inserted + // after checking if it exists. + self.torrents.get_or_insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + self.torrents.remove(key).map(|entry| entry.value().clone()) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + for entry in &self.torrents { + entry.value().remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + for entry in &self.torrents { + if entry.value().meets_retaining_policy(policy) { + continue; + } + + entry.remove(); + } + } +} + +impl Repository for CrossbeamSkipList +where + EntryRwLockParkingLot: EntrySync, + EntrySingle: Entry, +{ + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); + entry.value().upsert_peer(peer); + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) + } + + fn get(&self, key: &InfoHash) -> Option { + let maybe_entry = self.torrents.get(key); + maybe_entry.map(|entry| entry.value().clone()) + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in &self.torrents { + let stats = entry.value().read().get_swarm_metadata(); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryRwLockParkingLot)> { + match pagination { + Some(pagination) => self + .torrents + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + None => self + .torrents + .iter() + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + } + } + + fn 
import_persistent(&self, persistent_torrents: &PersistentTorrents) { + for (info_hash, completed) in persistent_torrents { + if self.torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryRwLockParkingLot::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + // Since SkipMap is lock-free the torrent could have been inserted + // after checking if it exists. + self.torrents.get_or_insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + self.torrents.remove(key).map(|entry| entry.value().clone()) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + for entry in &self.torrents { + entry.value().remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + for entry in &self.torrents { + if entry.value().meets_retaining_policy(policy) { + continue; + } + + entry.remove(); + } + } +} + +impl Repository for CrossbeamSkipList +where + EntryMutexParkingLot: EntrySync, + EntrySingle: Entry, +{ + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); + entry.value().upsert_peer(peer); + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) + } + + fn get(&self, key: &InfoHash) -> Option { + let maybe_entry = self.torrents.get(key); + maybe_entry.map(|entry| entry.value().clone()) + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in &self.torrents { + let stats = entry.value().lock().get_swarm_metadata(); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> 
Vec<(InfoHash, EntryMutexParkingLot)> { + match pagination { + Some(pagination) => self + .torrents + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + None => self + .torrents + .iter() + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + for (info_hash, completed) in persistent_torrents { + if self.torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexParkingLot::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + // Since SkipMap is lock-free the torrent could have been inserted + // after checking if it exists. + self.torrents.get_or_insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + self.torrents.remove(key).map(|entry| entry.value().clone()) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + for entry in &self.torrents { + entry.value().remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + for entry in &self.torrents { + if entry.value().meets_retaining_policy(policy) { + continue; + } + + entry.remove(); + } + } +} diff --git a/packages/torrent-repository/tests/common/mod.rs b/packages/torrent-repository/tests/common/mod.rs new file mode 100644 index 000000000..efdf7f742 --- /dev/null +++ b/packages/torrent-repository/tests/common/mod.rs @@ -0,0 +1,3 @@ +pub mod repo; +pub mod torrent; +pub mod torrent_peer_builder; diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs new file mode 100644 index 000000000..f317d0d17 --- /dev/null +++ b/packages/torrent-repository/tests/common/repo.rs @@ -0,0 +1,238 @@ +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; 
+use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; +use torrust_tracker_torrent_repository::repository::{Repository as _, RepositoryAsync as _}; +use torrust_tracker_torrent_repository::{ + EntrySingle, TorrentsDashMapMutexStd, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, + TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexParkingLot, + TorrentsSkipMapMutexStd, TorrentsSkipMapRwLockParkingLot, +}; + +#[derive(Debug)] +pub(crate) enum Repo { + RwLockStd(TorrentsRwLockStd), + RwLockStdMutexStd(TorrentsRwLockStdMutexStd), + RwLockStdMutexTokio(TorrentsRwLockStdMutexTokio), + RwLockTokio(TorrentsRwLockTokio), + RwLockTokioMutexStd(TorrentsRwLockTokioMutexStd), + RwLockTokioMutexTokio(TorrentsRwLockTokioMutexTokio), + SkipMapMutexStd(TorrentsSkipMapMutexStd), + SkipMapMutexParkingLot(TorrentsSkipMapMutexParkingLot), + SkipMapRwLockParkingLot(TorrentsSkipMapRwLockParkingLot), + DashMapMutexStd(TorrentsDashMapMutexStd), +} + +impl Repo { + pub(crate) async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + match self { + Repo::RwLockStd(repo) => repo.upsert_peer(info_hash, peer), + Repo::RwLockStdMutexStd(repo) => repo.upsert_peer(info_hash, peer), + Repo::RwLockStdMutexTokio(repo) => repo.upsert_peer(info_hash, peer).await, + Repo::RwLockTokio(repo) => repo.upsert_peer(info_hash, peer).await, + Repo::RwLockTokioMutexStd(repo) => repo.upsert_peer(info_hash, peer).await, + Repo::RwLockTokioMutexTokio(repo) => repo.upsert_peer(info_hash, peer).await, + Repo::SkipMapMutexStd(repo) => repo.upsert_peer(info_hash, peer), + Repo::SkipMapMutexParkingLot(repo) => repo.upsert_peer(info_hash, peer), + Repo::SkipMapRwLockParkingLot(repo) => repo.upsert_peer(info_hash, 
peer), + Repo::DashMapMutexStd(repo) => repo.upsert_peer(info_hash, peer), + } + } + + pub(crate) async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + match self { + Repo::RwLockStd(repo) => repo.get_swarm_metadata(info_hash), + Repo::RwLockStdMutexStd(repo) => repo.get_swarm_metadata(info_hash), + Repo::RwLockStdMutexTokio(repo) => repo.get_swarm_metadata(info_hash).await, + Repo::RwLockTokio(repo) => repo.get_swarm_metadata(info_hash).await, + Repo::RwLockTokioMutexStd(repo) => repo.get_swarm_metadata(info_hash).await, + Repo::RwLockTokioMutexTokio(repo) => repo.get_swarm_metadata(info_hash).await, + Repo::SkipMapMutexStd(repo) => repo.get_swarm_metadata(info_hash), + Repo::SkipMapMutexParkingLot(repo) => repo.get_swarm_metadata(info_hash), + Repo::SkipMapRwLockParkingLot(repo) => repo.get_swarm_metadata(info_hash), + Repo::DashMapMutexStd(repo) => repo.get_swarm_metadata(info_hash), + } + } + + pub(crate) async fn get(&self, key: &InfoHash) -> Option { + match self { + Repo::RwLockStd(repo) => repo.get(key), + Repo::RwLockStdMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), + Repo::RwLockStdMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()), + Repo::RwLockTokio(repo) => repo.get(key).await, + Repo::RwLockTokioMutexStd(repo) => Some(repo.get(key).await?.lock().unwrap().clone()), + Repo::RwLockTokioMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()), + Repo::SkipMapMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), + Repo::SkipMapMutexParkingLot(repo) => Some(repo.get(key)?.lock().clone()), + Repo::SkipMapRwLockParkingLot(repo) => Some(repo.get(key)?.read().clone()), + Repo::DashMapMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), + } + } + + pub(crate) async fn get_metrics(&self) -> TorrentsMetrics { + match self { + Repo::RwLockStd(repo) => repo.get_metrics(), + Repo::RwLockStdMutexStd(repo) => repo.get_metrics(), + Repo::RwLockStdMutexTokio(repo) => 
repo.get_metrics().await, + Repo::RwLockTokio(repo) => repo.get_metrics().await, + Repo::RwLockTokioMutexStd(repo) => repo.get_metrics().await, + Repo::RwLockTokioMutexTokio(repo) => repo.get_metrics().await, + Repo::SkipMapMutexStd(repo) => repo.get_metrics(), + Repo::SkipMapMutexParkingLot(repo) => repo.get_metrics(), + Repo::SkipMapRwLockParkingLot(repo) => repo.get_metrics(), + Repo::DashMapMutexStd(repo) => repo.get_metrics(), + } + } + + pub(crate) async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { + match self { + Repo::RwLockStd(repo) => repo.get_paginated(pagination), + Repo::RwLockStdMutexStd(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) + .collect(), + Repo::RwLockStdMutexTokio(repo) => { + let mut v: Vec<(InfoHash, EntrySingle)> = vec![]; + + for (i, t) in repo.get_paginated(pagination).await { + v.push((i, t.lock().await.clone())); + } + v + } + Repo::RwLockTokio(repo) => repo.get_paginated(pagination).await, + Repo::RwLockTokioMutexStd(repo) => repo + .get_paginated(pagination) + .await + .iter() + .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) + .collect(), + Repo::RwLockTokioMutexTokio(repo) => { + let mut v: Vec<(InfoHash, EntrySingle)> = vec![]; + + for (i, t) in repo.get_paginated(pagination).await { + v.push((i, t.lock().await.clone())); + } + v + } + Repo::SkipMapMutexStd(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) + .collect(), + Repo::SkipMapMutexParkingLot(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, t.lock().clone())) + .collect(), + Repo::SkipMapRwLockParkingLot(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, t.read().clone())) + .collect(), + Repo::DashMapMutexStd(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, t.lock().expect("it should 
get a lock").clone())) + .collect(), + } + } + + pub(crate) async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + match self { + Repo::RwLockStd(repo) => repo.import_persistent(persistent_torrents), + Repo::RwLockStdMutexStd(repo) => repo.import_persistent(persistent_torrents), + Repo::RwLockStdMutexTokio(repo) => repo.import_persistent(persistent_torrents).await, + Repo::RwLockTokio(repo) => repo.import_persistent(persistent_torrents).await, + Repo::RwLockTokioMutexStd(repo) => repo.import_persistent(persistent_torrents).await, + Repo::RwLockTokioMutexTokio(repo) => repo.import_persistent(persistent_torrents).await, + Repo::SkipMapMutexStd(repo) => repo.import_persistent(persistent_torrents), + Repo::SkipMapMutexParkingLot(repo) => repo.import_persistent(persistent_torrents), + Repo::SkipMapRwLockParkingLot(repo) => repo.import_persistent(persistent_torrents), + Repo::DashMapMutexStd(repo) => repo.import_persistent(persistent_torrents), + } + } + + pub(crate) async fn remove(&self, key: &InfoHash) -> Option { + match self { + Repo::RwLockStd(repo) => repo.remove(key), + Repo::RwLockStdMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), + Repo::RwLockStdMutexTokio(repo) => Some(repo.remove(key).await?.lock().await.clone()), + Repo::RwLockTokio(repo) => repo.remove(key).await, + Repo::RwLockTokioMutexStd(repo) => Some(repo.remove(key).await?.lock().unwrap().clone()), + Repo::RwLockTokioMutexTokio(repo) => Some(repo.remove(key).await?.lock().await.clone()), + Repo::SkipMapMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), + Repo::SkipMapMutexParkingLot(repo) => Some(repo.remove(key)?.lock().clone()), + Repo::SkipMapRwLockParkingLot(repo) => Some(repo.remove(key)?.write().clone()), + Repo::DashMapMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), + } + } + + pub(crate) async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + match self { + Repo::RwLockStd(repo) => 
repo.remove_inactive_peers(current_cutoff), + Repo::RwLockStdMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::RwLockStdMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::RwLockTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::RwLockTokioMutexStd(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::RwLockTokioMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::SkipMapMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::SkipMapMutexParkingLot(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::SkipMapRwLockParkingLot(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::DashMapMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), + } + } + + pub(crate) async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + match self { + Repo::RwLockStd(repo) => repo.remove_peerless_torrents(policy), + Repo::RwLockStdMutexStd(repo) => repo.remove_peerless_torrents(policy), + Repo::RwLockStdMutexTokio(repo) => repo.remove_peerless_torrents(policy).await, + Repo::RwLockTokio(repo) => repo.remove_peerless_torrents(policy).await, + Repo::RwLockTokioMutexStd(repo) => repo.remove_peerless_torrents(policy).await, + Repo::RwLockTokioMutexTokio(repo) => repo.remove_peerless_torrents(policy).await, + Repo::SkipMapMutexStd(repo) => repo.remove_peerless_torrents(policy), + Repo::SkipMapMutexParkingLot(repo) => repo.remove_peerless_torrents(policy), + Repo::SkipMapRwLockParkingLot(repo) => repo.remove_peerless_torrents(policy), + Repo::DashMapMutexStd(repo) => repo.remove_peerless_torrents(policy), + } + } + + pub(crate) async fn insert(&self, info_hash: &InfoHash, torrent: EntrySingle) -> Option { + match self { + Repo::RwLockStd(repo) => { + repo.write().insert(*info_hash, torrent); + } + Repo::RwLockStdMutexStd(repo) => { + repo.write().insert(*info_hash, torrent.into()); + } + Repo::RwLockStdMutexTokio(repo) => { + 
repo.write().insert(*info_hash, torrent.into()); + } + Repo::RwLockTokio(repo) => { + repo.write().await.insert(*info_hash, torrent); + } + Repo::RwLockTokioMutexStd(repo) => { + repo.write().await.insert(*info_hash, torrent.into()); + } + Repo::RwLockTokioMutexTokio(repo) => { + repo.write().await.insert(*info_hash, torrent.into()); + } + Repo::SkipMapMutexStd(repo) => { + repo.torrents.insert(*info_hash, torrent.into()); + } + Repo::SkipMapMutexParkingLot(repo) => { + repo.torrents.insert(*info_hash, torrent.into()); + } + Repo::SkipMapRwLockParkingLot(repo) => { + repo.torrents.insert(*info_hash, torrent.into()); + } + Repo::DashMapMutexStd(repo) => { + repo.torrents.insert(*info_hash, torrent.into()); + } + }; + self.get(info_hash).await + } +} diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs new file mode 100644 index 000000000..927f13169 --- /dev/null +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -0,0 +1,101 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; +use torrust_tracker_torrent_repository::entry::{Entry as _, EntryAsync as _, EntrySync as _}; +use torrust_tracker_torrent_repository::{ + EntryMutexParkingLot, EntryMutexStd, EntryMutexTokio, EntryRwLockParkingLot, EntrySingle, +}; + +#[derive(Debug, Clone)] +pub(crate) enum Torrent { + Single(EntrySingle), + MutexStd(EntryMutexStd), + MutexTokio(EntryMutexTokio), + MutexParkingLot(EntryMutexParkingLot), + RwLockParkingLot(EntryRwLockParkingLot), +} + +impl Torrent { + pub(crate) async fn get_stats(&self) -> SwarmMetadata { + match self { + Torrent::Single(entry) => entry.get_swarm_metadata(), + Torrent::MutexStd(entry) => entry.get_swarm_metadata(), + Torrent::MutexTokio(entry) => entry.clone().get_swarm_metadata().await, + 
Torrent::MutexParkingLot(entry) => entry.clone().get_swarm_metadata(), + Torrent::RwLockParkingLot(entry) => entry.clone().get_swarm_metadata(), + } + } + + pub(crate) async fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + match self { + Torrent::Single(entry) => entry.meets_retaining_policy(policy), + Torrent::MutexStd(entry) => entry.meets_retaining_policy(policy), + Torrent::MutexTokio(entry) => entry.clone().meets_retaining_policy(policy).await, + Torrent::MutexParkingLot(entry) => entry.meets_retaining_policy(policy), + Torrent::RwLockParkingLot(entry) => entry.meets_retaining_policy(policy), + } + } + + pub(crate) async fn peers_is_empty(&self) -> bool { + match self { + Torrent::Single(entry) => entry.peers_is_empty(), + Torrent::MutexStd(entry) => entry.peers_is_empty(), + Torrent::MutexTokio(entry) => entry.clone().peers_is_empty().await, + Torrent::MutexParkingLot(entry) => entry.peers_is_empty(), + Torrent::RwLockParkingLot(entry) => entry.peers_is_empty(), + } + } + + pub(crate) async fn get_peers_len(&self) -> usize { + match self { + Torrent::Single(entry) => entry.get_peers_len(), + Torrent::MutexStd(entry) => entry.get_peers_len(), + Torrent::MutexTokio(entry) => entry.clone().get_peers_len().await, + Torrent::MutexParkingLot(entry) => entry.get_peers_len(), + Torrent::RwLockParkingLot(entry) => entry.get_peers_len(), + } + } + + pub(crate) async fn get_peers(&self, limit: Option) -> Vec> { + match self { + Torrent::Single(entry) => entry.get_peers(limit), + Torrent::MutexStd(entry) => entry.get_peers(limit), + Torrent::MutexTokio(entry) => entry.clone().get_peers(limit).await, + Torrent::MutexParkingLot(entry) => entry.get_peers(limit), + Torrent::RwLockParkingLot(entry) => entry.get_peers(limit), + } + } + + pub(crate) async fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + match self { + Torrent::Single(entry) => entry.get_peers_for_client(client, limit), + Torrent::MutexStd(entry) => 
entry.get_peers_for_client(client, limit), + Torrent::MutexTokio(entry) => entry.clone().get_peers_for_client(client, limit).await, + Torrent::MutexParkingLot(entry) => entry.get_peers_for_client(client, limit), + Torrent::RwLockParkingLot(entry) => entry.get_peers_for_client(client, limit), + } + } + + pub(crate) async fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { + match self { + Torrent::Single(entry) => entry.upsert_peer(peer), + Torrent::MutexStd(entry) => entry.upsert_peer(peer), + Torrent::MutexTokio(entry) => entry.clone().upsert_peer(peer).await, + Torrent::MutexParkingLot(entry) => entry.upsert_peer(peer), + Torrent::RwLockParkingLot(entry) => entry.upsert_peer(peer), + } + } + + pub(crate) async fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { + match self { + Torrent::Single(entry) => entry.remove_inactive_peers(current_cutoff), + Torrent::MutexStd(entry) => entry.remove_inactive_peers(current_cutoff), + Torrent::MutexTokio(entry) => entry.clone().remove_inactive_peers(current_cutoff).await, + Torrent::MutexParkingLot(entry) => entry.remove_inactive_peers(current_cutoff), + Torrent::RwLockParkingLot(entry) => entry.remove_inactive_peers(current_cutoff), + } + } +} diff --git a/packages/torrent-repository/tests/common/torrent_peer_builder.rs b/packages/torrent-repository/tests/common/torrent_peer_builder.rs new file mode 100644 index 000000000..33120180d --- /dev/null +++ b/packages/torrent-repository/tests/common/torrent_peer_builder.rs @@ -0,0 +1,90 @@ +use std::net::SocketAddr; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use crate::CurrentClock; + +#[derive(Debug, Default)] +struct TorrentPeerBuilder { + peer: peer::Peer, +} + +#[allow(dead_code)] +impl TorrentPeerBuilder { + #[must_use] + fn new() -> Self { + Self { + peer: peer::Peer { + updated: CurrentClock::now(), + 
..Default::default() + }, + } + } + + #[must_use] + fn with_event_completed(mut self) -> Self { + self.peer.event = AnnounceEvent::Completed; + self + } + + #[must_use] + fn with_event_started(mut self) -> Self { + self.peer.event = AnnounceEvent::Started; + self + } + + #[must_use] + fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; + self + } + + #[must_use] + fn with_peer_id(mut self, peer_id: PeerId) -> Self { + self.peer.peer_id = peer_id; + self + } + + #[must_use] + fn with_number_of_bytes_left(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes::new(left); + self + } + + #[must_use] + fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { + self.peer.updated = updated; + self + } + + #[must_use] + fn into(self) -> peer::Peer { + self.peer + } +} + +/// A torrent seeder is a peer with 0 bytes left to download which +/// has not announced it has stopped +#[must_use] +pub fn a_completed_peer(id: i32) -> peer::Peer { + let peer_id = peer::Id::new(id); + TorrentPeerBuilder::new() + .with_number_of_bytes_left(0) + .with_event_completed() + .with_peer_id(*peer_id) + .into() +} + +/// A torrent leecher is a peer that is not a seeder. 
+/// Leecher: left > 0 OR event = Stopped +#[must_use] +pub fn a_started_peer(id: i32) -> peer::Peer { + let peer_id = peer::Id::new(id); + TorrentPeerBuilder::new() + .with_number_of_bytes_left(1) + .with_event_started() + .with_peer_id(*peer_id) + .into() +} diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs new file mode 100644 index 000000000..43d7f94da --- /dev/null +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -0,0 +1,443 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::ops::Sub; +use std::time::Duration; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use rstest::{fixture, rstest}; +use torrust_tracker_clock::clock::stopped::Stopped as _; +use torrust_tracker_clock::clock::{self, Time as _}; +use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; +use torrust_tracker_primitives::peer; +use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_torrent_repository::{ + EntryMutexParkingLot, EntryMutexStd, EntryMutexTokio, EntryRwLockParkingLot, EntrySingle, +}; + +use crate::common::torrent::Torrent; +use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; +use crate::CurrentClock; + +#[fixture] +fn single() -> Torrent { + Torrent::Single(EntrySingle::default()) +} +#[fixture] +fn mutex_std() -> Torrent { + Torrent::MutexStd(EntryMutexStd::default()) +} + +#[fixture] +fn mutex_tokio() -> Torrent { + Torrent::MutexTokio(EntryMutexTokio::default()) +} + +#[fixture] +fn mutex_parking_lot() -> Torrent { + Torrent::MutexParkingLot(EntryMutexParkingLot::default()) +} + +#[fixture] +fn rw_lock_parking_lot() -> Torrent { + Torrent::RwLockParkingLot(EntryRwLockParkingLot::default()) +} + +#[fixture] +fn policy_none() -> TrackerPolicy { + TrackerPolicy::new(0, false, false) +} + +#[fixture] +fn policy_persist() -> TrackerPolicy { + TrackerPolicy::new(0, true, false) +} + +#[fixture] +fn policy_remove() -> TrackerPolicy { + 
TrackerPolicy::new(0, false, true) +} + +#[fixture] +fn policy_remove_persist() -> TrackerPolicy { + TrackerPolicy::new(0, true, true) +} + +pub enum Makes { + Empty, + Started, + Completed, + Downloaded, + Three, +} + +async fn make(torrent: &mut Torrent, makes: &Makes) -> Vec { + match makes { + Makes::Empty => vec![], + Makes::Started => { + let peer = a_started_peer(1); + torrent.upsert_peer(&peer).await; + vec![peer] + } + Makes::Completed => { + let peer = a_completed_peer(2); + torrent.upsert_peer(&peer).await; + vec![peer] + } + Makes::Downloaded => { + let mut peer = a_started_peer(3); + torrent.upsert_peer(&peer).await; + peer.event = AnnounceEvent::Completed; + peer.left = NumberOfBytes::new(0); + torrent.upsert_peer(&peer).await; + vec![peer] + } + Makes::Three => { + let peer_1 = a_started_peer(1); + torrent.upsert_peer(&peer_1).await; + + let peer_2 = a_completed_peer(2); + torrent.upsert_peer(&peer_2).await; + + let mut peer_3 = a_started_peer(3); + torrent.upsert_peer(&peer_3).await; + peer_3.event = AnnounceEvent::Completed; + peer_3.left = NumberOfBytes::new(0); + torrent.upsert_peer(&peer_3).await; + vec![peer_1, peer_2, peer_3] + } + } +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[tokio::test] +async fn it_should_be_empty_by_default( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + + assert_eq!(torrent.get_peers_len().await, 0); +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, + #[values(policy_none(), policy_persist(), 
policy_remove(), policy_remove_persist())] policy: TrackerPolicy, +) { + make(&mut torrent, makes).await; + + let has_peers = !torrent.peers_is_empty().await; + let has_downloads = torrent.get_stats().await.downloaded != 0; + + match (policy.remove_peerless_torrents, policy.persistent_torrent_completed_stat) { + // remove torrents without peers, and keep completed download stats + (true, true) => match (has_peers, has_downloads) { + // no peers, but has downloads + // peers, with or without downloads + (false, true) | (true, true | false) => assert!(torrent.meets_retaining_policy(&policy).await), + // no peers and no downloads + (false, false) => assert!(!torrent.meets_retaining_policy(&policy).await), + }, + // remove torrents without peers and drop completed download stats + (true, false) => match (has_peers, has_downloads) { + // peers, with or without downloads + (true, true | false) => assert!(torrent.meets_retaining_policy(&policy).await), + // no peers and with or without downloads + (false, true | false) => assert!(!torrent.meets_retaining_policy(&policy).await), + }, + // keep torrents without peers, but keep or drop completed download stats + (false, true | false) => assert!(torrent.meets_retaining_policy(&policy).await), + } +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_get_peers_for_torrent_entry( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + let peers = make(&mut torrent, makes).await; + + let torrent_peers = torrent.get_peers(None).await; + + assert_eq!(torrent_peers.len(), peers.len()); + + for peer in torrent_peers { + assert!(peers.contains(&peer)); + } +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] 
+#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_update_a_peer(#[values(single(), mutex_std(), mutex_tokio())] mut torrent: Torrent, #[case] makes: &Makes) { + make(&mut torrent, makes).await; + + // Make and insert a new peer. + let mut peer = a_started_peer(-1); + torrent.upsert_peer(&peer).await; + + // Get the Inserted Peer by Id. + let peers = torrent.get_peers(None).await; + let original = peers + .iter() + .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) + .expect("it should find peer by id"); + + assert_eq!(original.event, AnnounceEvent::Started, "it should be as created"); + + // Announce "Completed" torrent download event. + peer.event = AnnounceEvent::Completed; + torrent.upsert_peer(&peer).await; + + // Get the Updated Peer by Id. + let peers = torrent.get_peers(None).await; + let updated = peers + .iter() + .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) + .expect("it should find peer by id"); + + assert_eq!(updated.event, AnnounceEvent::Completed, "it should be updated"); +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_remove_a_peer_upon_stopped_announcement( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + use torrust_tracker_primitives::peer::ReadInfo as _; + + make(&mut torrent, makes).await; + + let mut peer = a_started_peer(-1); + + torrent.upsert_peer(&peer).await; + + // The started peer should be inserted. 
+ let peers = torrent.get_peers(None).await; + let original = peers + .iter() + .find(|p| p.get_id() == peer.get_id()) + .expect("it should find peer by id"); + + assert_eq!(original.event, AnnounceEvent::Started); + + // Change peer to "Stopped" and insert. + peer.event = AnnounceEvent::Stopped; + torrent.upsert_peer(&peer).await; + + // It should be removed now. + let peers = torrent.get_peers(None).await; + + assert_eq!( + peers.iter().find(|p| p.get_id() == peer.get_id()), + None, + "it should be removed" + ); +} + +#[rstest] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloaded_statistic( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + let downloaded = torrent.get_stats().await.downloaded; + + let peers = torrent.get_peers(None).await; + let mut peer = **peers.first().expect("there should be a peer"); + + let is_already_completed = peer.event == AnnounceEvent::Completed; + + // Announce "Completed" torrent download event. 
+ peer.event = AnnounceEvent::Completed; + + torrent.upsert_peer(&peer).await; + let stats = torrent.get_stats().await; + + if is_already_completed { + assert_eq!(stats.downloaded, downloaded); + } else { + assert_eq!(stats.downloaded, downloaded + 1); + } +} + +#[rstest] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_update_a_peer_as_a_seeder( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + let peers = make(&mut torrent, makes).await; + let completed = u32::try_from(peers.iter().filter(|p| p.is_seeder()).count()).expect("it_should_not_be_so_many"); + + let peers = torrent.get_peers(None).await; + let mut peer = **peers.first().expect("there should be a peer"); + + let is_already_non_left = peer.left == NumberOfBytes::new(0); + + // Set Bytes Left to Zero + peer.left = NumberOfBytes::new(0); + torrent.upsert_peer(&peer).await; + let stats = torrent.get_stats().await; + + if is_already_non_left { + // it was already complete + assert_eq!(stats.complete, completed); + } else { + // now it is complete + assert_eq!(stats.complete, completed + 1); + } +} + +#[rstest] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_update_a_peer_as_incomplete( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + let peers = make(&mut torrent, makes).await; + let incomplete = u32::try_from(peers.iter().filter(|p| !p.is_seeder()).count()).expect("it should not be so many"); + + let peers = torrent.get_peers(None).await; + let mut peer = **peers.first().expect("there should be a peer"); + + let completed_already = peer.left == 
NumberOfBytes::new(0); + + // Set Bytes Left to no Zero + peer.left = NumberOfBytes::new(1); + torrent.upsert_peer(&peer).await; + let stats = torrent.get_stats().await; + + if completed_already { + // now it is incomplete + assert_eq!(stats.incomplete, incomplete + 1); + } else { + // was already incomplete + assert_eq!(stats.incomplete, incomplete); + } +} + +#[rstest] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_get_peers_excluding_the_client_socket( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + + let peers = torrent.get_peers(None).await; + let mut peer = **peers.first().expect("there should be a peer"); + + let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081); + + // for this test, we should not already use this socket. + assert_ne!(peer.peer_addr, socket); + + // it should get the peer as it dose not share the socket. + assert!(torrent.get_peers_for_client(&socket, None).await.contains(&peer.into())); + + // set the address to the socket. + peer.peer_addr = socket; + torrent.upsert_peer(&peer).await; // Add peer + + // It should not include the peer that has the same socket. 
+ assert!(!torrent.get_peers_for_client(&socket, None).await.contains(&peer.into())); +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_limit_the_number_of_peers_returned( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + + // We add one more peer than the scrape limit + for peer_number in 1..=74 + 1 { + let mut peer = a_started_peer(1); + peer.peer_id = *peer::Id::new(peer_number); + torrent.upsert_peer(&peer).await; + } + + let peers = torrent.get_peers(Some(TORRENT_PEERS_LIMIT)).await; + + assert_eq!(peers.len(), 74); +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_remove_inactive_peers_beyond_cutoff( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + const TIMEOUT: Duration = Duration::from_secs(120); + const EXPIRE: Duration = Duration::from_secs(121); + + let peers = make(&mut torrent, makes).await; + + let mut peer = a_completed_peer(-1); + + let now = clock::Working::now(); + clock::Stopped::local_set(&now); + + peer.updated = now.sub(EXPIRE); + + torrent.upsert_peer(&peer).await; + + assert_eq!(torrent.get_peers_len().await, peers.len() + 1); + + let current_cutoff = CurrentClock::now_sub(&TIMEOUT).unwrap_or_default(); + torrent.remove_inactive_peers(current_cutoff).await; + + assert_eq!(torrent.get_peers_len().await, peers.len()); +} diff --git a/packages/torrent-repository/tests/integration.rs b/packages/torrent-repository/tests/integration.rs new file mode 100644 index 
000000000..5aab67b03 --- /dev/null +++ b/packages/torrent-repository/tests/integration.rs @@ -0,0 +1,22 @@ +//! Integration tests. +//! +//! ```text +//! cargo test --test integration +//! ``` + +use torrust_tracker_clock::clock; + +pub mod common; +mod entry; +mod repository; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs new file mode 100644 index 000000000..05d538582 --- /dev/null +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -0,0 +1,639 @@ +use std::collections::{BTreeMap, HashSet}; +use std::hash::{DefaultHasher, Hash, Hasher}; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use rstest::{fixture, rstest}; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::PersistentTorrents; +use torrust_tracker_torrent_repository::entry::Entry as _; +use torrust_tracker_torrent_repository::repository::dash_map_mutex_std::XacrimonDashMap; +use torrust_tracker_torrent_repository::repository::rw_lock_std::RwLockStd; +use torrust_tracker_torrent_repository::repository::rw_lock_tokio::RwLockTokio; +use torrust_tracker_torrent_repository::repository::skip_map_mutex_std::CrossbeamSkipList; +use torrust_tracker_torrent_repository::EntrySingle; + +use crate::common::repo::Repo; +use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; + +#[fixture] +fn standard() -> Repo { + Repo::RwLockStd(RwLockStd::default()) +} + +#[fixture] +fn standard_mutex() -> Repo { + 
Repo::RwLockStdMutexStd(RwLockStd::default()) +} + +#[fixture] +fn standard_tokio() -> Repo { + Repo::RwLockStdMutexTokio(RwLockStd::default()) +} + +#[fixture] +fn tokio_std() -> Repo { + Repo::RwLockTokio(RwLockTokio::default()) +} + +#[fixture] +fn tokio_mutex() -> Repo { + Repo::RwLockTokioMutexStd(RwLockTokio::default()) +} + +#[fixture] +fn tokio_tokio() -> Repo { + Repo::RwLockTokioMutexTokio(RwLockTokio::default()) +} + +#[fixture] +fn skip_list_mutex_std() -> Repo { + Repo::SkipMapMutexStd(CrossbeamSkipList::default()) +} + +#[fixture] +fn skip_list_mutex_parking_lot() -> Repo { + Repo::SkipMapMutexParkingLot(CrossbeamSkipList::default()) +} + +#[fixture] +fn skip_list_rw_lock_parking_lot() -> Repo { + Repo::SkipMapRwLockParkingLot(CrossbeamSkipList::default()) +} + +#[fixture] +fn dash_map_std() -> Repo { + Repo::DashMapMutexStd(XacrimonDashMap::default()) +} + +type Entries = Vec<(InfoHash, EntrySingle)>; + +#[fixture] +fn empty() -> Entries { + vec![] +} + +#[fixture] +fn default() -> Entries { + vec![(InfoHash::default(), EntrySingle::default())] +} + +#[fixture] +fn started() -> Entries { + let mut torrent = EntrySingle::default(); + torrent.upsert_peer(&a_started_peer(1)); + vec![(InfoHash::default(), torrent)] +} + +#[fixture] +fn completed() -> Entries { + let mut torrent = EntrySingle::default(); + torrent.upsert_peer(&a_completed_peer(2)); + vec![(InfoHash::default(), torrent)] +} + +#[fixture] +fn downloaded() -> Entries { + let mut torrent = EntrySingle::default(); + let mut peer = a_started_peer(3); + torrent.upsert_peer(&peer); + peer.event = AnnounceEvent::Completed; + peer.left = NumberOfBytes::new(0); + torrent.upsert_peer(&peer); + vec![(InfoHash::default(), torrent)] +} + +#[fixture] +fn three() -> Entries { + let mut started = EntrySingle::default(); + let started_h = &mut DefaultHasher::default(); + started.upsert_peer(&a_started_peer(1)); + started.hash(started_h); + + let mut completed = EntrySingle::default(); + let completed_h = 
&mut DefaultHasher::default(); + completed.upsert_peer(&a_completed_peer(2)); + completed.hash(completed_h); + + let mut downloaded = EntrySingle::default(); + let downloaded_h = &mut DefaultHasher::default(); + let mut downloaded_peer = a_started_peer(3); + downloaded.upsert_peer(&downloaded_peer); + downloaded_peer.event = AnnounceEvent::Completed; + downloaded_peer.left = NumberOfBytes::new(0); + downloaded.upsert_peer(&downloaded_peer); + downloaded.hash(downloaded_h); + + vec![ + (InfoHash::from(&started_h.clone()), started), + (InfoHash::from(&completed_h.clone()), completed), + (InfoHash::from(&downloaded_h.clone()), downloaded), + ] +} + +#[fixture] +fn many_out_of_order() -> Entries { + let mut entries: HashSet<(InfoHash, EntrySingle)> = HashSet::default(); + + for i in 0..408 { + let mut entry = EntrySingle::default(); + entry.upsert_peer(&a_started_peer(i)); + + entries.insert((InfoHash::from(&i), entry)); + } + + // we keep the random order from the hashed set for the vector. + entries.iter().map(|(i, e)| (*i, e.clone())).collect() +} + +#[fixture] +fn many_hashed_in_order() -> Entries { + let mut entries: BTreeMap = BTreeMap::default(); + + for i in 0..408 { + let mut entry = EntrySingle::default(); + entry.upsert_peer(&a_started_peer(i)); + + let hash: &mut DefaultHasher = &mut DefaultHasher::default(); + hash.write_i32(i); + + entries.insert(InfoHash::from(&hash.clone()), entry); + } + + // We return the entries in-order from from the b-tree map. 
+ entries.iter().map(|(i, e)| (*i, e.clone())).collect() +} + +#[fixture] +fn persistent_empty() -> PersistentTorrents { + PersistentTorrents::default() +} + +#[fixture] +fn persistent_single() -> PersistentTorrents { + let hash = &mut DefaultHasher::default(); + + hash.write_u8(1); + let t = [(InfoHash::from(&hash.clone()), 0_u32)]; + + t.iter().copied().collect() +} + +#[fixture] +fn persistent_three() -> PersistentTorrents { + let hash = &mut DefaultHasher::default(); + + hash.write_u8(1); + let info_1 = InfoHash::from(&hash.clone()); + hash.write_u8(2); + let info_2 = InfoHash::from(&hash.clone()); + hash.write_u8(3); + let info_3 = InfoHash::from(&hash.clone()); + + let t = [(info_1, 1_u32), (info_2, 2_u32), (info_3, 3_u32)]; + + t.iter().copied().collect() +} + +async fn make(repo: &Repo, entries: &Entries) { + for (info_hash, entry) in entries { + repo.insert(info_hash, entry.clone()).await; + } +} + +#[fixture] +fn paginated_limit_zero() -> Pagination { + Pagination::new(0, 0) +} + +#[fixture] +fn paginated_limit_one() -> Pagination { + Pagination::new(0, 1) +} + +#[fixture] +fn paginated_limit_one_offset_one() -> Pagination { + Pagination::new(1, 1) +} + +#[fixture] +fn policy_none() -> TrackerPolicy { + TrackerPolicy::new(0, false, false) +} + +#[fixture] +fn policy_persist() -> TrackerPolicy { + TrackerPolicy::new(0, true, false) +} + +#[fixture] +fn policy_remove() -> TrackerPolicy { + TrackerPolicy::new(0, false, true) +} + +#[fixture] +fn policy_remove_persist() -> TrackerPolicy { + TrackerPolicy::new(0, true, true) +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_a_torrent_entry( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + 
tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot(), + dash_map_std() + )] + repo: Repo, + #[case] entries: Entries, +) { + make(&repo, &entries).await; + + if let Some((info_hash, torrent)) = entries.first() { + assert_eq!(repo.get(info_hash).await, Some(torrent.clone())); + } else { + assert_eq!(repo.get(&InfoHash::default()).await, None); + } +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot() + )] + repo: Repo, + #[case] entries: Entries, + many_out_of_order: Entries, +) { + make(&repo, &entries).await; + + let entries_a = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::>(); + + make(&repo, &many_out_of_order).await; + + let entries_b = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::>(); + + let is_equal = entries_b.iter().take(entries_a.len()).copied().collect::>() == entries_a; + + let is_sorted = entries_b.windows(2).all(|w| w[0] <= w[1]); + + assert!( + is_equal || is_sorted, + "The order is unstable: {is_equal}, or is sorted {is_sorted}." 
+ ); +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_paginated( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot() + )] + repo: Repo, + #[case] entries: Entries, + #[values(paginated_limit_zero(), paginated_limit_one(), paginated_limit_one_offset_one())] paginated: Pagination, +) { + make(&repo, &entries).await; + + let mut info_hashes = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::>(); + info_hashes.sort(); + + match paginated { + // it should return empty if limit is zero. + Pagination { limit: 0, .. } => assert_eq!(repo.get_paginated(Some(&paginated)).await, vec![]), + + // it should return a single entry if the limit is one. + Pagination { limit: 1, offset: 0 } => { + if info_hashes.is_empty() { + assert_eq!(repo.get_paginated(Some(&paginated)).await.len(), 0); + } else { + let page = repo.get_paginated(Some(&paginated)).await; + assert_eq!(page.len(), 1); + assert_eq!(page.first().map(|(i, _)| i), info_hashes.first()); + } + } + + // it should return the only the second entry if both the limit and the offset are one. + Pagination { limit: 1, offset: 1 } => { + if info_hashes.len() > 1 { + let page = repo.get_paginated(Some(&paginated)).await; + assert_eq!(page.len(), 1); + assert_eq!(page[0].0, info_hashes[1]); + } + } + // the other cases are not yet tested. 
+ _ => {} + } +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_metrics( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot(), + dash_map_std() + )] + repo: Repo, + #[case] entries: Entries, +) { + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + + make(&repo, &entries).await; + + let mut metrics = TorrentsMetrics::default(); + + for (_, torrent) in entries { + let stats = torrent.get_swarm_metadata(); + + metrics.torrents += 1; + metrics.incomplete += u64::from(stats.incomplete); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + } + + assert_eq!(repo.get_metrics().await, metrics); +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_import_persistent_torrents( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot(), + dash_map_std() + )] + repo: Repo, + #[case] entries: Entries, + #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: PersistentTorrents, +) { + make(&repo, &entries).await; + + let mut downloaded = repo.get_metrics().await.downloaded; + persistent_torrents.iter().for_each(|(_, d)| downloaded += u64::from(*d)); + + 
repo.import_persistent(&persistent_torrents).await; + + assert_eq!(repo.get_metrics().await.downloaded, downloaded); + + for (entry, _) in persistent_torrents { + assert!(repo.get(&entry).await.is_some()); + } +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_remove_an_entry( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot(), + dash_map_std() + )] + repo: Repo, + #[case] entries: Entries, +) { + make(&repo, &entries).await; + + for (info_hash, torrent) in entries { + assert_eq!(repo.get(&info_hash).await, Some(torrent.clone())); + assert_eq!(repo.remove(&info_hash).await, Some(torrent)); + + assert_eq!(repo.get(&info_hash).await, None); + assert_eq!(repo.remove(&info_hash).await, None); + } + + assert_eq!(repo.get_metrics().await.torrents, 0); +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_remove_inactive_peers( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot(), + dash_map_std() + )] + repo: Repo, + #[case] entries: Entries, +) { + use std::ops::Sub as _; + use std::time::Duration; + + use torrust_tracker_clock::clock::stopped::Stopped as _; + use torrust_tracker_clock::clock::{self, Time as _}; + use torrust_tracker_primitives::peer; 
+ + use crate::CurrentClock; + + const TIMEOUT: Duration = Duration::from_secs(120); + const EXPIRE: Duration = Duration::from_secs(121); + + make(&repo, &entries).await; + + let info_hash: InfoHash; + let mut peer: peer::Peer; + + // Generate a new infohash and peer. + { + let hash = &mut DefaultHasher::default(); + hash.write_u8(255); + info_hash = InfoHash::from(&hash.clone()); + peer = a_completed_peer(-1); + } + + // Set the last updated time of the peer to be 121 seconds ago. + { + let now = clock::Working::now(); + clock::Stopped::local_set(&now); + + peer.updated = now.sub(EXPIRE); + } + + // Insert the infohash and peer into the repository + // and verify there is an extra torrent entry. + { + repo.upsert_peer(&info_hash, &peer).await; + assert_eq!(repo.get_metrics().await.torrents, entries.len() as u64 + 1); + } + + // Insert the infohash and peer into the repository + // and verify the swarm metadata was updated. + { + repo.upsert_peer(&info_hash, &peer).await; + let stats = repo.get_swarm_metadata(&info_hash).await; + assert_eq!( + stats, + Some(SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0 + }) + ); + } + + // Verify that this new peer was inserted into the repository. + { + let entry = repo.get(&info_hash).await.expect("it_should_get_some"); + assert!(entry.get_peers(None).contains(&peer.into())); + } + + // Remove peers that have not been updated since the timeout (120 seconds ago). + { + repo.remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed")) + .await; + } + + // Verify that the this peer was removed from the repository. 
+ { + let entry = repo.get(&info_hash).await.expect("it_should_get_some"); + assert!(!entry.get_peers(None).contains(&peer.into())); + } +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_remove_peerless_torrents( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot(), + dash_map_std() + )] + repo: Repo, + #[case] entries: Entries, + #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, +) { + make(&repo, &entries).await; + + repo.remove_peerless_torrents(&policy).await; + + let torrents = repo.get_paginated(None).await; + + for (_, entry) in torrents { + assert!(entry.meets_retaining_policy(&policy)); + } +} diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 000000000..76046e6f4 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,3 @@ +group_imports = "StdExternalCrate" +imports_granularity = "Module" +max_width = 130 diff --git a/share/container/entry_script_sh b/share/container/entry_script_sh new file mode 100644 index 000000000..32cdfe33d --- /dev/null +++ b/share/container/entry_script_sh @@ -0,0 +1,82 @@ +#!/bin/sh +set -x + +to_lc() { echo "$1" | tr '[:upper:]' '[:lower:]'; } +clean() { echo "$1" | tr -d -c 'a-zA-Z0-9-' ; } +cmp_lc() { [ "$(to_lc "$(clean "$1")")" = "$(to_lc "$(clean "$2")")" ]; } + + +inst() { + if [ -n "$1" ] && [ -n "$2" ] && [ -e "$1" ] && [ ! -e "$2" ]; then + install -D -m 0640 -o torrust -g torrust "$1" "$2"; fi; } + + +# Add torrust user, based upon supplied user-id. 
+if [ -z "$USER_ID" ] || [ "$USER_ID" -lt 1000 ]; then
+    echo "ERROR: USER_ID is not set, or less than 1000"
+    exit 1
+fi
+
+adduser --disabled-password --shell "/bin/sh" --uid "$USER_ID" "torrust"
+
+# Configure Permissions for Torrust Folders
+mkdir -p /var/lib/torrust/tracker/database/ /etc/torrust/tracker/
+chown -R "${USER_ID}":"${USER_ID}" /var/lib/torrust /var/log/torrust /etc/torrust
+chmod -R 2770 /var/lib/torrust /var/log/torrust /etc/torrust
+
+
+# Install the database and config:
+if [ -n "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER" ]; then
+    if cmp_lc "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER" "sqlite3"; then
+
+        # Select Sqlite3 empty database
+        default_database="/usr/share/torrust/default/database/tracker.sqlite3.db"
+
+        # Select Sqlite3 default configuration
+        default_config="/usr/share/torrust/default/config/tracker.container.sqlite3.toml"
+
+    elif cmp_lc "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER" "mysql"; then
+
+        # (no database file needed for MySQL)
+
+        # Select default MySQL configuration
+        default_config="/usr/share/torrust/default/config/tracker.container.mysql.toml"
+
+    else
+        echo "Error: Unsupported Database Type: \"$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER\"."
+        echo "Please Note: Supported Database Types: \"sqlite3\", \"mysql\"."
+ exit 1 + fi +else + echo "Error: \"\$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER\" was not set!"; exit 1; +fi + +install_config="/etc/torrust/tracker/tracker.toml" +install_database="/var/lib/torrust/tracker/database/sqlite3.db" + +inst "$default_config" "$install_config" +inst "$default_database" "$install_database" + +# Make Minimal Message of the Day +if cmp_lc "$RUNTIME" "runtime"; then + printf '\n in runtime \n' >> /etc/motd; +elif cmp_lc "$RUNTIME" "debug"; then + printf '\n in debug mode \n' >> /etc/motd; +elif cmp_lc "$RUNTIME" "release"; then + printf '\n in release mode \n' >> /etc/motd; +else + echo "ERROR: running in unknown mode: \"$RUNTIME\""; exit 1 +fi + +if [ -e "/usr/share/torrust/container/message" ]; then + cat "/usr/share/torrust/container/message" >> /etc/motd; chmod 0644 /etc/motd +fi + +# Load message of the day from Profile +# shellcheck disable=SC2016 +echo '[ ! -z "$TERM" -a -r /etc/motd ] && cat /etc/motd' >> /etc/profile + +cd /home/torrust || exit 1 + +# Switch to torrust user +exec /bin/su-exec torrust "$@" diff --git a/share/container/message b/share/container/message new file mode 100644 index 000000000..6bfd6bfb8 --- /dev/null +++ b/share/container/message @@ -0,0 +1,4 @@ + +Lovely welcome to our Torrust Tracker Container! 
+ +run 'torrust-tracker' to start tracker diff --git a/share/default/config/tracker.container.mysql.toml b/share/default/config/tracker.container.mysql.toml new file mode 100644 index 000000000..865ea224e --- /dev/null +++ b/share/default/config/tracker.container.mysql.toml @@ -0,0 +1,29 @@ +[metadata] +app = "torrust-tracker" +purpose = "configuration" +schema_version = "2.0.0" + +[logging] +threshold = "info" + +[core] +listed = false +private = false + +[core.database] +driver = "mysql" +path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" + +# Uncomment to enable services + +#[[udp_trackers]] +#bind_address = "0.0.0.0:6969" + +#[[http_trackers]] +#bind_address = "0.0.0.0:7070" + +#[http_api] +#bind_address = "0.0.0.0:1212" + +#[http_api.access_tokens] +#admin = "MyAccessToken" diff --git a/share/default/config/tracker.container.sqlite3.toml b/share/default/config/tracker.container.sqlite3.toml new file mode 100644 index 000000000..6c73cf54a --- /dev/null +++ b/share/default/config/tracker.container.sqlite3.toml @@ -0,0 +1,28 @@ +[metadata] +app = "torrust-tracker" +purpose = "configuration" +schema_version = "2.0.0" + +[logging] +threshold = "info" + +[core] +listed = false +private = false + +[core.database] +path = "/var/lib/torrust/tracker/database/sqlite3.db" + +# Uncomment to enable services + +#[[udp_trackers]] +#bind_address = "0.0.0.0:6969" + +#[[http_trackers]] +#bind_address = "0.0.0.0:7070" + +#[http_api] +#bind_address = "0.0.0.0:1212" + +#[http_api.access_tokens] +#admin = "MyAccessToken" diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml new file mode 100644 index 000000000..96addaf87 --- /dev/null +++ b/share/default/config/tracker.development.sqlite3.toml @@ -0,0 +1,23 @@ +[metadata] +app = "torrust-tracker" +purpose = "configuration" +schema_version = "2.0.0" + +[logging] +threshold = "info" + +[core] +listed = false +private = false + 
+[[udp_trackers]] +bind_address = "0.0.0.0:6969" + +[[http_trackers]] +bind_address = "0.0.0.0:7070" + +[http_api] +bind_address = "0.0.0.0:1212" + +[http_api.access_tokens] +admin = "MyAccessToken" diff --git a/share/default/config/tracker.e2e.container.sqlite3.toml b/share/default/config/tracker.e2e.container.sqlite3.toml new file mode 100644 index 000000000..73c6df219 --- /dev/null +++ b/share/default/config/tracker.e2e.container.sqlite3.toml @@ -0,0 +1,30 @@ +[metadata] +app = "torrust-tracker" +purpose = "configuration" +schema_version = "2.0.0" + +[logging] +threshold = "info" + +[core] +listed = false +private = false + +[core.database] +path = "/var/lib/torrust/tracker/database/sqlite3.db" + +[[udp_trackers]] +bind_address = "0.0.0.0:6969" + +[[http_trackers]] +bind_address = "0.0.0.0:7070" + +[http_api] +bind_address = "0.0.0.0:1212" + +[http_api.access_tokens] +admin = "MyAccessToken" + +[health_check_api] +# Must be bound to wildcard IP to be accessible from outside the container +bind_address = "0.0.0.0:1313" diff --git a/share/default/config/tracker.udp.benchmarking.toml b/share/default/config/tracker.udp.benchmarking.toml new file mode 100644 index 000000000..c6644d8dc --- /dev/null +++ b/share/default/config/tracker.udp.benchmarking.toml @@ -0,0 +1,21 @@ +[metadata] +schema_version = "2.0.0" + +[logging] +threshold = "error" + +[core] +listed = false +private = false +tracker_usage_statistics = false + +[core.database] +driver = "sqlite3" +path = "./sqlite3.db" + +[core.tracker_policy] +persistent_torrent_completed_stat = false +remove_peerless_torrents = false + +[[udp_trackers]] +bind_address = "0.0.0.0:6969" diff --git a/share/default/config/tracker_checker.json b/share/default/config/tracker_checker.json new file mode 100644 index 000000000..7d1453bfd --- /dev/null +++ b/share/default/config/tracker_checker.json @@ -0,0 +1,11 @@ +{ + "udp_trackers": [ + "127.0.0.1:6969" + ], + "http_trackers": [ + "http://127.0.0.1:7070" + ], + "health_checks": [ 
+ "http://127.0.0.1:1313/health_check" + ] +} \ No newline at end of file diff --git a/src/app.rs b/src/app.rs new file mode 100644 index 000000000..06fea4d2e --- /dev/null +++ b/src/app.rs @@ -0,0 +1,128 @@ +//! Torrust Tracker application. +//! +//! The tracker application has a global configuration for multiple jobs. +//! It's basically a container for other services. +//! It also check constraint and dependencies between services. For example: +//! It's not safe to run a UDP tracker on top of a core public tracker, as UDP trackers +//! do not allow private access to the tracker data. +//! +//! The application is responsible for: +//! +//! - Loading data from the database when it's needed. +//! - Starting some jobs depending on the configuration. +//! +//! Jobs executed always: +//! +//! - Health Check API +//! +//! Optional jobs: +//! +//! - Torrent cleaner: it removes inactive peers and (optionally) peerless torrents. +//! - UDP trackers: the user can enable multiple UDP tracker on several ports. +//! - HTTP trackers: the user can enable multiple HTTP tracker on several ports. +//! - Tracker REST API: the tracker API can be enabled/disabled. +use std::sync::Arc; + +use tokio::task::JoinHandle; +use torrust_tracker_configuration::Configuration; +use tracing::instrument; + +use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; +use crate::servers::registar::Registar; +use crate::{core, servers}; + +/// # Panics +/// +/// Will panic if: +/// +/// - Can't retrieve tracker keys from database. +/// - Can't load whitelist from database. 
+#[instrument(skip(config, tracker))]
+pub async fn start(config: &Configuration, tracker: Arc<core::Tracker>) -> Vec<JoinHandle<()>> {
+    if config.http_api.is_none()
+        && (config.udp_trackers.is_none() || config.udp_trackers.as_ref().map_or(true, std::vec::Vec::is_empty))
+        && (config.http_trackers.is_none() || config.http_trackers.as_ref().map_or(true, std::vec::Vec::is_empty))
+    {
+        tracing::warn!("No services enabled in configuration");
+    }
+
+    let mut jobs: Vec<JoinHandle<()>> = Vec::new();
+
+    let registar = Registar::default();
+
+    // Load peer keys
+    if tracker.is_private() {
+        tracker
+            .load_keys_from_database()
+            .await
+            .expect("Could not retrieve keys from database.");
+    }
+
+    // Load whitelisted torrents
+    if tracker.is_listed() {
+        tracker
+            .load_whitelist_from_database()
+            .await
+            .expect("Could not load whitelist from database.");
+    }
+
+    // Start the UDP blocks
+    if let Some(udp_trackers) = &config.udp_trackers {
+        for udp_tracker_config in udp_trackers {
+            if tracker.is_private() {
+                tracing::warn!(
+                    "Could not start UDP tracker on: {} while in private mode. 
UDP is not safe for private trackers!", + udp_tracker_config.bind_address + ); + } else { + jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone(), registar.give_form()).await); + } + } + } else { + tracing::info!("No UDP blocks in configuration"); + } + + // Start the HTTP blocks + if let Some(http_trackers) = &config.http_trackers { + for http_tracker_config in http_trackers { + if let Some(job) = http_tracker::start_job( + http_tracker_config, + tracker.clone(), + registar.give_form(), + servers::http::Version::V1, + ) + .await + { + jobs.push(job); + }; + } + } else { + tracing::info!("No HTTP blocks in configuration"); + } + + // Start HTTP API + if let Some(http_api_config) = &config.http_api { + if let Some(job) = tracker_apis::start_job( + http_api_config, + tracker.clone(), + registar.give_form(), + servers::apis::Version::V1, + ) + .await + { + jobs.push(job); + }; + } else { + tracing::info!("No API block in configuration"); + } + + // Start runners to remove torrents without peers, every interval + if config.core.inactive_peer_cleanup_interval > 0 { + jobs.push(torrent_cleanup::start_job(&config.core, &tracker)); + } + + // Start Health Check API + jobs.push(health_check_api::start_job(&config.health_check_api, registar.entries()).await); + + jobs +} diff --git a/src/bin/e2e_tests_runner.rs b/src/bin/e2e_tests_runner.rs new file mode 100644 index 000000000..eb91c0d86 --- /dev/null +++ b/src/bin/e2e_tests_runner.rs @@ -0,0 +1,6 @@ +//! Program to run E2E tests. +use torrust_tracker::console::ci::e2e; + +fn main() -> anyhow::Result<()> { + e2e::runner::run() +} diff --git a/src/bin/http_health_check.rs b/src/bin/http_health_check.rs new file mode 100644 index 000000000..b7c6dfa41 --- /dev/null +++ b/src/bin/http_health_check.rs @@ -0,0 +1,42 @@ +//! Minimal `curl` or `wget` to be used for container health checks. +//! +//! It's convenient to avoid using third-party libraries because: +//! +//! - They are harder to maintain. +//! 
- They introduce new attack vectors.
+use std::time::Duration;
+use std::{env, process};
+
+use reqwest::Client;
+
+#[tokio::main]
+async fn main() {
+    let args: Vec<String> = env::args().collect();
+    if args.len() != 2 {
+        eprintln!("Usage: cargo run --bin http_health_check <HEALTH_URL>");
+        eprintln!("Example: cargo run --bin http_health_check http://127.0.0.1:1313/health_check");
+        std::process::exit(1);
+    }
+
+    println!("Health check ...");
+
+    let url = &args[1].clone();
+
+    let client = Client::builder().timeout(Duration::from_secs(5)).build().unwrap();
+
+    match client.get(url).send().await {
+        Ok(response) => {
+            if response.status().is_success() {
+                println!("STATUS: {}", response.status());
+                process::exit(0);
+            } else {
+                println!("Non-success status received.");
+                process::exit(1);
+            }
+        }
+        Err(err) => {
+            println!("ERROR: {err}");
+            process::exit(1);
+        }
+    }
+}
diff --git a/src/bin/http_tracker_client.rs b/src/bin/http_tracker_client.rs
new file mode 100644
index 000000000..0de040549
--- /dev/null
+++ b/src/bin/http_tracker_client.rs
@@ -0,0 +1,7 @@
+//! Program to make request to HTTP trackers.
+use torrust_tracker::console::clients::http::app;
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    app::run().await
+}
diff --git a/src/bin/profiling.rs b/src/bin/profiling.rs
new file mode 100644
index 000000000..bc1ac6526
--- /dev/null
+++ b/src/bin/profiling.rs
@@ -0,0 +1,8 @@
+//! This binary is used for profiling with [valgrind](https://valgrind.org/)
+//! and [kcachegrind](https://kcachegrind.github.io/).
+use torrust_tracker::console::profiling::run;
+
+#[tokio::main]
+async fn main() {
+    run().await;
+}
diff --git a/src/bin/tracker_checker.rs b/src/bin/tracker_checker.rs
new file mode 100644
index 000000000..87aeedeac
--- /dev/null
+++ b/src/bin/tracker_checker.rs
@@ -0,0 +1,7 @@
+//! Program to check running trackers.
+use torrust_tracker::console::clients::checker::app; + +#[tokio::main] +async fn main() { + app::run().await.expect("Some checks fail"); +} diff --git a/src/bin/udp_tracker_client.rs b/src/bin/udp_tracker_client.rs new file mode 100644 index 000000000..909b296ca --- /dev/null +++ b/src/bin/udp_tracker_client.rs @@ -0,0 +1,7 @@ +//! Program to make request to UDP trackers. +use torrust_tracker::console::clients::udp::app; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + app::run().await +} diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs new file mode 100644 index 000000000..7c0cf45ac --- /dev/null +++ b/src/bootstrap/app.rs @@ -0,0 +1,90 @@ +//! Setup for the main tracker application. +//! +//! The [`setup`] only builds the application and its dependencies but it does not start the application. +//! In fact, there is no such thing as the main application process. When the application starts, the only thing it does is +//! starting a bunch of independent jobs. If you are looking for how things are started you should read [`app::start`](crate::app::start) +//! function documentation. +//! +//! Setup steps: +//! +//! 1. Load the global application configuration. +//! 2. Initialize static variables. +//! 3. Initialize logging. +//! 4. Initialize the domain tracker. +use std::sync::Arc; + +use torrust_tracker_clock::static_time; +use torrust_tracker_configuration::validator::Validator; +use torrust_tracker_configuration::Configuration; +use tracing::instrument; + +use super::config::initialize_configuration; +use crate::bootstrap; +use crate::core::services::tracker_factory; +use crate::core::Tracker; +use crate::shared::crypto::ephemeral_instance_keys; + +/// It loads the configuration from the environment and builds the main domain [`Tracker`] struct. +/// +/// # Panics +/// +/// Setup can file if the configuration is invalid. 
+#[must_use]
+#[instrument(skip())]
+pub fn setup() -> (Configuration, Arc<Tracker>) {
+    let configuration = initialize_configuration();
+
+    if let Err(e) = configuration.validate() {
+        panic!("Configuration error: {e}");
+    }
+
+    let tracker = initialize_with_configuration(&configuration);
+
+    tracing::info!("Configuration:\n{}", configuration.clone().mask_secrets().to_json());
+
+    (configuration, tracker)
+}
+
+/// It initializes the application with the given configuration.
+///
+/// The configuration may be obtained from the environment (via config file or env vars).
+#[must_use]
+#[instrument(skip())]
+pub fn initialize_with_configuration(configuration: &Configuration) -> Arc<Tracker> {
+    initialize_static();
+    initialize_logging(configuration);
+    Arc::new(initialize_tracker(configuration))
+}
+
+/// It initializes the application static values.
+///
+/// These values are accessible throughout the entire application:
+///
+/// - The time when the application started.
+/// - An ephemeral instance random seed. This seed is used for encryption and it's changed when the main application process is restarted.
+#[instrument(skip())]
+pub fn initialize_static() {
+    // Set the time of Torrust app starting
+    lazy_static::initialize(&static_time::TIME_AT_APP_START);
+
+    // Initialize the Ephemeral Instance Random Seed
+    lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED);
+}
+
+/// It builds the domain tracker
+///
+/// The tracker is the domain layer service. It's the entrypoint to make requests to the domain layer.
+/// It's used by other higher-level components like the UDP and HTTP trackers or the tracker API.
+#[must_use]
+#[instrument(skip(config))]
+pub fn initialize_tracker(config: &Configuration) -> Tracker {
+    tracker_factory(config)
+}
+
+/// It initializes the log threshold, format and channel.
+///
+/// See [the logging setup](crate::bootstrap::logging::setup) for more info about logging.
+#[instrument(skip(config))] +pub fn initialize_logging(config: &Configuration) { + bootstrap::logging::setup(config); +} diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs new file mode 100644 index 000000000..fb5afe403 --- /dev/null +++ b/src/bootstrap/config.rs @@ -0,0 +1,39 @@ +//! Initialize configuration from file or env var. +//! +//! All environment variables are prefixed with `TORRUST_TRACKER_`. + +use torrust_tracker_configuration::{Configuration, Info}; + +pub const DEFAULT_PATH_CONFIG: &str = "./share/default/config/tracker.development.sqlite3.toml"; + +/// It loads the application configuration from the environment. +/// +/// There are two methods to inject the configuration: +/// +/// 1. By using a config file: `tracker.toml`. +/// 2. Environment variable: `TORRUST_TRACKER_CONFIG_TOML`. The variable contains the same contents as the `tracker.toml` file. +/// +/// Environment variable has priority over the config file. +/// +/// Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) for the configuration options. +/// +/// # Panics +/// +/// Will panic if it can't load the configuration from either +/// `./tracker.toml` file or the env var `TORRUST_TRACKER_CONFIG_TOML`. +#[must_use] +pub fn initialize_configuration() -> Configuration { + let info = Info::new(DEFAULT_PATH_CONFIG.to_string()).expect("info to load configuration is not valid"); + Configuration::load(&info).expect("error loading configuration from sources") +} + +#[cfg(test)] +mod tests { + + #[test] + fn it_should_load_with_default_config() { + use crate::bootstrap::config::initialize_configuration; + + drop(initialize_configuration()); + } +} diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs new file mode 100644 index 000000000..b6250efcc --- /dev/null +++ b/src/bootstrap/jobs/health_check_api.rs @@ -0,0 +1,73 @@ +//! Health Check API job starter. +//! +//! 
The [`health_check_api::start_job`](crate::bootstrap::jobs::health_check_api::start_job) +//! function starts the Health Check REST API. +//! +//! The [`health_check_api::start_job`](crate::bootstrap::jobs::health_check_api::start_job) +//! function spawns a new asynchronous task, that tasks is the "**launcher**". +//! The "**launcher**" starts the actual server and sends a message back +//! to the main application. +//! +//! The "**launcher**" is an intermediary thread that decouples the Health Check +//! API server from the process that handles it. +//! +//! Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) +//! for the API configuration options. + +use tokio::sync::oneshot; +use tokio::task::JoinHandle; +use torrust_tracker_configuration::HealthCheckApi; +use tracing::instrument; + +use super::Started; +use crate::servers::health_check_api::{server, HEALTH_CHECK_API_LOG_TARGET}; +use crate::servers::logging::STARTED_ON; +use crate::servers::registar::ServiceRegistry; +use crate::servers::signals::Halted; + +/// This function starts a new Health Check API server with the provided +/// configuration. +/// +/// The functions starts a new concurrent task that will run the API server. +/// This task will send a message to the main application process to notify +/// that the API server was successfully started. +/// +/// # Panics +/// +/// It would panic if unable to send the `ApiServerJobStarted` notice. 
+#[allow(clippy::async_yields_async)]
+#[instrument(skip(config, register))]
+pub async fn start_job(config: &HealthCheckApi, register: ServiceRegistry) -> JoinHandle<()> {
+    let bind_addr = config.bind_address;
+
+    let (tx_start, rx_start) = oneshot::channel::<Started>();
+    let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::<Halted>();
+
+    let protocol = "http";
+
+    // Run the API server
+    let join_handle = tokio::spawn(async move {
+        tracing::info!(target: HEALTH_CHECK_API_LOG_TARGET, "Starting on: {protocol}://{}", bind_addr);
+
+        let handle = server::start(bind_addr, tx_start, rx_halt, register);
+
+        if let Ok(()) = handle.await {
+            tracing::info!(target: HEALTH_CHECK_API_LOG_TARGET, "Stopped server running on: {protocol}://{}", bind_addr);
+        }
+    });
+
+    // Wait until the server sends the started message
+    match rx_start.await {
+        Ok(msg) => tracing::info!(target: HEALTH_CHECK_API_LOG_TARGET, "{STARTED_ON}: {protocol}://{}", msg.address),
+        Err(e) => panic!("the Health Check API server was dropped: {e}"),
+    }
+
+    // Wait until the server finishes
+    tokio::spawn(async move {
+        assert!(!tx_halt.is_closed(), "Halt channel for Health Check API should be open");
+
+        join_handle
+            .await
+            .expect("it should be able to join to the Health Check API server task");
+    })
+}
diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs
new file mode 100644
index 000000000..c55723bc6
--- /dev/null
+++ b/src/bootstrap/jobs/http_tracker.rs
@@ -0,0 +1,103 @@
+//! HTTP tracker job starter.
+//!
+//! The function [`http_tracker::start_job`](crate::bootstrap::jobs::http_tracker::start_job) starts a new HTTP tracker server.
+//!
+//! > **NOTICE**: the application can launch more than one HTTP tracker on different ports.
+//! > Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) for the configuration options.
+//!
+//! 
The [`http_tracker::start_job`](crate::bootstrap::jobs::http_tracker::start_job) function spawns a new asynchronous task, +//! that tasks is the "**launcher**". The "**launcher**" starts the actual server and sends a message back to the main application. +//! +//! The "**launcher**" is an intermediary thread that decouples the HTTP servers from the process that handles it. The HTTP could be used independently in the future. +//! In that case it would not need to notify a parent process. +use std::net::SocketAddr; +use std::sync::Arc; + +use axum_server::tls_rustls::RustlsConfig; +use tokio::task::JoinHandle; +use torrust_tracker_configuration::HttpTracker; +use tracing::instrument; + +use super::make_rust_tls; +use crate::core; +use crate::servers::http::server::{HttpServer, Launcher}; +use crate::servers::http::Version; +use crate::servers::registar::ServiceRegistrationForm; + +/// It starts a new HTTP server with the provided configuration and version. +/// +/// Right now there is only one version but in the future we could support more than one HTTP tracker version at the same time. +/// This feature allows supporting breaking changes on `BitTorrent` BEPs. +/// +/// # Panics +/// +/// It would panic if the `config::HttpTracker` struct would contain inappropriate values. 
+///
+#[instrument(skip(config, tracker, form))]
+pub async fn start_job(
+    config: &HttpTracker,
+    tracker: Arc<core::Tracker>,
+    form: ServiceRegistrationForm,
+    version: Version,
+) -> Option<JoinHandle<()>> {
+    let socket = config.bind_address;
+
+    let tls = make_rust_tls(&config.tsl_config)
+        .await
+        .map(|tls| tls.expect("it should have a valid http tracker tls configuration"));
+
+    match version {
+        Version::V1 => Some(start_v1(socket, tls, tracker.clone(), form).await),
+    }
+}
+
+#[allow(clippy::async_yields_async)]
+#[instrument(skip(socket, tls, tracker, form))]
+async fn start_v1(
+    socket: SocketAddr,
+    tls: Option<RustlsConfig>,
+    tracker: Arc<core::Tracker>,
+    form: ServiceRegistrationForm,
+) -> JoinHandle<()> {
+    let server = HttpServer::new(Launcher::new(socket, tls))
+        .start(tracker, form)
+        .await
+        .expect("it should be able to start to the http tracker");
+
+    tokio::spawn(async move {
+        assert!(
+            !server.state.halt_task.is_closed(),
+            "Halt channel for HTTP tracker should be open"
+        );
+        server
+            .state
+            .task
+            .await
+            .expect("it should be able to join to the http tracker task");
+    })
+}
+
+#[cfg(test)]
+mod tests {
+    use std::sync::Arc;
+
+    use torrust_tracker_test_helpers::configuration::ephemeral_public;
+
+    use crate::bootstrap::app::initialize_with_configuration;
+    use crate::bootstrap::jobs::http_tracker::start_job;
+    use crate::servers::http::Version;
+    use crate::servers::registar::Registar;
+
+    #[tokio::test]
+    async fn it_should_start_http_tracker() {
+        let cfg = Arc::new(ephemeral_public());
+        let http_tracker = cfg.http_trackers.clone().expect("missing HTTP tracker configuration");
+        let config = &http_tracker[0];
+        let tracker = initialize_with_configuration(&cfg);
+        let version = Version::V1;
+
+        start_job(config, tracker, Registar::default().give_form(), version)
+            .await
+            .expect("it should be able to join to the http tracker start-job");
+    }
+}
diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs
new file mode 100644
index 000000000..6e18ec3ba
--- /dev/null
+++ 
b/src/bootstrap/jobs/mod.rs
@@ -0,0 +1,107 @@
+//! Application jobs launchers.
+//!
+//! The main application setup has only two main stages:
+//!
+//! 1. Setup the domain layer: the core tracker.
+//! 2. Launch all the application services as concurrent jobs.
+//!
+//! This modules contains all the functions needed to start those jobs.
+pub mod health_check_api;
+pub mod http_tracker;
+pub mod torrent_cleanup;
+pub mod tracker_apis;
+pub mod udp_tracker;
+
+/// This is the message that the "launcher" spawned task sends to the main
+/// application process to notify the service was successfully started.
+///
+#[derive(Debug)]
+pub struct Started {
+    pub address: std::net::SocketAddr,
+}
+
+#[instrument(skip(opt_tsl_config))]
+pub async fn make_rust_tls(opt_tsl_config: &Option<TslConfig>) -> Option<Result<RustlsConfig, Error>> {
+    match opt_tsl_config {
+        Some(tsl_config) => {
+            let cert = tsl_config.ssl_cert_path.clone();
+            let key = tsl_config.ssl_key_path.clone();
+
+            if !cert.exists() || !key.exists() {
+                return Some(Err(Error::MissingTlsConfig {
+                    location: Location::caller(),
+                }));
+            }
+
+            tracing::info!("Using https: cert path: {cert}.");
+            tracing::info!("Using https: key path: {key}.");
+
+            Some(
+                RustlsConfig::from_pem_file(cert, key)
+                    .await
+                    .map_err(|err| Error::BadTlsConfig {
+                        source: (Arc::new(err) as DynError).into(),
+                    }),
+            )
+        }
+        None => None,
+    }
+}
+
+#[cfg(test)]
+mod tests {
+
+    use camino::Utf8PathBuf;
+    use torrust_tracker_configuration::TslConfig;
+
+    use super::{make_rust_tls, Error};
+
+    #[tokio::test]
+    async fn it_should_error_on_bad_tls_config() {
+        let err = make_rust_tls(&Some(TslConfig {
+            ssl_cert_path: Utf8PathBuf::from("bad cert path"),
+            ssl_key_path: Utf8PathBuf::from("bad key path"),
+        }))
+        .await
+        .expect("tls_was_enabled")
+        .expect_err("bad_cert_and_key_files");
+
+        assert!(matches!(err, Error::MissingTlsConfig { location: _ }));
+    }
+
+    #[tokio::test]
+    async fn it_should_error_on_missing_cert_or_key_paths() {
+        let err = make_rust_tls(&Some(TslConfig {
+ ssl_cert_path: Utf8PathBuf::from(""), + ssl_key_path: Utf8PathBuf::from(""), + })) + .await + .expect("tls_was_enabled") + .expect_err("missing_config"); + + assert!(matches!(err, Error::MissingTlsConfig { location: _ })); + } +} + +use std::panic::Location; +use std::sync::Arc; + +use axum_server::tls_rustls::RustlsConfig; +use thiserror::Error; +use torrust_tracker_configuration::TslConfig; +use torrust_tracker_located_error::{DynError, LocatedError}; +use tracing::instrument; + +/// Error returned by the Bootstrap Process. +#[derive(Error, Debug)] +pub enum Error { + /// Enabled tls but missing config. + #[error("tls config missing")] + MissingTlsConfig { location: &'static Location<'static> }, + + /// Unable to parse tls Config. + #[error("bad tls config: {source}")] + BadTlsConfig { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, +} diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs new file mode 100644 index 000000000..6abb4f26b --- /dev/null +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -0,0 +1,57 @@ +//! Job that runs a task on intervals to clean up torrents. +//! +//! It removes inactive peers and (optionally) peerless torrents. +//! +//! **Inactive peers** are peers that have not been updated for more than `max_peer_timeout` seconds. +//! `max_peer_timeout` is a customizable core tracker option. +//! +//! If the core tracker configuration option `remove_peerless_torrents` is true, the cleanup job will also +//! remove **peerless torrents** which are torrents with an empty peer list. +//! +//! Refer to [`torrust-tracker-configuration documentation`](https://docs.rs/torrust-tracker-configuration) for more info about those options. + +use std::sync::Arc; + +use chrono::Utc; +use tokio::task::JoinHandle; +use torrust_tracker_configuration::Core; +use tracing::instrument; + +use crate::core; + +/// It starts a jobs for cleaning up the torrent data in the tracker. 
+/// +/// The cleaning task is executed on an `inactive_peer_cleanup_interval`. +/// +/// Refer to [`torrust-tracker-configuration documentation`](https://docs.rs/torrust-tracker-configuration) for more info about that option. +#[must_use] +#[instrument(skip(config, tracker))] +pub fn start_job(config: &Core, tracker: &Arc) -> JoinHandle<()> { + let weak_tracker = std::sync::Arc::downgrade(tracker); + let interval = config.inactive_peer_cleanup_interval; + + tokio::spawn(async move { + let interval = std::time::Duration::from_secs(interval); + let mut interval = tokio::time::interval(interval); + interval.tick().await; + + loop { + tokio::select! { + _ = tokio::signal::ctrl_c() => { + tracing::info!("Stopping torrent cleanup job.."); + break; + } + _ = interval.tick() => { + if let Some(tracker) = weak_tracker.upgrade() { + let start_time = Utc::now().time(); + tracing::info!("Cleaning up torrents.."); + tracker.cleanup_torrents(); + tracing::info!("Cleaned up torrents in: {}ms", (Utc::now().time() - start_time).num_milliseconds()); + } else { + break; + } + } + } + } + }) +} diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs new file mode 100644 index 000000000..35b13b7ce --- /dev/null +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -0,0 +1,119 @@ +//! Tracker API job starter. +//! +//! The [`tracker_apis::start_job`](crate::bootstrap::jobs::tracker_apis::start_job) +//! function starts a the HTTP tracker REST API. +//! +//! > **NOTICE**: that even thought there is only one job the API has different +//! > versions. API consumers can choose which version to use. The API version is +//! > part of the URL, for example: `http://localhost:1212/api/v1/stats`. +//! +//! The [`tracker_apis::start_job`](crate::bootstrap::jobs::tracker_apis::start_job) +//! function spawns a new asynchronous task, that tasks is the "**launcher**". +//! The "**launcher**" starts the actual server and sends a message back +//! to the main application. 
The main application waits until it receives +//! the message [`ApiServerJobStarted`] +//! from the "**launcher**". +//! +//! The "**launcher**" is an intermediary thread that decouples the API server +//! from the process that handles it. The API could be used independently +//! in the future. In that case it would not need to notify a parent process. +//! +//! Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) +//! for the API configuration options. +use std::net::SocketAddr; +use std::sync::Arc; + +use axum_server::tls_rustls::RustlsConfig; +use tokio::task::JoinHandle; +use torrust_tracker_configuration::{AccessTokens, HttpApi}; +use tracing::instrument; + +use super::make_rust_tls; +use crate::core; +use crate::servers::apis::server::{ApiServer, Launcher}; +use crate::servers::apis::Version; +use crate::servers::registar::ServiceRegistrationForm; + +/// This is the message that the "launcher" spawned task sends to the main +/// application process to notify the API server was successfully started. +/// +/// > **NOTICE**: it does not mean the API server is ready to receive requests. +/// > It only means the new server started. It might take some time for the server +/// > to be ready to accept requests. +#[derive(Debug)] +pub struct ApiServerJobStarted(); + +/// This function starts a new API server with the provided configuration. +/// +/// The function starts a new concurrent task that will run the API server. +/// This task will send a message to the main application process to notify +/// that the API server was successfully started. +/// +/// # Panics +/// +/// It would panic if unable to send the `ApiServerJobStarted` notice. 
+/// +/// +#[instrument(skip(config, tracker, form))] +pub async fn start_job( + config: &HttpApi, + tracker: Arc, + form: ServiceRegistrationForm, + version: Version, +) -> Option> { + let bind_to = config.bind_address; + + let tls = make_rust_tls(&config.tsl_config) + .await + .map(|tls| tls.expect("it should have a valid tracker api tls configuration")); + + let access_tokens = Arc::new(config.access_tokens.clone()); + + match version { + Version::V1 => Some(start_v1(bind_to, tls, tracker.clone(), form, access_tokens).await), + } +} + +#[allow(clippy::async_yields_async)] +#[instrument(skip(socket, tls, tracker, form, access_tokens))] +async fn start_v1( + socket: SocketAddr, + tls: Option, + tracker: Arc, + form: ServiceRegistrationForm, + access_tokens: Arc, +) -> JoinHandle<()> { + let server = ApiServer::new(Launcher::new(socket, tls)) + .start(tracker, form, access_tokens) + .await + .expect("it should be able to start to the tracker api"); + + tokio::spawn(async move { + assert!(!server.state.halt_task.is_closed(), "Halt channel should be open"); + server.state.task.await.expect("failed to close service"); + }) +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use torrust_tracker_test_helpers::configuration::ephemeral_public; + + use crate::bootstrap::app::initialize_with_configuration; + use crate::bootstrap::jobs::tracker_apis::start_job; + use crate::servers::apis::Version; + use crate::servers::registar::Registar; + + #[tokio::test] + async fn it_should_start_http_tracker() { + let cfg = Arc::new(ephemeral_public()); + let config = &cfg.http_api.clone().unwrap(); + let tracker = initialize_with_configuration(&cfg); + let version = Version::V1; + + start_job(config, tracker, Registar::default().give_form(), version) + .await + .expect("it should be able to join to the tracker api start-job"); + } +} diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs new file mode 100644 index 000000000..ca503aa29 --- /dev/null +++ 
b/src/bootstrap/jobs/udp_tracker.rs @@ -0,0 +1,58 @@ +//! UDP tracker job starter. +//! +//! The [`udp_tracker::start_job`](crate::bootstrap::jobs::udp_tracker::start_job) +//! function starts a new UDP tracker server. +//! +//! > **NOTICE**: that the application can launch more than one UDP tracker +//! > on different ports. Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) +//! > for the configuration options. +use std::sync::Arc; + +use tokio::task::JoinHandle; +use torrust_tracker_configuration::UdpTracker; +use tracing::instrument; + +use crate::core; +use crate::servers::registar::ServiceRegistrationForm; +use crate::servers::udp::server::spawner::Spawner; +use crate::servers::udp::server::Server; +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; + +/// It starts a new UDP server with the provided configuration. +/// +/// It spawns a new asynchronous task for the new UDP server. +/// +/// # Panics +/// +/// It will panic if the API binding address is not a valid socket. +/// It will panic if it is unable to start the UDP service. +/// It will panic if the task did not finish successfully. 
+#[must_use] +#[allow(clippy::async_yields_async)] +#[instrument(skip(config, tracker, form))] +pub async fn start_job(config: &UdpTracker, tracker: Arc, form: ServiceRegistrationForm) -> JoinHandle<()> { + let bind_to = config.bind_address; + + let server = Server::new(Spawner::new(bind_to)) + .start(tracker, form) + .await + .expect("it should be able to start the udp tracker"); + + tokio::spawn(async move { + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, "Wait for launcher (UDP service) to finish ..."); + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, "Is halt channel closed before waiting?: {}", server.state.halt_task.is_closed()); + + assert!( + !server.state.halt_task.is_closed(), + "Halt channel for UDP tracker should be open" + ); + + server + .state + .task + .await + .expect("it should be able to join to the udp tracker task"); + + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, "Is halt channel closed after finishing the server?: {}", server.state.halt_task.is_closed()); + }) +} diff --git a/src/bootstrap/logging.rs b/src/bootstrap/logging.rs new file mode 100644 index 000000000..34809c1ca --- /dev/null +++ b/src/bootstrap/logging.rs @@ -0,0 +1,81 @@ +//! Setup for the application logging. +//! +//! It redirects the log info to the standard output with the log threshold +//! defined in the configuration. +//! +//! - `Off` +//! - `Error` +//! - `Warn` +//! - `Info` +//! - `Debug` +//! - `Trace` +//! +//! Refer to the [configuration crate documentation](https://docs.rs/torrust-tracker-configuration) to know how to change log settings. +use std::sync::Once; + +use torrust_tracker_configuration::{Configuration, Threshold}; +use tracing::level_filters::LevelFilter; + +static INIT: Once = Once::new(); + +/// It redirects the log info to the standard output with the log threshold +/// defined in the configuration. 
+pub fn setup(cfg: &Configuration) { + let tracing_level = map_to_tracing_level_filter(&cfg.logging.threshold); + + if tracing_level == LevelFilter::OFF { + return; + } + + INIT.call_once(|| { + tracing_stdout_init(tracing_level, &TraceStyle::Default); + }); +} + +fn map_to_tracing_level_filter(threshold: &Threshold) -> LevelFilter { + match threshold { + Threshold::Off => LevelFilter::OFF, + Threshold::Error => LevelFilter::ERROR, + Threshold::Warn => LevelFilter::WARN, + Threshold::Info => LevelFilter::INFO, + Threshold::Debug => LevelFilter::DEBUG, + Threshold::Trace => LevelFilter::TRACE, + } +} + +fn tracing_stdout_init(filter: LevelFilter, style: &TraceStyle) { + let builder = tracing_subscriber::fmt().with_max_level(filter).with_ansi(true); + + let () = match style { + TraceStyle::Default => builder.init(), + TraceStyle::Pretty(display_filename) => builder.pretty().with_file(*display_filename).init(), + TraceStyle::Compact => builder.compact().init(), + TraceStyle::Json => builder.json().init(), + }; + + tracing::info!("Logging initialized"); +} + +#[derive(Debug)] +pub enum TraceStyle { + Default, + Pretty(bool), + Compact, + Json, +} + +impl std::fmt::Display for TraceStyle { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let style = match self { + TraceStyle::Default => "Default Style", + TraceStyle::Pretty(path) => match path { + true => "Pretty Style with File Paths", + false => "Pretty Style without File Paths", + }, + TraceStyle::Compact => "Compact Style", + TraceStyle::Json => "Json Format", + }; + + f.write_str(style) + } +} diff --git a/src/bootstrap/mod.rs b/src/bootstrap/mod.rs new file mode 100644 index 000000000..22044aafd --- /dev/null +++ b/src/bootstrap/mod.rs @@ -0,0 +1,11 @@ +//! Tracker application bootstrapping. +//! +//! This module includes all the functions to build the application, its dependencies, and run the jobs. +//! +//! Jobs are tasks executed concurrently. 
Some of them are concurrent because of the asynchronous nature of the task, +//! like cleaning torrents, and other jobs because they can be enabled/disabled depending on the configuration. +//! For example, you can have more than one UDP and HTTP tracker, each server is executed like a independent job. +pub mod app; +pub mod config; +pub mod jobs; +pub mod logging; diff --git a/src/common.rs b/src/common.rs deleted file mode 100644 index 82ea19ab8..000000000 --- a/src/common.rs +++ /dev/null @@ -1,282 +0,0 @@ -use serde::{Deserialize, Serialize}; - -pub const MAX_PACKET_SIZE: usize = 0xffff; -pub const MAX_SCRAPE_TORRENTS: u8 = 74; -pub const PROTOCOL_ID: i64 = 4_497_486_125_440; // protocol constant -pub const AUTH_KEY_LENGTH: usize = 32; - -#[repr(u32)] -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] -pub enum Actions { - Connect = 0, - Announce = 1, - Scrape = 2, - Error = 3, -} - -#[repr(u32)] -#[derive(Serialize, Deserialize, Clone, Copy)] -pub enum Events { - None = 0, - Complete = 1, - Started = 2, - Stopped = 3, -} - -#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub enum AnnounceEvent { - None, - Completed, - Started, - Stopped, -} - -impl AnnounceEvent { - #[inline] - pub fn from_i32(i: i32) -> Self { - match i { - 0 => Self::None, - 1 => Self::Completed, - 2 => Self::Started, - 3 => Self::Stopped, - _ => Self::None, - } - } -} - -#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct AnnounceInterval(pub i32); - -#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, Ord)] -pub struct InfoHash(pub [u8; 20]); - -impl InfoHash { - pub fn to_string(&self) -> String { - let mut buffer = [0u8; 40]; - let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); - String::from(std::str::from_utf8(bytes_out).unwrap()) - } -} - -impl std::fmt::Display for InfoHash { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let mut chars = [0u8; 40]; - binascii::bin2hex(&self.0, &mut 
chars).expect("failed to hexlify"); - write!(f, "{}", std::str::from_utf8(&chars).unwrap()) - } -} - -impl std::str::FromStr for InfoHash { - type Err = binascii::ConvertError; - - fn from_str(s: &str) -> Result { - let mut i = Self { 0: [0u8; 20] }; - if s.len() != 40 { - return Err(binascii::ConvertError::InvalidInputLength); - } - binascii::hex2bin(s.as_bytes(), &mut i.0)?; - Ok(i) - } -} - -impl std::cmp::PartialOrd for InfoHash { - fn partial_cmp(&self, other: &InfoHash) -> Option { - self.0.partial_cmp(&other.0) - } -} - -impl std::convert::From<&[u8]> for InfoHash { - fn from(data: &[u8]) -> InfoHash { - assert_eq!(data.len(), 20); - let mut ret = InfoHash { 0: [0u8; 20] }; - ret.0.clone_from_slice(data); - return ret; - } -} - -impl std::convert::Into for [u8; 20] { - fn into(self) -> InfoHash { - InfoHash { 0: self } - } -} - -impl serde::ser::Serialize for InfoHash { - fn serialize(&self, serializer: S) -> Result { - let mut buffer = [0u8; 40]; - let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); - let str_out = std::str::from_utf8(bytes_out).unwrap(); - - serializer.serialize_str(str_out) - } -} - -impl<'de> serde::de::Deserialize<'de> for InfoHash { - fn deserialize>(des: D) -> Result { - des.deserialize_str(InfoHashVisitor) - } -} - -struct InfoHashVisitor; - -impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { - type Value = InfoHash; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a 40 character long hash") - } - - fn visit_str(self, v: &str) -> Result { - if v.len() != 40 { - return Err(serde::de::Error::invalid_value( - serde::de::Unexpected::Str(v), - &"expected a 40 character long string", - )); - } - - let mut res = InfoHash { 0: [0u8; 20] }; - - if let Err(_) = binascii::hex2bin(v.as_bytes(), &mut res.0) { - return Err(serde::de::Error::invalid_value( - serde::de::Unexpected::Str(v), - &"expected a hexadecimal string", - )); - } else { - return Ok(res); - } - } -} - 
-#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct ConnectionId(pub i64); - -#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct TransactionId(pub i32); - -#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct NumberOfBytes(pub i64); - -#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct NumberOfPeers(pub i32); - -#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct NumberOfDownloads(pub i32); - -#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct Port(pub u16); - -#[repr(transparent)] -#[derive(Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug, PartialOrd, Ord)] -pub struct PeerId(pub [u8; 20]); - -#[derive(Serialize, Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct PeerKey(pub u32); - -impl PeerId { - pub fn get_client_name(&self) -> Option<&'static str> { - if self.0[0] == b'M' { - return Some("BitTorrent"); - } - if self.0[0] == b'-' { - let name = match &self.0[1..3] { - b"AG" => "Ares", - b"A~" => "Ares", - b"AR" => "Arctic", - b"AV" => "Avicora", - b"AX" => "BitPump", - b"AZ" => "Azureus", - b"BB" => "BitBuddy", - b"BC" => "BitComet", - b"BF" => "Bitflu", - b"BG" => "BTG (uses Rasterbar libtorrent)", - b"BR" => "BitRocket", - b"BS" => "BTSlave", - b"BX" => "~Bittorrent X", - b"CD" => "Enhanced CTorrent", - b"CT" => "CTorrent", - b"DE" => "DelugeTorrent", - b"DP" => "Propagate Data Client", - b"EB" => "EBit", - b"ES" => "electric sheep", - b"FT" => "FoxTorrent", - b"FW" => "FrostWire", - b"FX" => "Freebox BitTorrent", - b"GS" => "GSTorrent", - b"HL" => "Halite", - b"HN" => "Hydranode", - b"KG" => "KGet", - b"KT" => "KTorrent", - b"LH" => "LH-ABC", - b"LP" => "Lphant", - b"LT" => "libtorrent", - b"lt" => "libTorrent", - b"LW" => "LimeWire", - b"MO" => "MonoTorrent", - b"MP" => "MooPolice", - b"MR" => "Miro", - b"MT" => 
"MoonlightTorrent", - b"NX" => "Net Transport", - b"PD" => "Pando", - b"qB" => "qBittorrent", - b"QD" => "QQDownload", - b"QT" => "Qt 4 Torrent example", - b"RT" => "Retriever", - b"S~" => "Shareaza alpha/beta", - b"SB" => "~Swiftbit", - b"SS" => "SwarmScope", - b"ST" => "SymTorrent", - b"st" => "sharktorrent", - b"SZ" => "Shareaza", - b"TN" => "TorrentDotNET", - b"TR" => "Transmission", - b"TS" => "Torrentstorm", - b"TT" => "TuoTu", - b"UL" => "uLeecher!", - b"UT" => "µTorrent", - b"UW" => "µTorrent Web", - b"VG" => "Vagaa", - b"WD" => "WebTorrent Desktop", - b"WT" => "BitLet", - b"WW" => "WebTorrent", - b"WY" => "FireTorrent", - b"XL" => "Xunlei", - b"XT" => "XanTorrent", - b"XX" => "Xtorrent", - b"ZT" => "ZipTorrent", - _ => return None, - }; - Some(name) - } else { - None - } - } -} -impl Serialize for PeerId { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, { - let mut tmp = [0u8; 40]; - binascii::bin2hex(&self.0, &mut tmp).unwrap(); - let id = std::str::from_utf8(&tmp).ok(); - - #[derive(Serialize)] - struct PeerIdInfo<'a> { - id: Option<&'a str>, - client: Option<&'a str>, - } - - let obj = PeerIdInfo { - id, - client: self.get_client_name(), - }; - obj.serialize(serializer) - } -} - -impl std::convert::From<&[u8]> for PeerId { - fn from(data: &[u8]) -> PeerId { - assert_eq!(data.len(), 20); - let mut ret = PeerId { 0: [0u8; 20] }; - ret.0.clone_from_slice(data); - return ret; - } -} diff --git a/src/config.rs b/src/config.rs deleted file mode 100644 index 9a7e47e37..000000000 --- a/src/config.rs +++ /dev/null @@ -1,170 +0,0 @@ -pub use crate::tracker::TrackerMode; -use serde::{Serialize, Deserialize, Serializer}; -use std; -use std::collections::HashMap; -use std::fs; -use toml; -use std::net::{IpAddr}; -use std::path::Path; -use std::str::FromStr; -use config::{ConfigError, Config, File}; - -#[derive(Serialize, Deserialize)] -pub struct UdpTrackerConfig { - pub bind_address: String, - pub announce_interval: u32, -} - 
-#[derive(Serialize, Deserialize)] -pub struct HttpTrackerConfig { - pub enabled: bool, - pub bind_address: String, - pub announce_interval: u32, - pub ssl_enabled: bool, - #[serde(serialize_with = "none_as_empty_string")] - pub ssl_cert_path: Option, - #[serde(serialize_with = "none_as_empty_string")] - pub ssl_key_path: Option -} - -impl HttpTrackerConfig { - pub fn is_ssl_enabled(&self) -> bool { - self.ssl_enabled && self.ssl_cert_path.is_some() && self.ssl_key_path.is_some() - } -} - -#[derive(Serialize, Deserialize)] -pub struct HttpApiConfig { - pub enabled: bool, - pub bind_address: String, - pub access_tokens: HashMap, -} - -#[derive(Serialize, Deserialize)] -pub struct Configuration { - pub log_level: Option, - pub mode: TrackerMode, - pub db_path: String, - pub cleanup_interval: Option, - pub external_ip: Option, - pub udp_tracker: UdpTrackerConfig, - pub http_tracker: Option, - pub http_api: Option, -} - -#[derive(Debug)] -pub enum ConfigurationError { - IOError(std::io::Error), - ParseError(toml::de::Error), -} - -impl std::fmt::Display for ConfigurationError { - fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - ConfigurationError::IOError(e) => e.fmt(formatter), - ConfigurationError::ParseError(e) => e.fmt(formatter), - } - } -} - -impl std::error::Error for ConfigurationError {} - -pub fn none_as_empty_string(option: &Option, serializer: S) -> Result - where - T: Serialize, - S: Serializer, -{ - if let Some(value) = option { - value.serialize(serializer) - } else { - "".serialize(serializer) - } -} - -impl Configuration { - pub fn load(data: &[u8]) -> Result { - toml::from_slice(data) - } - - pub fn load_file(path: &str) -> Result { - match std::fs::read(path) { - Err(e) => Err(ConfigurationError::IOError(e)), - Ok(data) => { - match Self::load(data.as_slice()) { - Ok(cfg) => { - Ok(cfg) - }, - Err(e) => Err(ConfigurationError::ParseError(e)), - } - } - } - } - - pub fn get_ext_ip(&self) -> Option { - match 
&self.external_ip { - None => None, - Some(external_ip) => { - match IpAddr::from_str(external_ip) { - Ok(external_ip) => Some(external_ip), - Err(_) => None - } - } - } - } -} - -impl Configuration { - pub fn default() -> Configuration { - Configuration { - log_level: Option::from(String::from("info")), - mode: TrackerMode::PublicMode, - db_path: String::from("data.db"), - cleanup_interval: Some(600), - external_ip: Some(String::from("0.0.0.0")), - udp_tracker: UdpTrackerConfig { - bind_address: String::from("0.0.0.0:6969"), - announce_interval: 120, - }, - http_tracker: Option::from(HttpTrackerConfig { - enabled: false, - bind_address: String::from("0.0.0.0:7878"), - announce_interval: 120, - ssl_enabled: false, - ssl_cert_path: None, - ssl_key_path: None - }), - http_api: Option::from(HttpApiConfig { - enabled: true, - bind_address: String::from("127.0.0.1:1212"), - access_tokens: [(String::from("admin"), String::from("MyAccessToken"))].iter().cloned().collect(), - }), - } - } - - pub fn load_from_file() -> Result { - let mut config = Config::new(); - - const CONFIG_PATH: &str = "config.toml"; - - if Path::new(CONFIG_PATH).exists() { - config.merge(File::with_name(CONFIG_PATH))?; - } else { - eprintln!("No config file found."); - eprintln!("Creating config file.."); - let config = Configuration::default(); - let _ = config.save_to_file(); - return Err(ConfigError::Message(format!("Please edit the config.TOML in the root folder and restart the tracker."))) - } - - match config.try_into() { - Ok(data) => Ok(data), - Err(e) => Err(ConfigError::Message(format!("Errors while processing config: {}.", e))), - } - } - - pub fn save_to_file(&self) -> Result<(), ()>{ - let toml_string = toml::to_string(self).expect("Could not encode TOML value"); - fs::write("config.toml", toml_string).expect("Could not write to file!"); - Ok(()) - } -} diff --git a/src/console/ci/e2e/docker.rs b/src/console/ci/e2e/docker.rs new file mode 100644 index 000000000..ce2b1aa99 --- /dev/null 
+++ b/src/console/ci/e2e/docker.rs @@ -0,0 +1,234 @@ +//! Docker command wrapper. +use std::io; +use std::process::{Command, Output}; +use std::thread::sleep; +use std::time::{Duration, Instant}; + +/// Docker command wrapper. +pub struct Docker {} + +#[derive(Clone, Debug)] +pub struct RunningContainer { + pub image: String, + pub name: String, + pub output: Output, +} + +impl Drop for RunningContainer { + /// Ensures that the temporary container is stopped when the struct goes out + /// of scope. + fn drop(&mut self) { + tracing::info!("Dropping running container: {}", self.name); + if Docker::is_container_running(&self.name) { + let _unused = Docker::stop(self); + } + } +} + +/// `docker run` command options. +pub struct RunOptions { + pub env_vars: Vec<(String, String)>, + pub ports: Vec, +} + +impl Docker { + /// Builds a Docker image from a given Dockerfile. + /// + /// # Errors + /// + /// Will fail if the docker build command fails. + pub fn build(dockerfile: &str, tag: &str) -> io::Result<()> { + let status = Command::new("docker") + .args(["build", "-f", dockerfile, "-t", tag, "."]) + .status()?; + + if status.success() { + Ok(()) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + format!("Failed to build Docker image from dockerfile {dockerfile}"), + )) + } + } + + /// Runs a Docker container from a given image with multiple environment variables. + /// + /// # Arguments + /// + /// * `image` - The Docker image to run. + /// * `container` - The name for the Docker container. + /// * `env_vars` - A slice of tuples, each representing an environment variable as ("KEY", "value"). + /// + /// # Errors + /// + /// Will fail if the docker run command fails. 
+ pub fn run(image: &str, container: &str, options: &RunOptions) -> io::Result { + let initial_args = vec![ + "run".to_string(), + "--detach".to_string(), + "--name".to_string(), + container.to_string(), + ]; + + // Add environment variables + let mut env_var_args: Vec = vec![]; + for (key, value) in &options.env_vars { + env_var_args.push("--env".to_string()); + env_var_args.push(format!("{key}={value}")); + } + + // Add port mappings + let mut port_args: Vec = vec![]; + for port in &options.ports { + port_args.push("--publish".to_string()); + port_args.push(port.to_string()); + } + + let args = [initial_args, env_var_args, port_args, [image.to_string()].to_vec()].concat(); + + tracing::debug!("Docker run args: {:?}", args); + + let output = Command::new("docker").args(args).output()?; + + if output.status.success() { + Ok(RunningContainer { + image: image.to_owned(), + name: container.to_owned(), + output, + }) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + format!("Failed to run Docker image {image}"), + )) + } + } + + /// Stops a Docker container. + /// + /// # Errors + /// + /// Will fail if the docker stop command fails. + pub fn stop(container: &RunningContainer) -> io::Result<()> { + let status = Command::new("docker").args(["stop", &container.name]).status()?; + + if status.success() { + Ok(()) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + format!("Failed to stop Docker container {}", container.name), + )) + } + } + + /// Removes a Docker container. + /// + /// # Errors + /// + /// Will fail if the docker rm command fails. + pub fn remove(container: &str) -> io::Result<()> { + let status = Command::new("docker").args(["rm", "-f", container]).status()?; + + if status.success() { + Ok(()) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + format!("Failed to remove Docker container {container}"), + )) + } + } + + /// Fetches logs from a Docker container. 
+ /// + /// # Errors + /// + /// Will fail if the docker logs command fails. + pub fn logs(container: &str) -> io::Result { + let output = Command::new("docker").args(["logs", container]).output()?; + + if output.status.success() { + Ok(String::from_utf8_lossy(&output.stdout).to_string()) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + format!("Failed to fetch logs from Docker container {container}"), + )) + } + } + + /// Checks if a Docker container is healthy. + #[must_use] + pub fn wait_until_is_healthy(name: &str, timeout: Duration) -> bool { + let start = Instant::now(); + + while start.elapsed() < timeout { + let Ok(output) = Command::new("docker") + .args(["ps", "-f", &format!("name={name}"), "--format", "{{.Status}}"]) + .output() + else { + return false; + }; + + let output_str = String::from_utf8_lossy(&output.stdout); + + tracing::info!("Waiting until container is healthy: {:?}", output_str); + + if output_str.contains("(healthy)") { + return true; + } + + sleep(Duration::from_secs(1)); + } + + false + } + + /// Checks if a Docker container is running. + /// + /// # Arguments + /// + /// * `container` - The name of the Docker container. + /// + /// # Returns + /// + /// `true` if the container is running, `false` otherwise. + #[must_use] + pub fn is_container_running(container: &str) -> bool { + match Command::new("docker") + .args(["ps", "-f", &format!("name={container}"), "--format", "{{.Names}}"]) + .output() + { + Ok(output) => { + let output_str = String::from_utf8_lossy(&output.stdout); + output_str.contains(container) + } + Err(_) => false, + } + } + + /// Checks if a Docker container exists. + /// + /// # Arguments + /// + /// * `container` - The name of the Docker container. + /// + /// # Returns + /// + /// `true` if the container exists, `false` otherwise. 
+ #[must_use] + pub fn container_exist(container: &str) -> bool { + match Command::new("docker") + .args(["ps", "-a", "-f", &format!("name={container}"), "--format", "{{.Names}}"]) + .output() + { + Ok(output) => { + let output_str = String::from_utf8_lossy(&output.stdout); + output_str.contains(container) + } + Err(_) => false, + } + } +} diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs new file mode 100644 index 000000000..95648a2b5 --- /dev/null +++ b/src/console/ci/e2e/logs_parser.rs @@ -0,0 +1,179 @@ +//! Utilities to parse Torrust Tracker logs. +use regex::Regex; +use serde::{Deserialize, Serialize}; + +use crate::servers::health_check_api::HEALTH_CHECK_API_LOG_TARGET; +use crate::servers::http::HTTP_TRACKER_LOG_TARGET; +use crate::servers::logging::STARTED_ON; +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; + +const INFO_THRESHOLD: &str = "INFO"; + +#[derive(Serialize, Deserialize, Debug, Default)] +pub struct RunningServices { + pub udp_trackers: Vec, + pub http_trackers: Vec, + pub health_checks: Vec, +} + +impl RunningServices { + /// It parses the tracker logs to extract the running services. + /// + /// For example, from this logs: + /// + /// ```text + /// Loading configuration from default configuration file: `./share/default/config/tracker.development.sqlite3.toml` ... 
+ /// 2024-06-10T16:07:39.989540Z INFO torrust_tracker::bootstrap::logging: Logging initialized + /// 2024-06-10T16:07:39.990205Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6868 + /// 2024-06-10T16:07:39.990215Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6868 + /// 2024-06-10T16:07:39.990244Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6969 + /// 2024-06-10T16:07:39.990255Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 + /// 2024-06-10T16:07:39.990261Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled + /// 2024-06-10T16:07:39.990303Z INFO HTTP TRACKER: Starting on: http://0.0.0.0:7070 + /// 2024-06-10T16:07:39.990439Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 + /// 2024-06-10T16:07:39.990448Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled + /// 2024-06-10T16:07:39.990563Z INFO API: Starting on http://127.0.0.1:1212 + /// 2024-06-10T16:07:39.990565Z INFO API: Started on http://127.0.0.1:1212 + /// 2024-06-10T16:07:39.990577Z INFO HEALTH CHECK API: Starting on: http://127.0.0.1:1313 + /// 2024-06-10T16:07:39.990638Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 + /// ``` + /// + /// It would extract these services: + /// + /// ```json + /// { + /// "udp_trackers": [ + /// "127.0.0.1:6969" + /// ], + /// "http_trackers": [ + /// "http://127.0.0.1:7070" + /// ], + /// "health_checks": [ + /// "http://127.0.0.1:1313/health_check" + /// ] + /// } + /// ``` + /// + /// NOTICE: Using colors in the console output could affect this method + /// due to the hidden control chars. + /// + /// # Panics + /// + /// Will panic is the regular expression to parse the services can't be compiled. 
+ #[must_use] + pub fn parse_from_logs(logs: &str) -> Self { + let mut udp_trackers: Vec = Vec::new(); + let mut http_trackers: Vec = Vec::new(); + let mut health_checks: Vec = Vec::new(); + + let udp_re = Regex::new(&format!("{STARTED_ON}: {}", r"udp://([0-9.]+:[0-9]+)")).unwrap(); + let http_re = Regex::new(&format!("{STARTED_ON}: {}", r"(https?://[0-9.]+:[0-9]+)")).unwrap(); // DevSkim: ignore DS137138 + let health_re = Regex::new(&format!("{STARTED_ON}: {}", r"(https?://[0-9.]+:[0-9]+)")).unwrap(); // DevSkim: ignore DS137138 + let ansi_escape_re = Regex::new(r"\x1b\[[0-9;]*m").unwrap(); + + for line in logs.lines() { + let clean_line = ansi_escape_re.replace_all(line, ""); + + if !line.contains(INFO_THRESHOLD) { + continue; + }; + + if line.contains(UDP_TRACKER_LOG_TARGET) { + if let Some(captures) = udp_re.captures(&clean_line) { + let address = Self::replace_wildcard_ip_with_localhost(&captures[1]); + udp_trackers.push(address); + } + } else if line.contains(HTTP_TRACKER_LOG_TARGET) { + if let Some(captures) = http_re.captures(&clean_line) { + let address = Self::replace_wildcard_ip_with_localhost(&captures[1]); + http_trackers.push(address); + } + } else if line.contains(HEALTH_CHECK_API_LOG_TARGET) { + if let Some(captures) = health_re.captures(&clean_line) { + let address = format!("{}/health_check", Self::replace_wildcard_ip_with_localhost(&captures[1])); + health_checks.push(address); + } + } + } + + Self { + udp_trackers, + http_trackers, + health_checks, + } + } + + fn replace_wildcard_ip_with_localhost(address: &str) -> String { + address.replace("0.0.0.0", "127.0.0.1") + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn it_should_parse_from_logs_with_valid_logs() { + let logs = r" + Loading configuration from default configuration file: `./share/default/config/tracker.development.sqlite3.toml` ... 
+ 2024-06-10T16:07:39.989540Z INFO torrust_tracker::bootstrap::logging: Logging initialized + 2024-06-10T16:07:39.990244Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6969 + 2024-06-10T16:07:39.990255Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 + 2024-06-10T16:07:39.990261Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled + 2024-06-10T16:07:39.990303Z INFO HTTP TRACKER: Starting on: http://0.0.0.0:7070 + 2024-06-10T16:07:39.990439Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 + 2024-06-10T16:07:39.990448Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled + 2024-06-10T16:07:39.990563Z INFO API: Starting on http://127.0.0.1:1212 + 2024-06-10T16:07:39.990565Z INFO API: Started on http://127.0.0.1:1212 + 2024-06-10T16:07:39.990577Z INFO HEALTH CHECK API: Starting on: http://127.0.0.1:1313 + 2024-06-10T16:07:39.990638Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 + "; + + let running_services = RunningServices::parse_from_logs(logs); + + assert_eq!(running_services.udp_trackers, vec!["127.0.0.1:6969"]); + assert_eq!(running_services.http_trackers, vec!["http://127.0.0.1:7070"]); + assert_eq!(running_services.health_checks, vec!["http://127.0.0.1:1313/health_check"]); + } + + #[test] + fn it_should_support_colored_output() { + let logs = "\x1b[2m2024-06-14T14:40:13.028824Z\x1b[0m \x1b[33mINFO\x1b[0m \x1b[2mUDP TRACKER\x1b[0m: \x1b[37mStarted on: udp://0.0.0.0:6969\x1b[0m"; + + let running_services = RunningServices::parse_from_logs(logs); + + assert_eq!(running_services.udp_trackers, vec!["127.0.0.1:6969"]); + } + + #[test] + fn it_should_ignore_logs_with_no_matching_lines() { + let logs = "[Other Service][INFO] Started on: 0.0.0.0:7070"; + + let running_services = RunningServices::parse_from_logs(logs); + + assert!(running_services.udp_trackers.is_empty()); + assert!(running_services.http_trackers.is_empty()); + assert!(running_services.health_checks.is_empty()); + } + + #[test] + fn it_should_parse_multiple_services() { + let logs = " 
+ 2024-06-10T16:07:39.990205Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6868 + 2024-06-10T16:07:39.990215Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6868 + + 2024-06-10T16:07:39.990244Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6969 + 2024-06-10T16:07:39.990255Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 + "; + + let running_services = RunningServices::parse_from_logs(logs); + + assert_eq!(running_services.udp_trackers, vec!["127.0.0.1:6868", "127.0.0.1:6969"]); + } + + #[test] + fn it_should_replace_wildcard_ip_with_localhost() { + let address = "0.0.0.0:8080"; + assert_eq!(RunningServices::replace_wildcard_ip_with_localhost(address), "127.0.0.1:8080"); + } +} diff --git a/src/console/ci/e2e/mod.rs b/src/console/ci/e2e/mod.rs new file mode 100644 index 000000000..58a876cbe --- /dev/null +++ b/src/console/ci/e2e/mod.rs @@ -0,0 +1,6 @@ +//! E2E tests scripts. +pub mod docker; +pub mod logs_parser; +pub mod runner; +pub mod tracker_checker; +pub mod tracker_container; diff --git a/src/console/ci/e2e/runner.rs b/src/console/ci/e2e/runner.rs new file mode 100644 index 000000000..118ecda42 --- /dev/null +++ b/src/console/ci/e2e/runner.rs @@ -0,0 +1,158 @@ +//! Program to run E2E tests. +//! +//! You can execute it with (passing a TOML config file path): +//! +//! ```text +//! cargo run --bin e2e_tests_runner -- --config-toml-path "./share/default/config/tracker.e2e.container.sqlite3.toml" +//! ``` +//! +//! Or: +//! +//! ```text +//! TORRUST_TRACKER_CONFIG_TOML_PATH="./share/default/config/tracker.e2e.container.sqlite3.toml" cargo run --bin e2e_tests_runner" +//! ``` +//! +//! You can execute it with (directly passing TOML config): +//! +//! ```text +//! TORRUST_TRACKER_CONFIG_TOML=$(cat "./share/default/config/tracker.e2e.container.sqlite3.toml") cargo run --bin e2e_tests_runner +//! 
``` +use std::path::PathBuf; + +use anyhow::Context; +use clap::Parser; +use tracing::level_filters::LevelFilter; + +use super::tracker_container::TrackerContainer; +use crate::console::ci::e2e::docker::RunOptions; +use crate::console::ci::e2e::logs_parser::RunningServices; +use crate::console::ci::e2e::tracker_checker::{self}; + +/* code-review: + - We use always the same docker image name. Should we use a random image name (tag)? + - We use the name image name we use in other workflows `torrust-tracker:local`. + Should we use a different one like `torrust-tracker:e2e`? + - We remove the container after running tests but not the container image. + Should we remove the image too? +*/ + +const CONTAINER_IMAGE: &str = "torrust-tracker:local"; +const CONTAINER_NAME_PREFIX: &str = "tracker_"; + +#[derive(Parser, Debug)] +#[clap(author, version, about, long_about = None)] +struct Args { + /// Path to the JSON configuration file. + #[clap(short, long, env = "TORRUST_TRACKER_CONFIG_TOML_PATH")] + config_toml_path: Option, + + /// Direct configuration content in JSON. + #[clap(env = "TORRUST_TRACKER_CONFIG_TOML", hide_env_values = true)] + config_toml: Option, +} + +/// Script to run E2E tests. +/// +/// # Errors +/// +/// Will return an error if it can't load the tracker configuration from arguments. +/// +/// # Panics +/// +/// Will panic if it can't not perform any of the operations. +pub fn run() -> anyhow::Result<()> { + tracing_stdout_init(LevelFilter::INFO); + + let args = Args::parse(); + + let tracker_config = load_tracker_configuration(&args)?; + + tracing::info!("tracker config:\n{tracker_config}"); + + let mut tracker_container = TrackerContainer::new(CONTAINER_IMAGE, CONTAINER_NAME_PREFIX); + + tracker_container.build_image(); + + // code-review: if we want to use port 0 we don't know which ports we have to open. + // Besides, if we don't use port 0 we should get the port numbers from the tracker configuration. 
+ // We could not use docker, but the intention was to create E2E tests including containerization. + let options = RunOptions { + env_vars: vec![("TORRUST_TRACKER_CONFIG_TOML".to_string(), tracker_config.to_string())], + ports: vec![ + "6969:6969/udp".to_string(), + "7070:7070/tcp".to_string(), + "1212:1212/tcp".to_string(), + "1313:1313/tcp".to_string(), + ], + }; + + tracker_container.run(&options); + + let running_services = tracker_container.running_services(); + + tracing::info!( + "Running services:\n {}", + serde_json::to_string_pretty(&running_services).expect("running services to be serializable to JSON") + ); + + assert_there_is_at_least_one_service_per_type(&running_services); + + let tracker_checker_config = + serde_json::to_string_pretty(&running_services).expect("Running services should be serialized into JSON"); + + tracker_checker::run(&tracker_checker_config).expect("All tracker services should be running correctly"); + + // More E2E tests could be added here in the future. + // For example: `cargo test ...` for only E2E tests, using this shared test env. 
+ + tracker_container.stop(); + + tracker_container.remove(); + + tracing::info!("Tracker container final state:\n{:#?}", tracker_container); + + Ok(()) +} + +fn tracing_stdout_init(filter: LevelFilter) { + tracing_subscriber::fmt().with_max_level(filter).init(); + tracing::info!("Logging initialized"); +} + +fn load_tracker_configuration(args: &Args) -> anyhow::Result { + match (args.config_toml_path.clone(), args.config_toml.clone()) { + (Some(config_path), _) => { + tracing::info!( + "Reading tracker configuration from file: {} ...", + config_path.to_string_lossy() + ); + load_config_from_file(&config_path) + } + (_, Some(config_content)) => { + tracing::info!("Reading tracker configuration from env var ..."); + Ok(config_content) + } + _ => Err(anyhow::anyhow!("No configuration provided")), + } +} + +fn load_config_from_file(path: &PathBuf) -> anyhow::Result { + let config = std::fs::read_to_string(path).with_context(|| format!("CSan't read config file {path:?}"))?; + + Ok(config) +} + +fn assert_there_is_at_least_one_service_per_type(running_services: &RunningServices) { + assert!( + !running_services.udp_trackers.is_empty(), + "At least one UDP tracker should be enabled in E2E tests configuration" + ); + assert!( + !running_services.http_trackers.is_empty(), + "At least one HTTP tracker should be enabled in E2E tests configuration" + ); + assert!( + !running_services.health_checks.is_empty(), + "At least one Health Check should be enabled in E2E tests configuration" + ); +} diff --git a/src/console/ci/e2e/tracker_checker.rs b/src/console/ci/e2e/tracker_checker.rs new file mode 100644 index 000000000..192795e61 --- /dev/null +++ b/src/console/ci/e2e/tracker_checker.rs @@ -0,0 +1,23 @@ +use std::io; +use std::process::Command; + +/// Runs the Tracker Checker. +/// +/// # Errors +/// +/// Will return an error if the Tracker Checker fails. 
+pub fn run(config_content: &str) -> io::Result<()> { + tracing::info!("Running Tracker Checker: TORRUST_CHECKER_CONFIG=[config] cargo run --bin tracker_checker"); + tracing::info!("Tracker Checker config:\n{config_content}"); + + let status = Command::new("cargo") + .env("TORRUST_CHECKER_CONFIG", config_content) + .args(["run", "--bin", "tracker_checker"]) + .status()?; + + if status.success() { + Ok(()) + } else { + Err(io::Error::new(io::ErrorKind::Other, "Failed to run Tracker Checker")) + } +} diff --git a/src/console/ci/e2e/tracker_container.rs b/src/console/ci/e2e/tracker_container.rs new file mode 100644 index 000000000..0d15035a8 --- /dev/null +++ b/src/console/ci/e2e/tracker_container.rs @@ -0,0 +1,134 @@ +use std::time::Duration; + +use rand::distributions::Alphanumeric; +use rand::Rng; + +use super::docker::{RunOptions, RunningContainer}; +use super::logs_parser::RunningServices; +use crate::console::ci::e2e::docker::Docker; + +#[derive(Debug)] +pub struct TrackerContainer { + pub image: String, + pub name: String, + pub running: Option, +} + +impl Drop for TrackerContainer { + /// Ensures that the temporary container is removed when the + /// struct goes out of scope. + fn drop(&mut self) { + tracing::info!("Dropping tracker container: {}", self.name); + if Docker::container_exist(&self.name) { + let _unused = Docker::remove(&self.name); + } + } +} + +impl TrackerContainer { + #[must_use] + pub fn new(tag: &str, container_name_prefix: &str) -> Self { + Self { + image: tag.to_owned(), + name: Self::generate_random_container_name(container_name_prefix), + running: None, + } + } + + /// # Panics + /// + /// Will panic if it can't build the docker image. + pub fn build_image(&self) { + tracing::info!("Building tracker container image with tag: {} ...", self.image); + Docker::build("./Containerfile", &self.image).expect("A tracker local docker image should be built"); + } + + /// # Panics + /// + /// Will panic if it can't run the container. 
+ pub fn run(&mut self, options: &RunOptions) { + tracing::info!("Running docker tracker image: {} ...", self.name); + + let container = Docker::run(&self.image, &self.name, options).expect("A tracker local docker image should be running"); + + tracing::info!("Waiting for the container {} to be healthy ...", self.name); + + let is_healthy = Docker::wait_until_is_healthy(&self.name, Duration::from_secs(10)); + + assert!(is_healthy, "Unhealthy tracker container: {}", &self.name); + + tracing::info!("Container {} is healthy ...", &self.name); + + self.running = Some(container); + + self.assert_there_are_no_panics_in_logs(); + } + + /// # Panics + /// + /// Will panic if it can't get the logs from the running container. + #[must_use] + pub fn running_services(&self) -> RunningServices { + let logs = Docker::logs(&self.name).expect("Logs should be captured from running container"); + + tracing::info!("Parsing running services from logs. Logs :\n{logs}"); + + RunningServices::parse_from_logs(&logs) + } + + /// # Panics + /// + /// Will panic if it can't stop the container. + pub fn stop(&mut self) { + match &self.running { + Some(container) => { + tracing::info!("Stopping docker tracker container: {} ...", self.name); + + Docker::stop(container).expect("Container should be stopped"); + + self.assert_there_are_no_panics_in_logs(); + } + None => { + if Docker::is_container_running(&self.name) { + tracing::error!("Tracker container {} was started manually", self.name); + } else { + tracing::info!("Docker tracker container is not running: {} ...", self.name); + } + } + } + + self.running = None; + } + + /// # Panics + /// + /// Will panic if it can't remove the container. 
+ pub fn remove(&self) { + if let Some(_running_container) = &self.running { + tracing::error!("Can't remove running container: {} ...", self.name); + } else { + tracing::info!("Removing docker tracker container: {} ...", self.name); + Docker::remove(&self.name).expect("Container should be removed"); + } + } + + fn generate_random_container_name(prefix: &str) -> String { + let rand_string: String = rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(20) + .map(char::from) + .collect(); + + format!("{prefix}{rand_string}") + } + + fn assert_there_are_no_panics_in_logs(&self) { + let logs = Docker::logs(&self.name).expect("Logs should be captured from running container"); + + assert!( + !(logs.contains(" panicked at ") || logs.contains("RUST_BACKTRACE=1")), + "{}", + format!("Panics found is logs:\n{logs}") + ); + } +} diff --git a/src/console/ci/mod.rs b/src/console/ci/mod.rs new file mode 100644 index 000000000..6eac3e120 --- /dev/null +++ b/src/console/ci/mod.rs @@ -0,0 +1,2 @@ +//! Continuos integration scripts. +pub mod e2e; diff --git a/src/console/clients/checker/app.rs b/src/console/clients/checker/app.rs new file mode 100644 index 000000000..395f65df9 --- /dev/null +++ b/src/console/clients/checker/app.rs @@ -0,0 +1,120 @@ +//! Program to run checks against running trackers. +//! +//! Run providing a config file path: +//! +//! ```text +//! cargo run --bin tracker_checker -- --config-path "./share/default/config/tracker_checker.json" +//! TORRUST_CHECKER_CONFIG_PATH="./share/default/config/tracker_checker.json" cargo run --bin tracker_checker +//! ``` +//! +//! Run providing the configuration: +//! +//! ```text +//! TORRUST_CHECKER_CONFIG=$(cat "./share/default/config/tracker_checker.json") cargo run --bin tracker_checker +//! ``` +//! +//! Another real example to test the Torrust demo tracker: +//! +//! ```text +//! TORRUST_CHECKER_CONFIG='{ +//! "udp_trackers": ["144.126.245.19:6969"], +//! "http_trackers": ["https://tracker.torrust-demo.com"], +//! 
"health_checks": ["https://tracker.torrust-demo.com/api/health_check"] +//! }' cargo run --bin tracker_checker +//! ``` +//! +//! The output should be something like the following: +//! +//! ```json +//! { +//! "udp_trackers": [ +//! { +//! "url": "144.126.245.19:6969", +//! "status": { +//! "code": "ok", +//! "message": "" +//! } +//! } +//! ], +//! "http_trackers": [ +//! { +//! "url": "https://tracker.torrust-demo.com/", +//! "status": { +//! "code": "ok", +//! "message": "" +//! } +//! } +//! ], +//! "health_checks": [ +//! { +//! "url": "https://tracker.torrust-demo.com/api/health_check", +//! "status": { +//! "code": "ok", +//! "message": "" +//! } +//! } +//! ] +//! } +//! ``` +use std::path::PathBuf; +use std::sync::Arc; + +use anyhow::{Context, Result}; +use clap::Parser; +use tracing::level_filters::LevelFilter; + +use super::config::Configuration; +use super::console::Console; +use super::service::{CheckResult, Service}; +use crate::console::clients::checker::config::parse_from_json; + +#[derive(Parser, Debug)] +#[clap(author, version, about, long_about = None)] +struct Args { + /// Path to the JSON configuration file. + #[clap(short, long, env = "TORRUST_CHECKER_CONFIG_PATH")] + config_path: Option, + + /// Direct configuration content in JSON. + #[clap(env = "TORRUST_CHECKER_CONFIG", hide_env_values = true)] + config_content: Option, +} + +/// # Errors +/// +/// Will return an error if the configuration was not provided. 
+pub async fn run() -> Result> { + tracing_stdout_init(LevelFilter::INFO); + + let args = Args::parse(); + + let config = setup_config(args)?; + + let console_printer = Console {}; + + let service = Service { + config: Arc::new(config), + console: console_printer, + }; + + service.run_checks().await.context("it should run the check tasks") +} + +fn tracing_stdout_init(filter: LevelFilter) { + tracing_subscriber::fmt().with_max_level(filter).init(); + tracing::debug!("Logging initialized"); +} + +fn setup_config(args: Args) -> Result { + match (args.config_path, args.config_content) { + (Some(config_path), _) => load_config_from_file(&config_path), + (_, Some(config_content)) => parse_from_json(&config_content).context("invalid config format"), + _ => Err(anyhow::anyhow!("no configuration provided")), + } +} + +fn load_config_from_file(path: &PathBuf) -> Result { + let file_content = std::fs::read_to_string(path).with_context(|| format!("can't read config file {path:?}"))?; + + parse_from_json(&file_content).context("invalid config format") +} diff --git a/src/console/clients/checker/checks/health.rs b/src/console/clients/checker/checks/health.rs new file mode 100644 index 000000000..b1fb79148 --- /dev/null +++ b/src/console/clients/checker/checks/health.rs @@ -0,0 +1,77 @@ +use std::sync::Arc; +use std::time::Duration; + +use anyhow::Result; +use hyper::StatusCode; +use reqwest::{Client as HttpClient, Response}; +use serde::Serialize; +use thiserror::Error; +use url::Url; + +#[derive(Debug, Clone, Error, Serialize)] +#[serde(into = "String")] +pub enum Error { + #[error("Failed to Build a Http Client: {err:?}")] + ClientBuildingError { err: Arc }, + #[error("Heath check failed to get a response: {err:?}")] + ResponseError { err: Arc }, + #[error("Http check returned a non-success code: \"{code}\" with the response: \"{response:?}\"")] + UnsuccessfulResponse { code: StatusCode, response: Arc }, +} + +impl From for String { + fn from(value: Error) -> Self { + 
value.to_string() + } +} + +#[derive(Debug, Clone, Serialize)] +pub struct Checks { + url: Url, + result: Result, +} + +pub async fn run(health_checks: Vec, timeout: Duration) -> Vec> { + let mut results = Vec::default(); + + tracing::debug!("Health checks ..."); + + for url in health_checks { + let result = match run_health_check(url.clone(), timeout).await { + Ok(response) => Ok(response.status().to_string()), + Err(err) => Err(err), + }; + + let check = Checks { url, result }; + + if check.result.is_err() { + results.push(Err(check)); + } else { + results.push(Ok(check)); + } + } + + results +} + +async fn run_health_check(url: Url, timeout: Duration) -> Result { + let client = HttpClient::builder() + .timeout(timeout) + .build() + .map_err(|e| Error::ClientBuildingError { err: e.into() })?; + + let response = client + .get(url.clone()) + .send() + .await + .map_err(|e| Error::ResponseError { err: e.into() })?; + + if response.status().is_success() { + Ok(response) + } else { + Err(Error::UnsuccessfulResponse { + code: response.status(), + response: response.into(), + }) + } +} diff --git a/src/console/clients/checker/checks/http.rs b/src/console/clients/checker/checks/http.rs new file mode 100644 index 000000000..0904f4e6e --- /dev/null +++ b/src/console/clients/checker/checks/http.rs @@ -0,0 +1,104 @@ +use std::str::FromStr as _; +use std::time::Duration; + +use serde::Serialize; +use torrust_tracker_primitives::info_hash::InfoHash; +use url::Url; + +use crate::console::clients::http::Error; +use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; +use crate::shared::bit_torrent::tracker::http::client::responses::scrape; +use crate::shared::bit_torrent::tracker::http::client::{requests, Client}; + +#[derive(Debug, Clone, Serialize)] +pub struct Checks { + url: Url, + results: Vec<(Check, Result<(), Error>)>, +} + +#[derive(Debug, Clone, Serialize)] +pub enum Check { + Announce, + Scrape, +} + +pub async fn run(http_trackers: Vec, 
timeout: Duration) -> Vec> { + let mut results = Vec::default(); + + tracing::debug!("HTTP trackers ..."); + + for ref url in http_trackers { + let mut base_url = url.clone(); + base_url.set_path(""); + + let mut checks = Checks { + url: url.clone(), + results: Vec::default(), + }; + + // Announce + { + let check = check_http_announce(&base_url, timeout).await.map(|_| ()); + + checks.results.push((Check::Announce, check)); + } + + // Scrape + { + let check = check_http_scrape(&base_url, timeout).await.map(|_| ()); + + checks.results.push((Check::Scrape, check)); + } + + if checks.results.iter().any(|f| f.1.is_err()) { + results.push(Err(checks)); + } else { + results.push(Ok(checks)); + } + } + + results +} + +async fn check_http_announce(url: &Url, timeout: Duration) -> Result { + let info_hash_str = "9c38422213e30bff212b30c360d26f9a02136422".to_string(); // # DevSkim: ignore DS173237 + let info_hash = InfoHash::from_str(&info_hash_str).expect("a valid info-hash is required"); + + let client = Client::new(url.clone(), timeout).map_err(|err| Error::HttpClientError { err })?; + + let response = client + .announce( + &requests::announce::QueryBuilder::with_default_values() + .with_info_hash(&info_hash) + .query(), + ) + .await + .map_err(|err| Error::HttpClientError { err })?; + + let response = response.bytes().await.map_err(|e| Error::ResponseError { err: e.into() })?; + + let response = serde_bencode::from_bytes::(&response).map_err(|e| Error::ParseBencodeError { + data: response, + err: e.into(), + })?; + + Ok(response) +} + +async fn check_http_scrape(url: &Url, timeout: Duration) -> Result { + let info_hashes: Vec = vec!["9c38422213e30bff212b30c360d26f9a02136422".to_string()]; // # DevSkim: ignore DS173237 + let query = requests::scrape::Query::try_from(info_hashes).expect("a valid array of info-hashes is required"); + + let client = Client::new(url.clone(), timeout).map_err(|err| Error::HttpClientError { err })?; + + let response = 
client.scrape(&query).await.map_err(|err| Error::HttpClientError { err })?; + + let response = response.bytes().await.map_err(|e| Error::ResponseError { err: e.into() })?; + + let response = scrape::Response::try_from_bencoded(&response).map_err(|e| Error::BencodeParseError { + data: response, + err: e.into(), + })?; + + Ok(response) +} diff --git a/src/console/clients/checker/checks/mod.rs b/src/console/clients/checker/checks/mod.rs new file mode 100644 index 000000000..f8b03f749 --- /dev/null +++ b/src/console/clients/checker/checks/mod.rs @@ -0,0 +1,4 @@ +pub mod health; +pub mod http; +pub mod structs; +pub mod udp; diff --git a/src/console/clients/checker/checks/structs.rs b/src/console/clients/checker/checks/structs.rs new file mode 100644 index 000000000..d28e20c04 --- /dev/null +++ b/src/console/clients/checker/checks/structs.rs @@ -0,0 +1,12 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize)] +pub struct Status { + pub code: String, + pub message: String, +} +#[derive(Serialize, Deserialize)] +pub struct CheckerOutput { + pub url: String, + pub status: Status, +} diff --git a/src/console/clients/checker/checks/udp.rs b/src/console/clients/checker/checks/udp.rs new file mode 100644 index 000000000..4044b4c52 --- /dev/null +++ b/src/console/clients/checker/checks/udp.rs @@ -0,0 +1,134 @@ +use std::net::SocketAddr; +use std::time::Duration; + +use aquatic_udp_protocol::TransactionId; +use hex_literal::hex; +use serde::Serialize; +use url::Url; + +use crate::console::clients::udp::checker::Client; +use crate::console::clients::udp::Error; + +#[derive(Debug, Clone, Serialize)] +pub struct Checks { + remote_addr: SocketAddr, + results: Vec<(Check, Result<(), Error>)>, +} + +#[derive(Debug, Clone, Serialize)] +pub enum Check { + Setup, + Connect, + Announce, + Scrape, +} + +#[allow(clippy::missing_panics_doc)] +pub async fn run(udp_trackers: Vec, timeout: Duration) -> Vec> { + let mut results = Vec::default(); + + tracing::debug!("UDP 
trackers ..."); + + let info_hash = aquatic_udp_protocol::InfoHash(hex!("9c38422213e30bff212b30c360d26f9a02136422")); // # DevSkim: ignore DS173237 + + for remote_url in udp_trackers { + let remote_addr = resolve_socket_addr(&remote_url); + + let mut checks = Checks { + remote_addr, + results: Vec::default(), + }; + + tracing::debug!("UDP tracker: {:?}", remote_url); + + // Setup + let client = match Client::new(remote_addr, timeout).await { + Ok(client) => { + checks.results.push((Check::Setup, Ok(()))); + client + } + Err(err) => { + checks.results.push((Check::Setup, Err(err))); + results.push(Err(checks)); + break; + } + }; + + let transaction_id = TransactionId::new(1); + + // Connect Remote + let connection_id = match client.send_connection_request(transaction_id).await { + Ok(connection_id) => { + checks.results.push((Check::Connect, Ok(()))); + connection_id + } + Err(err) => { + checks.results.push((Check::Connect, Err(err))); + results.push(Err(checks)); + break; + } + }; + + // Announce + { + let check = client + .send_announce_request(transaction_id, connection_id, info_hash.into()) + .await + .map(|_| ()); + + checks.results.push((Check::Announce, check)); + } + + // Scrape + { + let check = client + .send_scrape_request(connection_id, transaction_id, &[info_hash.into()]) + .await + .map(|_| ()); + + checks.results.push((Check::Scrape, check)); + } + + if checks.results.iter().any(|f| f.1.is_err()) { + results.push(Err(checks)); + } else { + results.push(Ok(checks)); + } + } + + results +} + +fn resolve_socket_addr(url: &Url) -> SocketAddr { + let socket_addr = url.socket_addrs(|| None).unwrap(); + *socket_addr.first().unwrap() +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + + use url::Url; + + use crate::console::clients::checker::checks::udp::resolve_socket_addr; + + #[test] + fn it_should_resolve_the_socket_address_for_udp_scheme_urls_containing_a_domain() { + let socket_addr = 
resolve_socket_addr(&Url::parse("udp://localhost:8080").unwrap()); + + assert!( + socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + ); + } + + #[test] + fn it_should_resolve_the_socket_address_for_udp_scheme_urls_containing_an_ip() { + let socket_addr = resolve_socket_addr(&Url::parse("udp://localhost:8080").unwrap()); + + assert!( + socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + ); + } +} diff --git a/src/console/clients/checker/config.rs b/src/console/clients/checker/config.rs new file mode 100644 index 000000000..154dcae85 --- /dev/null +++ b/src/console/clients/checker/config.rs @@ -0,0 +1,282 @@ +use std::error::Error; +use std::fmt; + +use reqwest::Url as ServiceUrl; +use serde::Deserialize; + +/// It parses the configuration from a JSON format. +/// +/// # Errors +/// +/// Will return an error if the configuration is not valid. +/// +/// # Panics +/// +/// Will panic if unable to read the configuration file. +pub fn parse_from_json(json: &str) -> Result { + let plain_config: PlainConfiguration = serde_json::from_str(json).map_err(ConfigurationError::JsonParseError)?; + Configuration::try_from(plain_config) +} + +/// DTO for the configuration to serialize/deserialize configuration. +/// +/// Configuration does not need to be valid. 
+#[derive(Deserialize)] +struct PlainConfiguration { + pub udp_trackers: Vec, + pub http_trackers: Vec, + pub health_checks: Vec, +} + +/// Validated configuration +pub struct Configuration { + pub udp_trackers: Vec, + pub http_trackers: Vec, + pub health_checks: Vec, +} + +#[derive(Debug)] +pub enum ConfigurationError { + JsonParseError(serde_json::Error), + InvalidUdpAddress(std::net::AddrParseError), + InvalidUrl(url::ParseError), +} + +impl Error for ConfigurationError {} + +impl fmt::Display for ConfigurationError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ConfigurationError::JsonParseError(e) => write!(f, "JSON parse error: {e}"), + ConfigurationError::InvalidUdpAddress(e) => write!(f, "Invalid UDP address: {e}"), + ConfigurationError::InvalidUrl(e) => write!(f, "Invalid URL: {e}"), + } + } +} + +impl TryFrom for Configuration { + type Error = ConfigurationError; + + fn try_from(plain_config: PlainConfiguration) -> Result { + let udp_trackers = plain_config + .udp_trackers + .into_iter() + .map(|s| if s.starts_with("udp://") { s } else { format!("udp://{s}") }) + .map(|s| s.parse::().map_err(ConfigurationError::InvalidUrl)) + .collect::, _>>()?; + + let http_trackers = plain_config + .http_trackers + .into_iter() + .map(|s| s.parse::().map_err(ConfigurationError::InvalidUrl)) + .collect::, _>>()?; + + let health_checks = plain_config + .health_checks + .into_iter() + .map(|s| s.parse::().map_err(ConfigurationError::InvalidUrl)) + .collect::, _>>()?; + + Ok(Configuration { + udp_trackers, + http_trackers, + health_checks, + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn configuration_should_be_build_from_plain_serializable_configuration() { + let dto = PlainConfiguration { + udp_trackers: vec!["udp://127.0.0.1:8080".to_string()], + http_trackers: vec!["http://127.0.0.1:8080".to_string()], + health_checks: vec!["http://127.0.0.1:8080/health".to_string()], + }; + + let config = 
Configuration::try_from(dto).expect("A valid configuration"); + + assert_eq!(config.udp_trackers, vec![ServiceUrl::parse("udp://127.0.0.1:8080").unwrap()]); + + assert_eq!( + config.http_trackers, + vec![ServiceUrl::parse("http://127.0.0.1:8080").unwrap()] + ); + + assert_eq!( + config.health_checks, + vec![ServiceUrl::parse("http://127.0.0.1:8080/health").unwrap()] + ); + } + + mod building_configuration_from_plain_configuration_for { + + mod udp_trackers { + use crate::console::clients::checker::config::{Configuration, PlainConfiguration, ServiceUrl}; + + /* The plain configuration should allow UDP URLs with: + + - IP or domain. + - With or without scheme. + - With or without `announce` suffix. + - With or without `/` at the end of the authority section (with empty path). + + For example: + + 127.0.0.1:6969 + 127.0.0.1:6969/ + 127.0.0.1:6969/announce + + localhost:6969 + localhost:6969/ + localhost:6969/announce + + udp://127.0.0.1:6969 + udp://127.0.0.1:6969/ + udp://127.0.0.1:6969/announce + + udp://localhost:6969 + udp://localhost:6969/ + udp://localhost:6969/announce + + */ + + #[test] + fn it_should_fail_when_a_tracker_udp_url_is_invalid() { + let plain_config = PlainConfiguration { + udp_trackers: vec!["invalid URL".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + + assert!(Configuration::try_from(plain_config).is_err()); + } + + #[test] + fn it_should_add_the_udp_scheme_to_the_udp_url_when_it_is_missing() { + let plain_config = PlainConfiguration { + udp_trackers: vec!["127.0.0.1:6969".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!(config.udp_trackers[0], "udp://127.0.0.1:6969".parse::().unwrap()); + } + + #[test] + fn it_should_allow_using_domains() { + let plain_config = PlainConfiguration { + udp_trackers: vec!["udp://localhost:6969".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + 
+ let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!(config.udp_trackers[0], "udp://localhost:6969".parse::().unwrap()); + } + + #[test] + fn it_should_allow_the_url_to_have_an_empty_path() { + let plain_config = PlainConfiguration { + udp_trackers: vec!["127.0.0.1:6969/".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!(config.udp_trackers[0], "udp://127.0.0.1:6969/".parse::().unwrap()); + } + + #[test] + fn it_should_allow_the_url_to_contain_a_path() { + // This is the common format for UDP tracker URLs: + // udp://domain.com:6969/announce + + let plain_config = PlainConfiguration { + udp_trackers: vec!["127.0.0.1:6969/announce".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!( + config.udp_trackers[0], + "udp://127.0.0.1:6969/announce".parse::().unwrap() + ); + } + } + + mod http_trackers { + use crate::console::clients::checker::config::{Configuration, PlainConfiguration, ServiceUrl}; + + #[test] + fn it_should_fail_when_a_tracker_http_url_is_invalid() { + let plain_config = PlainConfiguration { + udp_trackers: vec![], + http_trackers: vec!["invalid URL".to_string()], + health_checks: vec![], + }; + + assert!(Configuration::try_from(plain_config).is_err()); + } + + #[test] + fn it_should_allow_the_url_to_contain_a_path() { + // This is the common format for HTTP tracker URLs: + // http://domain.com:7070/announce + + let plain_config = PlainConfiguration { + udp_trackers: vec![], + http_trackers: vec!["http://127.0.0.1:7070/announce".to_string()], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!( + config.http_trackers[0], + 
"http://127.0.0.1:7070/announce".parse::().unwrap() + ); + } + + #[test] + fn it_should_allow_the_url_to_contain_an_empty_path() { + let plain_config = PlainConfiguration { + udp_trackers: vec![], + http_trackers: vec!["http://127.0.0.1:7070/".to_string()], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!( + config.http_trackers[0], + "http://127.0.0.1:7070/".parse::().unwrap() + ); + } + } + + mod health_checks { + use crate::console::clients::checker::config::{Configuration, PlainConfiguration}; + + #[test] + fn it_should_fail_when_a_health_check_http_url_is_invalid() { + let plain_config = PlainConfiguration { + udp_trackers: vec![], + http_trackers: vec![], + health_checks: vec!["invalid URL".to_string()], + }; + + assert!(Configuration::try_from(plain_config).is_err()); + } + } + } +} diff --git a/src/console/clients/checker/console.rs b/src/console/clients/checker/console.rs new file mode 100644 index 000000000..b55c559fc --- /dev/null +++ b/src/console/clients/checker/console.rs @@ -0,0 +1,38 @@ +use super::printer::{Printer, CLEAR_SCREEN}; + +pub struct Console {} + +impl Default for Console { + fn default() -> Self { + Self::new() + } +} + +impl Console { + #[must_use] + pub fn new() -> Self { + Self {} + } +} + +impl Printer for Console { + fn clear(&self) { + self.print(CLEAR_SCREEN); + } + + fn print(&self, output: &str) { + print!("{}", &output); + } + + fn eprint(&self, output: &str) { + eprint!("{}", &output); + } + + fn println(&self, output: &str) { + println!("{}", &output); + } + + fn eprintln(&self, output: &str) { + eprintln!("{}", &output); + } +} diff --git a/src/console/clients/checker/logger.rs b/src/console/clients/checker/logger.rs new file mode 100644 index 000000000..50e97189f --- /dev/null +++ b/src/console/clients/checker/logger.rs @@ -0,0 +1,72 @@ +use std::cell::RefCell; + +use super::printer::{Printer, CLEAR_SCREEN}; + +pub struct Logger { + 
output: RefCell<String>,
+}
+
+impl Default for Logger {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl Logger {
+    #[must_use]
+    pub fn new() -> Self {
+        Self {
+            output: RefCell::new(String::new()),
+        }
+    }
+
+    pub fn log(&self) -> String {
+        self.output.borrow().clone()
+    }
+}
+
+impl Printer for Logger {
+    fn clear(&self) {
+        self.print(CLEAR_SCREEN);
+    }
+
+    fn print(&self, output: &str) {
+        *self.output.borrow_mut() = format!("{}{}", self.output.borrow(), &output);
+    }
+
+    fn eprint(&self, output: &str) {
+        *self.output.borrow_mut() = format!("{}{}", self.output.borrow(), &output);
+    }
+
+    fn println(&self, output: &str) {
+        self.print(&format!("{}\n", &output));
+    }
+
+    fn eprintln(&self, output: &str) {
+        self.eprint(&format!("{}\n", &output));
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::console::clients::checker::logger::Logger;
+    use crate::console::clients::checker::printer::{Printer, CLEAR_SCREEN};
+
+    #[test]
+    fn should_capture_the_clear_screen_command() {
+        let console_logger = Logger::new();
+
+        console_logger.clear();
+
+        assert_eq!(CLEAR_SCREEN, console_logger.log());
+    }
+
+    #[test]
+    fn should_capture_the_print_command_output() {
+        let console_logger = Logger::new();
+
+        console_logger.print("OUTPUT");
+
+        assert_eq!("OUTPUT", console_logger.log());
+    }
+}
diff --git a/src/console/clients/checker/mod.rs b/src/console/clients/checker/mod.rs
new file mode 100644
index 000000000..d26a4a686
--- /dev/null
+++ b/src/console/clients/checker/mod.rs
@@ -0,0 +1,7 @@
+pub mod app;
+pub mod checks;
+pub mod config;
+pub mod console;
+pub mod logger;
+pub mod printer;
+pub mod service;
diff --git a/src/console/clients/checker/printer.rs b/src/console/clients/checker/printer.rs
new file mode 100644
index 000000000..d590dfedb
--- /dev/null
+++ b/src/console/clients/checker/printer.rs
@@ -0,0 +1,9 @@
+pub const CLEAR_SCREEN: &str = "\x1B[2J\x1B[1;1H";
+
+pub trait Printer {
+    fn clear(&self);
+    fn print(&self, output: &str);
+    fn eprint(&self, output: &str);
+ fn println(&self, output: &str); + fn eprintln(&self, output: &str); +} diff --git a/src/console/clients/checker/service.rs b/src/console/clients/checker/service.rs new file mode 100644 index 000000000..acd312d8c --- /dev/null +++ b/src/console/clients/checker/service.rs @@ -0,0 +1,62 @@ +use std::sync::Arc; + +use futures::FutureExt as _; +use serde::Serialize; +use tokio::task::{JoinError, JoinSet}; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; + +use super::checks::{health, http, udp}; +use super::config::Configuration; +use super::console::Console; +use crate::console::clients::checker::printer::Printer; + +pub struct Service { + pub(crate) config: Arc, + pub(crate) console: Console, +} + +#[derive(Debug, Clone, Serialize)] +pub enum CheckResult { + Udp(Result), + Http(Result), + Health(Result), +} + +impl Service { + /// # Errors + /// + /// It will return an error if some of the tests panic or otherwise fail to run. + /// On success it will return a vector of `Ok(())` of [`CheckResult`]. + /// + /// # Panics + /// + /// It would panic if `serde_json` produces invalid json for the `to_string_pretty` function. 
+ pub async fn run_checks(self) -> Result, JoinError> { + tracing::info!("Running checks for trackers ..."); + + let mut check_results = Vec::default(); + + let mut checks = JoinSet::new(); + checks.spawn( + udp::run(self.config.udp_trackers.clone(), DEFAULT_TIMEOUT).map(|mut f| f.drain(..).map(CheckResult::Udp).collect()), + ); + checks.spawn( + http::run(self.config.http_trackers.clone(), DEFAULT_TIMEOUT) + .map(|mut f| f.drain(..).map(CheckResult::Http).collect()), + ); + checks.spawn( + health::run(self.config.health_checks.clone(), DEFAULT_TIMEOUT) + .map(|mut f| f.drain(..).map(CheckResult::Health).collect()), + ); + + while let Some(results) = checks.join_next().await { + check_results.append(&mut results?); + } + + let json_output = serde_json::json!(check_results); + self.console + .println(&serde_json::to_string_pretty(&json_output).expect("it should consume valid json")); + + Ok(check_results) + } +} diff --git a/src/console/clients/http/app.rs b/src/console/clients/http/app.rs new file mode 100644 index 000000000..a54db5f8b --- /dev/null +++ b/src/console/clients/http/app.rs @@ -0,0 +1,102 @@ +//! HTTP Tracker client: +//! +//! Examples: +//! +//! `Announce` request: +//! +//! ```text +//! cargo run --bin http_tracker_client announce http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! `Scrape` request: +//! +//! ```text +//! cargo run --bin http_tracker_client scrape http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! 
``` +use std::str::FromStr; +use std::time::Duration; + +use anyhow::Context; +use clap::{Parser, Subcommand}; +use reqwest::Url; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; +use torrust_tracker_primitives::info_hash::InfoHash; + +use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; +use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; +use crate::shared::bit_torrent::tracker::http::client::responses::scrape; +use crate::shared::bit_torrent::tracker::http::client::{requests, Client}; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + #[command(subcommand)] + command: Command, +} + +#[derive(Subcommand, Debug)] +enum Command { + Announce { tracker_url: String, info_hash: String }, + Scrape { tracker_url: String, info_hashes: Vec }, +} + +/// # Errors +/// +/// Will return an error if the command fails. +pub async fn run() -> anyhow::Result<()> { + let args = Args::parse(); + + match args.command { + Command::Announce { tracker_url, info_hash } => { + announce_command(tracker_url, info_hash, DEFAULT_TIMEOUT).await?; + } + Command::Scrape { + tracker_url, + info_hashes, + } => { + scrape_command(&tracker_url, &info_hashes, DEFAULT_TIMEOUT).await?; + } + } + + Ok(()) +} + +async fn announce_command(tracker_url: String, info_hash: String, timeout: Duration) -> anyhow::Result<()> { + let base_url = Url::parse(&tracker_url).context("failed to parse HTTP tracker base URL")?; + let info_hash = + InfoHash::from_str(&info_hash).expect("Invalid infohash. Example infohash: `9c38422213e30bff212b30c360d26f9a02136422`"); + + let response = Client::new(base_url, timeout)? 
+ .announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) + .await?; + + let body = response.bytes().await?; + + let announce_response: Announce = serde_bencode::from_bytes(&body) + .unwrap_or_else(|_| panic!("response body should be a valid announce response, got: \"{:#?}\"", &body)); + + let json = serde_json::to_string(&announce_response).context("failed to serialize scrape response into JSON")?; + + println!("{json}"); + + Ok(()) +} + +async fn scrape_command(tracker_url: &str, info_hashes: &[String], timeout: Duration) -> anyhow::Result<()> { + let base_url = Url::parse(tracker_url).context("failed to parse HTTP tracker base URL")?; + + let query = requests::scrape::Query::try_from(info_hashes).context("failed to parse infohashes")?; + + let response = Client::new(base_url, timeout)?.scrape(&query).await?; + + let body = response.bytes().await?; + + let scrape_response = scrape::Response::try_from_bencoded(&body) + .unwrap_or_else(|_| panic!("response body should be a valid scrape response, got: \"{:#?}\"", &body)); + + let json = serde_json::to_string(&scrape_response).context("failed to serialize scrape response into JSON")?; + + println!("{json}"); + + Ok(()) +} diff --git a/src/console/clients/http/mod.rs b/src/console/clients/http/mod.rs new file mode 100644 index 000000000..eaa71957f --- /dev/null +++ b/src/console/clients/http/mod.rs @@ -0,0 +1,36 @@ +use std::sync::Arc; + +use serde::Serialize; +use thiserror::Error; + +use crate::shared::bit_torrent::tracker::http::client::responses::scrape::BencodeParseError; + +pub mod app; + +#[derive(Debug, Clone, Error, Serialize)] +#[serde(into = "String")] +pub enum Error { + #[error("Http request did not receive a response within the timeout: {err:?}")] + HttpClientError { + err: crate::shared::bit_torrent::tracker::http::client::Error, + }, + #[error("Http failed to get a response at all: {err:?}")] + ResponseError { err: Arc }, + #[error("Failed to deserialize the bencoded response 
data with the error: \"{err:?}\"")] + ParseBencodeError { + data: hyper::body::Bytes, + err: Arc, + }, + + #[error("Failed to deserialize the bencoded response data with the error: \"{err:?}\"")] + BencodeParseError { + data: hyper::body::Bytes, + err: Arc, + }, +} + +impl From for String { + fn from(value: Error) -> Self { + value.to_string() + } +} diff --git a/src/console/clients/mod.rs b/src/console/clients/mod.rs new file mode 100644 index 000000000..8492f8ba5 --- /dev/null +++ b/src/console/clients/mod.rs @@ -0,0 +1,4 @@ +//! Console clients. +pub mod checker; +pub mod http; +pub mod udp; diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs new file mode 100644 index 000000000..c2ba647b8 --- /dev/null +++ b/src/console/clients/udp/app.rs @@ -0,0 +1,208 @@ +//! UDP Tracker client: +//! +//! Examples: +//! +//! Announce request: +//! +//! ```text +//! cargo run --bin udp_tracker_client announce 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! Announce response: +//! +//! ```json +//! { +//! "transaction_id": -888840697 +//! "announce_interval": 120, +//! "leechers": 0, +//! "seeders": 1, +//! "peers": [ +//! "123.123.123.123:51289" +//! ], +//! } +//! ``` +//! +//! Scrape request: +//! +//! ```text +//! cargo run --bin udp_tracker_client scrape 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! Scrape response: +//! +//! ```json +//! { +//! "transaction_id": -888840697, +//! "torrent_stats": [ +//! { +//! "completed": 0, +//! "leechers": 0, +//! "seeders": 0 +//! }, +//! { +//! "completed": 0, +//! "leechers": 0, +//! "seeders": 0 +//! } +//! ] +//! } +//! ``` +//! +//! You can use an URL with instead of the socket address. For example: +//! +//! ```text +//! cargo run --bin udp_tracker_client scrape udp://localhost:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! 
cargo run --bin udp_tracker_client scrape udp://localhost:6969/scrape 9c38422213e30bff212b30c360d26f9a02136422 | jq
+//! ```
+//!
+//! The protocol (`udp://`) in the URL is mandatory. The path (`/scrape`) is optional. It always uses `/scrape`.
+use std::net::{SocketAddr, ToSocketAddrs};
+use std::str::FromStr;
+
+use anyhow::Context;
+use aquatic_udp_protocol::{Response, TransactionId};
+use clap::{Parser, Subcommand};
+use torrust_tracker_configuration::DEFAULT_TIMEOUT;
+use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash;
+use tracing::level_filters::LevelFilter;
+use url::Url;
+
+use super::Error;
+use crate::console::clients::udp::checker;
+use crate::console::clients::udp::responses::dto::SerializableResponse;
+use crate::console::clients::udp::responses::json::ToJson;
+
+const RANDOM_TRANSACTION_ID: i32 = -888_840_697;
+
+#[derive(Parser, Debug)]
+#[command(author, version, about, long_about = None)]
+struct Args {
+    #[command(subcommand)]
+    command: Command,
+}
+
+#[derive(Subcommand, Debug)]
+enum Command {
+    Announce {
+        #[arg(value_parser = parse_socket_addr)]
+        tracker_socket_addr: SocketAddr,
+        #[arg(value_parser = parse_info_hash)]
+        info_hash: TorrustInfoHash,
+    },
+    Scrape {
+        #[arg(value_parser = parse_socket_addr)]
+        tracker_socket_addr: SocketAddr,
+        #[arg(value_parser = parse_info_hash, num_args = 1..=74, value_delimiter = ' ')]
+        info_hashes: Vec<TorrustInfoHash>,
+    },
+}
+
+/// # Errors
+/// 
+/// Will return an error if the command fails.
+/// +/// +pub async fn run() -> anyhow::Result<()> { + tracing_stdout_init(LevelFilter::INFO); + + let args = Args::parse(); + + let response = match args.command { + Command::Announce { + tracker_socket_addr: remote_addr, + info_hash, + } => handle_announce(remote_addr, &info_hash).await?, + Command::Scrape { + tracker_socket_addr: remote_addr, + info_hashes, + } => handle_scrape(remote_addr, &info_hashes).await?, + }; + + let response: SerializableResponse = response.into(); + let response_json = response.to_json_string()?; + + print!("{response_json}"); + + Ok(()) +} + +fn tracing_stdout_init(filter: LevelFilter) { + tracing_subscriber::fmt().with_max_level(filter).init(); + tracing::debug!("Logging initialized"); +} + +async fn handle_announce(remote_addr: SocketAddr, info_hash: &TorrustInfoHash) -> Result { + let transaction_id = TransactionId::new(RANDOM_TRANSACTION_ID); + + let client = checker::Client::new(remote_addr, DEFAULT_TIMEOUT).await?; + + let connection_id = client.send_connection_request(transaction_id).await?; + + client.send_announce_request(transaction_id, connection_id, *info_hash).await +} + +async fn handle_scrape(remote_addr: SocketAddr, info_hashes: &[TorrustInfoHash]) -> Result { + let transaction_id = TransactionId::new(RANDOM_TRANSACTION_ID); + + let client = checker::Client::new(remote_addr, DEFAULT_TIMEOUT).await?; + + let connection_id = client.send_connection_request(transaction_id).await?; + + client.send_scrape_request(connection_id, transaction_id, info_hashes).await +} + +fn parse_socket_addr(tracker_socket_addr_str: &str) -> anyhow::Result { + tracing::debug!("Tracker socket address: {tracker_socket_addr_str:#?}"); + + // Check if the address is a valid URL. If so, extract the host and port. 
+ let resolved_addr = if let Ok(url) = Url::parse(tracker_socket_addr_str) { + tracing::debug!("Tracker socket address URL: {url:?}"); + + let host = url + .host_str() + .with_context(|| format!("invalid host in URL: `{tracker_socket_addr_str}`"))? + .to_owned(); + + let port = url + .port() + .with_context(|| format!("port not found in URL: `{tracker_socket_addr_str}`"))? + .to_owned(); + + (host, port) + } else { + // If not a URL, assume it's a host:port pair. + + let parts: Vec<&str> = tracker_socket_addr_str.split(':').collect(); + + if parts.len() != 2 { + return Err(anyhow::anyhow!( + "invalid address format: `{}`. Expected format is host:port", + tracker_socket_addr_str + )); + } + + let host = parts[0].to_owned(); + + let port = parts[1] + .parse::() + .with_context(|| format!("invalid port: `{}`", parts[1]))? + .to_owned(); + + (host, port) + }; + + tracing::debug!("Resolved address: {resolved_addr:#?}"); + + // Perform DNS resolution. + let socket_addrs: Vec<_> = resolved_addr.to_socket_addrs()?.collect(); + if socket_addrs.is_empty() { + Err(anyhow::anyhow!("DNS resolution failed for `{}`", tracker_socket_addr_str)) + } else { + Ok(socket_addrs[0]) + } +} + +fn parse_info_hash(info_hash_str: &str) -> anyhow::Result { + TorrustInfoHash::from_str(info_hash_str) + .map_err(|e| anyhow::Error::msg(format!("failed to parse info-hash `{info_hash_str}`: {e:?}"))) +} diff --git a/src/console/clients/udp/checker.rs b/src/console/clients/udp/checker.rs new file mode 100644 index 000000000..437af33e0 --- /dev/null +++ b/src/console/clients/udp/checker.rs @@ -0,0 +1,177 @@ +use std::net::{Ipv4Addr, SocketAddr}; +use std::num::NonZeroU16; +use std::time::Duration; + +use aquatic_udp_protocol::common::InfoHash; +use aquatic_udp_protocol::{ + AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, NumberOfBytes, NumberOfPeers, + PeerId, PeerKey, Port, Response, ScrapeRequest, TransactionId, +}; +use 
torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash; + +use super::Error; +use crate::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; + +/// A UDP Tracker client to make test requests (checks). +#[derive(Debug)] +pub struct Client { + client: UdpTrackerClient, +} + +impl Client { + /// Creates a new `[Client]` for checking a UDP Tracker Service + /// + /// # Errors + /// + /// It will error if unable to bind and connect to the udp remote address. + /// + pub async fn new(remote_addr: SocketAddr, timeout: Duration) -> Result { + let client = UdpTrackerClient::new(remote_addr, timeout) + .await + .map_err(|err| Error::UnableToBindAndConnect { remote_addr, err })?; + + Ok(Self { client }) + } + + /// Returns the local addr of this [`Client`]. + /// + /// # Errors + /// + /// This function will return an error if the socket is somehow not bound. + pub fn local_addr(&self) -> std::io::Result { + self.client.client.socket.local_addr() + } + + /// Sends a connection request to the UDP Tracker server. + /// + /// # Errors + /// + /// Will return and error if + /// + /// - It can't connect to the remote UDP socket. + /// - It can't make a connection request successfully to the remote UDP + /// server (after successfully connecting to the remote UDP socket). + /// + /// # Panics + /// + /// Will panic if it receives an unexpected response. 
+ pub async fn send_connection_request(&self, transaction_id: TransactionId) -> Result { + tracing::debug!("Sending connection request with transaction id: {transaction_id:#?}"); + + let connect_request = ConnectRequest { transaction_id }; + + let _ = self + .client + .send(connect_request.into()) + .await + .map_err(|err| Error::UnableToSendConnectionRequest { err })?; + + let response = self + .client + .receive() + .await + .map_err(|err| Error::UnableToReceiveConnectResponse { err })?; + + match response { + Response::Connect(connect_response) => Ok(connect_response.connection_id), + _ => Err(Error::UnexpectedConnectionResponse { response }), + } + } + + /// Sends an announce request to the UDP Tracker server. + /// + /// # Errors + /// + /// Will return and error if the client is not connected. You have to connect + /// before calling this function. + /// + /// # Panics + /// + /// It will panic if the `local_address` has a zero port. + pub async fn send_announce_request( + &self, + transaction_id: TransactionId, + connection_id: ConnectionId, + info_hash: TorrustInfoHash, + ) -> Result { + tracing::debug!("Sending announce request with transaction id: {transaction_id:#?}"); + + let port = NonZeroU16::new( + self.client + .client + .socket + .local_addr() + .expect("it should get the local address") + .port(), + ) + .expect("it should no be zero"); + + let announce_request = AnnounceRequest { + connection_id, + action_placeholder: AnnounceActionPlaceholder::default(), + transaction_id, + info_hash: InfoHash(info_hash.bytes()), + peer_id: PeerId(*b"-qB00000000000000001"), + bytes_downloaded: NumberOfBytes(0i64.into()), + bytes_uploaded: NumberOfBytes(0i64.into()), + bytes_left: NumberOfBytes(0i64.into()), + event: AnnounceEvent::Started.into(), + ip_address: Ipv4Addr::new(0, 0, 0, 0).into(), + key: PeerKey::new(0i32), + peers_wanted: NumberOfPeers(1i32.into()), + port: Port::new(port), + }; + + let _ = self + .client + .send(announce_request.into()) + .await + 
.map_err(|err| Error::UnableToSendAnnounceRequest { err })?; + + let response = self + .client + .receive() + .await + .map_err(|err| Error::UnableToReceiveAnnounceResponse { err })?; + + Ok(response) + } + + /// Sends a scrape request to the UDP Tracker server. + /// + /// # Errors + /// + /// Will return and error if the client is not connected. You have to connect + /// before calling this function. + pub async fn send_scrape_request( + &self, + connection_id: ConnectionId, + transaction_id: TransactionId, + info_hashes: &[TorrustInfoHash], + ) -> Result { + tracing::debug!("Sending scrape request with transaction id: {transaction_id:#?}"); + + let scrape_request = ScrapeRequest { + connection_id, + transaction_id, + info_hashes: info_hashes + .iter() + .map(|torrust_info_hash| InfoHash(torrust_info_hash.bytes())) + .collect(), + }; + + let _ = self + .client + .send(scrape_request.into()) + .await + .map_err(|err| Error::UnableToSendScrapeRequest { err })?; + + let response = self + .client + .receive() + .await + .map_err(|err| Error::UnableToReceiveScrapeResponse { err })?; + + Ok(response) + } +} diff --git a/src/console/clients/udp/mod.rs b/src/console/clients/udp/mod.rs new file mode 100644 index 000000000..b92bed096 --- /dev/null +++ b/src/console/clients/udp/mod.rs @@ -0,0 +1,51 @@ +use std::net::SocketAddr; + +use aquatic_udp_protocol::Response; +use serde::Serialize; +use thiserror::Error; + +use crate::shared::bit_torrent::tracker::udp; + +pub mod app; +pub mod checker; +pub mod responses; + +#[derive(Error, Debug, Clone, Serialize)] +#[serde(into = "String")] +pub enum Error { + #[error("Failed to Connect to: {remote_addr}, with error: {err}")] + UnableToBindAndConnect { remote_addr: SocketAddr, err: udp::Error }, + + #[error("Failed to send a connection request, with error: {err}")] + UnableToSendConnectionRequest { err: udp::Error }, + + #[error("Failed to receive a connect response, with error: {err}")] + UnableToReceiveConnectResponse { err: 
udp::Error }, + + #[error("Failed to send a announce request, with error: {err}")] + UnableToSendAnnounceRequest { err: udp::Error }, + + #[error("Failed to receive a announce response, with error: {err}")] + UnableToReceiveAnnounceResponse { err: udp::Error }, + + #[error("Failed to send a scrape request, with error: {err}")] + UnableToSendScrapeRequest { err: udp::Error }, + + #[error("Failed to receive a scrape response, with error: {err}")] + UnableToReceiveScrapeResponse { err: udp::Error }, + + #[error("Failed to receive a response, with error: {err}")] + UnableToReceiveResponse { err: udp::Error }, + + #[error("Failed to get local address for connection: {err}")] + UnableToGetLocalAddr { err: udp::Error }, + + #[error("Failed to get a connection response: {response:?}")] + UnexpectedConnectionResponse { response: Response }, +} + +impl From for String { + fn from(value: Error) -> Self { + value.to_string() + } +} diff --git a/src/console/clients/udp/responses/dto.rs b/src/console/clients/udp/responses/dto.rs new file mode 100644 index 000000000..93320b0f7 --- /dev/null +++ b/src/console/clients/udp/responses/dto.rs @@ -0,0 +1,128 @@ +//! Aquatic responses are not serializable. These are the serializable wrappers. 
+use std::net::{Ipv4Addr, Ipv6Addr}; + +use aquatic_udp_protocol::Response::{self}; +use aquatic_udp_protocol::{AnnounceResponse, ConnectResponse, ErrorResponse, Ipv4AddrBytes, Ipv6AddrBytes, ScrapeResponse}; +use serde::Serialize; + +#[derive(Serialize)] +pub enum SerializableResponse { + Connect(ConnectSerializableResponse), + AnnounceIpv4(AnnounceSerializableResponse), + AnnounceIpv6(AnnounceSerializableResponse), + Scrape(ScrapeSerializableResponse), + Error(ErrorSerializableResponse), +} + +impl From for SerializableResponse { + fn from(response: Response) -> Self { + match response { + Response::Connect(response) => SerializableResponse::Connect(ConnectSerializableResponse::from(response)), + Response::AnnounceIpv4(response) => SerializableResponse::AnnounceIpv4(AnnounceSerializableResponse::from(response)), + Response::AnnounceIpv6(response) => SerializableResponse::AnnounceIpv6(AnnounceSerializableResponse::from(response)), + Response::Scrape(response) => SerializableResponse::Scrape(ScrapeSerializableResponse::from(response)), + Response::Error(response) => SerializableResponse::Error(ErrorSerializableResponse::from(response)), + } + } +} + +#[derive(Serialize)] +pub struct ConnectSerializableResponse { + transaction_id: i32, + connection_id: i64, +} + +impl From for ConnectSerializableResponse { + fn from(connect: ConnectResponse) -> Self { + Self { + transaction_id: connect.transaction_id.0.into(), + connection_id: connect.connection_id.0.into(), + } + } +} + +#[derive(Serialize)] +pub struct AnnounceSerializableResponse { + transaction_id: i32, + announce_interval: i32, + leechers: i32, + seeders: i32, + peers: Vec, +} + +impl From> for AnnounceSerializableResponse { + fn from(announce: AnnounceResponse) -> Self { + Self { + transaction_id: announce.fixed.transaction_id.0.into(), + announce_interval: announce.fixed.announce_interval.0.into(), + leechers: announce.fixed.leechers.0.into(), + seeders: announce.fixed.seeders.0.into(), + peers: announce + 
.peers + .iter() + .map(|peer| format!("{}:{}", Ipv4Addr::from(peer.ip_address), peer.port.0)) + .collect::>(), + } + } +} + +impl From> for AnnounceSerializableResponse { + fn from(announce: AnnounceResponse) -> Self { + Self { + transaction_id: announce.fixed.transaction_id.0.into(), + announce_interval: announce.fixed.announce_interval.0.into(), + leechers: announce.fixed.leechers.0.into(), + seeders: announce.fixed.seeders.0.into(), + peers: announce + .peers + .iter() + .map(|peer| format!("{}:{}", Ipv6Addr::from(peer.ip_address), peer.port.0)) + .collect::>(), + } + } +} + +#[derive(Serialize)] +pub struct ScrapeSerializableResponse { + transaction_id: i32, + torrent_stats: Vec, +} + +impl From for ScrapeSerializableResponse { + fn from(scrape: ScrapeResponse) -> Self { + Self { + transaction_id: scrape.transaction_id.0.into(), + torrent_stats: scrape + .torrent_stats + .iter() + .map(|torrent_scrape_statistics| TorrentStats { + seeders: torrent_scrape_statistics.seeders.0.into(), + completed: torrent_scrape_statistics.completed.0.into(), + leechers: torrent_scrape_statistics.leechers.0.into(), + }) + .collect::>(), + } + } +} + +#[derive(Serialize)] +pub struct ErrorSerializableResponse { + transaction_id: i32, + message: String, +} + +impl From for ErrorSerializableResponse { + fn from(error: ErrorResponse) -> Self { + Self { + transaction_id: error.transaction_id.0.into(), + message: error.message.to_string(), + } + } +} + +#[derive(Serialize)] +struct TorrentStats { + seeders: i32, + completed: i32, + leechers: i32, +} diff --git a/src/console/clients/udp/responses/json.rs b/src/console/clients/udp/responses/json.rs new file mode 100644 index 000000000..5d2bd6b89 --- /dev/null +++ b/src/console/clients/udp/responses/json.rs @@ -0,0 +1,25 @@ +use anyhow::Context; +use serde::Serialize; + +use super::dto::SerializableResponse; + +#[allow(clippy::module_name_repetitions)] +pub trait ToJson { + /// + /// Returns a string with the JSON serialized version of 
the response
+    ///
+    /// # Errors
+    ///
+    /// Will return an error if serialization fails.
+    ///
+    fn to_json_string(&self) -> anyhow::Result<String>
+    where
+        Self: Serialize,
+    {
+        let pretty_json = serde_json::to_string_pretty(self).context("response JSON serialization")?;
+
+        Ok(pretty_json)
+    }
+}
+
+impl ToJson for SerializableResponse {}
diff --git a/src/console/clients/udp/responses/mod.rs b/src/console/clients/udp/responses/mod.rs
new file mode 100644
index 000000000..e6d2e5e51
--- /dev/null
+++ b/src/console/clients/udp/responses/mod.rs
@@ -0,0 +1,2 @@
+pub mod dto;
+pub mod json;
diff --git a/src/console/mod.rs b/src/console/mod.rs
new file mode 100644
index 000000000..dab338e4b
--- /dev/null
+++ b/src/console/mod.rs
@@ -0,0 +1,4 @@
+//! Console apps.
+pub mod ci;
+pub mod clients;
+pub mod profiling;
diff --git a/src/console/profiling.rs b/src/console/profiling.rs
new file mode 100644
index 000000000..5fb507197
--- /dev/null
+++ b/src/console/profiling.rs
@@ -0,0 +1,201 @@
+//! This binary is used for profiling with [valgrind](https://valgrind.org/)
+//! and [kcachegrind](https://kcachegrind.github.io/).
+//!
+//! # Requirements
+//!
+//! [valgrind](https://valgrind.org/) and [kcachegrind](https://kcachegrind.github.io/).
+//!
+//! On Ubuntu you can install them with:
+//!
+//! ```text
+//! sudo apt install valgrind kcachegrind
+//! ```
+//!
+//! > NOTICE: valgrind executes the program you want to profile and waits until
+//! > it ends. Since the tracker is a service and does not end, the profiling
+//! > binary accepts an argument with the duration you want to run the tracker,
+//! > so that it terminates automatically after that period of time.
+//!
+//! # Run profiling
+//!
+//! To run the profiling you have to:
+//!
+//! 1. Build and run the tracker for profiling.
+//! 2. Run the aquatic UDP load test tool to start collecting data in the tracker.
+//!
+//! Build and run the tracker for profiling:
+//!
+//! ```text
+//!
RUSTFLAGS='-g' cargo build --release --bin profiling \ +//! && export TORRUST_TRACKER_CONFIG_TOML_PATH="./share/default/config/tracker.udp.benchmarking.toml" \ +//! && valgrind \ +//! --tool=callgrind \ +//! --callgrind-out-file=callgrind.out \ +//! --collect-jumps=yes \ +//! --simulate-cache=yes \ +//! ./target/release/profiling 60 +//! ``` +//! +//! The output should be something like: +//! +//! ```text +//! RUSTFLAGS='-g' cargo build --release --bin profiling \ +//! && export TORRUST_TRACKER_CONFIG_TOML_PATH="./share/default/config/tracker.udp.benchmarking.toml" \ +//! && valgrind \ +//! --tool=callgrind \ +//! --callgrind-out-file=callgrind.out \ +//! --collect-jumps=yes \ +//! --simulate-cache=yes \ +//! ./target/release/profiling 60 +//! +//! Compiling torrust-tracker v3.0.0-alpha.12-develop (/home/developer/Documents/git/committer/me/github/torrust/torrust-tracker) +//! Finished `release` profile [optimized + debuginfo] target(s) in 1m 15s +//! ==122801== Callgrind, a call-graph generating cache profiler +//! ==122801== Copyright (C) 2002-2017, and GNU GPL'd, by Josef Weidendorfer et al. +//! ==122801== Using Valgrind-3.19.0 and LibVEX; rerun with -h for copyright info +//! ==122801== Command: ./target/release/profiling 60 +//! ==122801== +//! --122801-- warning: L3 cache found, using its data for the LL simulation. +//! ==122801== For interactive control, run 'callgrind_control -h'. +//! Loading configuration file: `./share/default/config/tracker.udp.benchmarking.toml` ... +//! Torrust successfully shutdown. +//! ==122801== +//! ==122801== Events : Ir Dr Dw I1mr D1mr D1mw ILmr DLmr DLmw +//! ==122801== Collected : 1160654816 278135882 247755311 24453652 12650490 16315690 10932 2481624 4832145 +//! ==122801== +//! ==122801== I refs: 1,160,654,816 +//! ==122801== I1 misses: 24,453,652 +//! ==122801== LLi misses: 10,932 +//! ==122801== I1 miss rate: 2.11% +//! ==122801== LLi miss rate: 0.00% +//! ==122801== +//! 
==122801== D refs: 525,891,193 (278,135,882 rd + 247,755,311 wr) +//! ==122801== D1 misses: 28,966,180 ( 12,650,490 rd + 16,315,690 wr) +//! ==122801== LLd misses: 7,313,769 ( 2,481,624 rd + 4,832,145 wr) +//! ==122801== D1 miss rate: 5.5% ( 4.5% + 6.6% ) +//! ==122801== LLd miss rate: 1.4% ( 0.9% + 2.0% ) +//! ==122801== +//! ==122801== LL refs: 53,419,832 ( 37,104,142 rd + 16,315,690 wr) +//! ==122801== LL misses: 7,324,701 ( 2,492,556 rd + 4,832,145 wr) +//! ==122801== LL miss rate: 0.4% ( 0.2% + 2.0% ) +//! ``` +//! +//! > NOTICE: We are using an specific tracker configuration for profiling that +//! > removes all features except the UDP tracker and sets the logging level to `error`. +//! +//! Build the aquatic UDP load test command: +//! +//! ```text +//! cd /tmp +//! git clone git@github.com:greatest-ape/aquatic.git +//! cd aquatic +//! cargo build --profile=release-debug -p aquatic_udp_load_test +//! ./target/release-debug/aquatic_udp_load_test -p > "load-test-config.toml" +//! ``` +//! +//! Modify the "load-test-config.toml" file to change the UDP tracker port from +//! `3000` to `6969`. +//! +//! Running the aquatic UDP load test command: +//! +//! ```text +//! ./target/release-debug/aquatic_udp_load_test -c "load-test-config.toml" +//! ``` +//! +//! The output should be something like this: +//! +//! ```text +//! Starting client with config: Config { +//! server_address: 127.0.0.1:6969, +//! log_level: Error, +//! workers: 1, +//! duration: 0, +//! summarize_last: 0, +//! extra_statistics: true, +//! network: NetworkConfig { +//! multiple_client_ipv4s: true, +//! sockets_per_worker: 4, +//! recv_buffer: 8000000, +//! }, +//! requests: RequestConfig { +//! number_of_torrents: 1000000, +//! number_of_peers: 2000000, +//! scrape_max_torrents: 10, +//! announce_peers_wanted: 30, +//! weight_connect: 50, +//! weight_announce: 50, +//! weight_scrape: 1, +//! peer_seeder_probability: 0.75, +//! }, +//! } +//! +//! Requests out: 45097.51/second +//! 
Responses in: 4212.70/second +//! - Connect responses: 2098.15 +//! - Announce responses: 2074.95 +//! - Scrape responses: 39.59 +//! - Error responses: 0.00 +//! Peers per announce response: 0.00 +//! Announce responses per info hash: +//! - p10: 1 +//! - p25: 1 +//! - p50: 1 +//! - p75: 2 +//! - p90: 3 +//! - p95: 4 +//! - p99: 6 +//! - p99.9: 8 +//! - p100: 10 +//! ``` +//! +//! After running the tracker for some seconds the tracker will automatically stop +//! and `valgrind`will write the file `callgrind.out` with the data. +//! +//! You can now analyze the collected data with: +//! +//! ```text +//! kcachegrind callgrind.out +//! ``` +use std::env; +use std::time::Duration; + +use tokio::time::sleep; + +use crate::{app, bootstrap}; + +pub async fn run() { + // Parse command line arguments + let args: Vec = env::args().collect(); + + // Ensure an argument for duration is provided + if args.len() != 2 { + eprintln!("Usage: {} ", args[0]); + return; + } + + // Parse duration argument + let Ok(duration_secs) = args[1].parse::() else { + eprintln!("Invalid duration provided"); + return; + }; + + let (config, tracker) = bootstrap::app::setup(); + + let jobs = app::start(&config, tracker).await; + + // Run the tracker for a fixed duration + let run_duration = sleep(Duration::from_secs(duration_secs)); + + tokio::select! { + () = run_duration => { + tracing::info!("Torrust timed shutdown.."); + }, + _ = tokio::signal::ctrl_c() => { + tracing::info!("Torrust shutting down via Ctrl+C ..."); + // Await for all jobs to shutdown + futures::future::join_all(jobs).await; + } + } + + println!("Torrust successfully shutdown."); +} diff --git a/src/core/auth.rs b/src/core/auth.rs new file mode 100644 index 000000000..0243fceb4 --- /dev/null +++ b/src/core/auth.rs @@ -0,0 +1,346 @@ +//! Tracker authentication services and structs. +//! +//! This module contains functions to handle tracker keys. +//! 
Tracker keys are tokens used to authenticate the tracker clients when the tracker runs +//! in `private` or `private_listed` modes. +//! +//! There are services to [`generate_key`] and [`verify_key_expiration`] authentication keys. +//! +//! Authentication keys are used only by [`HTTP`](crate::servers::http) trackers. All keys have an expiration time, that means +//! they are only valid during a period of time. After that time the expiring key will no longer be valid. +//! +//! Keys are stored in this struct: +//! +//! ```rust,no_run +//! use torrust_tracker::core::auth::Key; +//! use torrust_tracker_primitives::DurationSinceUnixEpoch; +//! +//! pub struct ExpiringKey { +//! /// Random 32-char string. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` +//! pub key: Key, +//! /// Timestamp, the key will be no longer valid after this timestamp +//! pub valid_until: Option, +//! } +//! ``` +//! +//! You can generate a new key valid for `9999` seconds and `0` nanoseconds from the current time with the following: +//! +//! ```rust,no_run +//! use torrust_tracker::core::auth; +//! use std::time::Duration; +//! +//! let expiring_key = auth::generate_key(Some(Duration::new(9999, 0))); +//! +//! // And you can later verify it with: +//! +//! assert!(auth::verify_key_expiration(&expiring_key).is_ok()); +//! ``` + +use std::panic::Location; +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; + +use derive_more::Display; +use rand::distributions::Alphanumeric; +use rand::{thread_rng, Rng}; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; +use torrust_tracker_located_error::{DynError, LocatedError}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; +use crate::CurrentClock; + +/// It generates a new permanent random key [`PeerKey`]. 
+#[must_use] +pub fn generate_permanent_key() -> PeerKey { + generate_key(None) +} + +/// It generates a new random 32-char authentication [`PeerKey`]. +/// +/// It can be an expiring or permanent key. +/// +/// # Panics +/// +/// It would panic if the `lifetime: Duration` + Duration is more than `Duration::MAX`. +/// +/// # Arguments +/// +/// * `lifetime`: if `None` the key will be permanent. +#[must_use] +pub fn generate_key(lifetime: Option) -> PeerKey { + let random_id: String = thread_rng() + .sample_iter(&Alphanumeric) + .take(AUTH_KEY_LENGTH) + .map(char::from) + .collect(); + + if let Some(lifetime) = lifetime { + tracing::debug!("Generated key: {}, valid for: {:?} seconds", random_id, lifetime); + + PeerKey { + key: random_id.parse::().unwrap(), + valid_until: Some(CurrentClock::now_add(&lifetime).unwrap()), + } + } else { + tracing::debug!("Generated key: {}, permanent", random_id); + + PeerKey { + key: random_id.parse::().unwrap(), + valid_until: None, + } + } +} + +/// It verifies an [`PeerKey`]. It checks if the expiration date has passed. +/// Permanent keys without duration (`None`) do not expire. +/// +/// # Errors +/// +/// Will return: +/// +/// - `Error::KeyExpired` if `auth_key.valid_until` is past the `current_time`. +/// - `Error::KeyInvalid` if `auth_key.valid_until` is past the `None`. +pub fn verify_key_expiration(auth_key: &PeerKey) -> Result<(), Error> { + let current_time: DurationSinceUnixEpoch = CurrentClock::now(); + + match auth_key.valid_until { + Some(valid_until) => { + if valid_until < current_time { + Err(Error::KeyExpired { + location: Location::caller(), + }) + } else { + Ok(()) + } + } + None => Ok(()), // Permanent key + } +} + +/// An authentication key which can potentially have an expiration time. +/// After that time is will automatically become invalid. +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] +pub struct PeerKey { + /// Random 32-char string. 
For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` + pub key: Key, + + /// Timestamp, the key will be no longer valid after this timestamp. + /// If `None` the keys will not expire (permanent key). + pub valid_until: Option, +} + +impl std::fmt::Display for PeerKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self.expiry_time() { + Some(expire_time) => write!(f, "key: `{}`, valid until `{}`", self.key, expire_time), + None => write!(f, "key: `{}`, permanent", self.key), + } + } +} + +impl PeerKey { + #[must_use] + pub fn key(&self) -> Key { + self.key.clone() + } + + /// It returns the expiry time. For example, for the starting time for Unix Epoch + /// (timestamp 0) it will return a `DateTime` whose string representation is + /// `1970-01-01 00:00:00 UTC`. + /// + /// # Panics + /// + /// Will panic when the key timestamp overflows the internal i64 type. + /// (this will naturally happen in 292.5 billion years) + #[must_use] + pub fn expiry_time(&self) -> Option> { + self.valid_until.map(convert_from_timestamp_to_datetime_utc) + } +} + +/// A token used for authentication. +/// +/// - It contains only ascii alphanumeric chars: lower and uppercase letters and +/// numbers. +/// - It's a 32-char string. +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, Display, Hash)] +pub struct Key(String); + +impl Key { + /// # Errors + /// + /// Will return an error is the string represents an invalid key. + /// Valid keys can only contain 32 chars including 0-9, a-z and A-Z. + pub fn new(value: &str) -> Result { + if value.len() != AUTH_KEY_LENGTH { + return Err(ParseKeyError::InvalidKeyLength); + } + + if !value.chars().all(|c| c.is_ascii_alphanumeric()) { + return Err(ParseKeyError::InvalidChars); + } + + Ok(Self(value.to_owned())) + } + + #[must_use] + pub fn value(&self) -> &str { + &self.0 + } +} + +/// Error returned when a key cannot be parsed from a string. 
+/// +/// ```rust,no_run +/// use torrust_tracker::core::auth::Key; +/// use std::str::FromStr; +/// +/// let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; +/// let key = Key::from_str(key_string); +/// +/// assert!(key.is_ok()); +/// assert_eq!(key.unwrap().to_string(), key_string); +/// ``` +/// +/// If the string does not contains a valid key, the parser function will return +/// this error. +#[derive(Debug, Error)] +pub enum ParseKeyError { + #[error("Invalid key length. Key must be have 32 chars")] + InvalidKeyLength, + #[error("Invalid chars for key. Key can only alphanumeric chars (0-9, a-z, A-Z)")] + InvalidChars, +} + +impl FromStr for Key { + type Err = ParseKeyError; + + fn from_str(s: &str) -> Result { + Key::new(s)?; + Ok(Self(s.to_string())) + } +} + +/// Verification error. Error returned when an [`PeerKey`] cannot be +/// verified with the (`crate::core::auth::verify_key`) function. +#[derive(Debug, Error)] +#[allow(dead_code)] +pub enum Error { + #[error("Key could not be verified: {source}")] + KeyVerificationError { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + #[error("Failed to read key: {key}, {location}")] + UnableToReadKey { + location: &'static Location<'static>, + key: Box, + }, + #[error("Key has expired, {location}")] + KeyExpired { location: &'static Location<'static> }, +} + +impl From for Error { + fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { + Error::KeyVerificationError { + source: (Arc::new(e) as DynError).into(), + } + } +} + +#[cfg(test)] +mod tests { + + mod key { + use std::str::FromStr; + + use crate::core::auth::Key; + + #[test] + fn should_be_parsed_from_an_string() { + let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; + let key = Key::from_str(key_string); + + assert!(key.is_ok()); + assert_eq!(key.unwrap().to_string(), key_string); + } + + #[test] + fn length_should_be_32() { + let key = Key::new(""); + assert!(key.is_err()); + + let string_longer_than_32 = 
"012345678901234567890123456789012"; // DevSkim: ignore DS173237 + let key = Key::new(string_longer_than_32); + assert!(key.is_err()); + } + + #[test] + fn should_only_include_alphanumeric_chars() { + let key = Key::new("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"); + assert!(key.is_err()); + } + } + + mod expiring_auth_key { + use std::str::FromStr; + use std::time::Duration; + + use torrust_tracker_clock::clock; + use torrust_tracker_clock::clock::stopped::Stopped as _; + + use crate::core::auth; + + #[test] + fn should_be_parsed_from_an_string() { + let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; + let auth_key = auth::Key::from_str(key_string); + + assert!(auth_key.is_ok()); + assert_eq!(auth_key.unwrap().to_string(), key_string); + } + + #[test] + fn should_be_displayed() { + // Set the time to the current time. + clock::Stopped::local_set_to_unix_epoch(); + + let expiring_key = auth::generate_key(Some(Duration::from_secs(0))); + + assert_eq!( + expiring_key.to_string(), + format!("key: `{}`, valid until `1970-01-01 00:00:00 UTC`", expiring_key.key) // cspell:disable-line + ); + } + + #[test] + fn should_be_generated_with_a_expiration_time() { + let expiring_key = auth::generate_key(Some(Duration::new(9999, 0))); + + assert!(auth::verify_key_expiration(&expiring_key).is_ok()); + } + + #[test] + fn should_be_generate_and_verified() { + // Set the time to the current time. + clock::Stopped::local_set_to_system_time_now(); + + // Make key that is valid for 19 seconds. + let expiring_key = auth::generate_key(Some(Duration::from_secs(19))); + + // Mock the time has passed 10 sec. + clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); + + assert!(auth::verify_key_expiration(&expiring_key).is_ok()); + + // Mock the time has passed another 10 sec. 
+ clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); + + assert!(auth::verify_key_expiration(&expiring_key).is_err()); + } + } +} diff --git a/src/core/databases/driver.rs b/src/core/databases/driver.rs new file mode 100644 index 000000000..a456a2650 --- /dev/null +++ b/src/core/databases/driver.rs @@ -0,0 +1,73 @@ +//! Database driver factory. +//! +//! See [`databases::driver::build`](crate::core::databases::driver::build) +//! function for more information. +use serde::{Deserialize, Serialize}; + +use super::error::Error; +use super::mysql::Mysql; +use super::sqlite::Sqlite; +use super::{Builder, Database}; + +/// The database management system used by the tracker. +/// +/// Refer to: +/// +/// - [Torrust Tracker Configuration](https://docs.rs/torrust-tracker-configuration). +/// - [Torrust Tracker](https://docs.rs/torrust-tracker). +/// +/// For more information about persistence. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, derive_more::Display, Clone)] +pub enum Driver { + /// The Sqlite3 database driver. + Sqlite3, + /// The `MySQL` database driver. + MySQL, +} + +/// It builds a new database driver. 
+/// +/// Example for `SQLite3`: +/// +/// ```rust,no_run +/// use torrust_tracker::core::databases; +/// use torrust_tracker::core::databases::driver::Driver; +/// +/// let db_driver = Driver::Sqlite3; +/// let db_path = "./storage/tracker/lib/database/sqlite3.db".to_string(); +/// let database = databases::driver::build(&db_driver, &db_path); +/// ``` +/// +/// Example for `MySQL`: +/// +/// ```rust,no_run +/// use torrust_tracker::core::databases; +/// use torrust_tracker::core::databases::driver::Driver; +/// +/// let db_driver = Driver::MySQL; +/// let db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker".to_string(); +/// let database = databases::driver::build(&db_driver, &db_path); +/// ``` +/// +/// Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) +/// for more information about the database configuration. +/// +/// > **WARNING**: The driver instantiation runs database migrations. +/// +/// # Errors +/// +/// This function will return an error if unable to connect to the database. +/// +/// # Panics +/// +/// This function will panic if unable to create database tables. +pub fn build(driver: &Driver, db_path: &str) -> Result, Error> { + let database = match driver { + Driver::Sqlite3 => Builder::::build(db_path), + Driver::MySQL => Builder::::build(db_path), + }?; + + database.create_database_tables().expect("Could not create database tables."); + + Ok(database) +} diff --git a/src/core/databases/error.rs b/src/core/databases/error.rs new file mode 100644 index 000000000..4d64baf48 --- /dev/null +++ b/src/core/databases/error.rs @@ -0,0 +1,104 @@ +//! Database errors. +//! +//! This module contains the [Database errors](crate::core::databases::error::Error). 
+use std::panic::Location; +use std::sync::Arc; + +use r2d2_mysql::mysql::UrlError; +use torrust_tracker_located_error::{DynError, Located, LocatedError}; + +use super::driver::Driver; + +#[derive(thiserror::Error, Debug, Clone)] +pub enum Error { + /// The query unexpectedly returned nothing. + #[error("The {driver} query unexpectedly returned nothing: {source}")] + QueryReturnedNoRows { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + driver: Driver, + }, + + /// The query was malformed. + #[error("The {driver} query was malformed: {source}")] + InvalidQuery { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + driver: Driver, + }, + + /// Unable to insert a record into the database + #[error("Unable to insert record into {driver} database, {location}")] + InsertFailed { + location: &'static Location<'static>, + driver: Driver, + }, + + /// Unable to delete a record into the database + #[error("Failed to remove record from {driver} database, error-code: {error_code}, {location}")] + DeleteFailed { + location: &'static Location<'static>, + error_code: usize, + driver: Driver, + }, + + /// Unable to connect to the database + #[error("Failed to connect to {driver} database: {source}")] + ConnectionError { + source: LocatedError<'static, UrlError>, + driver: Driver, + }, + + /// Unable to create a connection pool + #[error("Failed to create r2d2 {driver} connection pool: {source}")] + ConnectionPool { + source: LocatedError<'static, r2d2::Error>, + driver: Driver, + }, +} + +impl From for Error { + #[track_caller] + fn from(err: r2d2_sqlite::rusqlite::Error) -> Self { + match err { + r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => Error::QueryReturnedNoRows { + source: (Arc::new(err) as DynError).into(), + driver: Driver::Sqlite3, + }, + _ => Error::InvalidQuery { + source: (Arc::new(err) as DynError).into(), + driver: Driver::Sqlite3, + }, + } + } +} + +impl From for Error { + #[track_caller] + fn from(err: 
r2d2_mysql::mysql::Error) -> Self { + let e: DynError = Arc::new(err); + Error::InvalidQuery { + source: e.into(), + driver: Driver::MySQL, + } + } +} + +impl From for Error { + #[track_caller] + fn from(err: UrlError) -> Self { + Self::ConnectionError { + source: Located(err).into(), + driver: Driver::MySQL, + } + } +} + +impl From<(r2d2::Error, Driver)> for Error { + #[track_caller] + fn from(e: (r2d2::Error, Driver)) -> Self { + let (err, driver) = e; + Self::ConnectionPool { + source: Located(err).into(), + driver, + } + } +} diff --git a/src/core/databases/mod.rs b/src/core/databases/mod.rs new file mode 100644 index 000000000..f559eb80e --- /dev/null +++ b/src/core/databases/mod.rs @@ -0,0 +1,229 @@ +//! The persistence module. +//! +//! Persistence is currently implemented with one [`Database`] trait. +//! +//! There are two implementations of the trait (two drivers): +//! +//! - [`Mysql`](crate::core::databases::mysql::Mysql) +//! - [`Sqlite`](crate::core::databases::sqlite::Sqlite) +//! +//! > **NOTICE**: There are no database migrations. If there are any changes, +//! > we will implemented them or provide a script to migrate to the new schema. +//! +//! The persistent objects are: +//! +//! - [Torrent metrics](#torrent-metrics) +//! - [Torrent whitelist](torrent-whitelist) +//! - [Authentication keys](authentication-keys) +//! +//! # Torrent metrics +//! +//! Field | Sample data | Description +//! ---|---|--- +//! `id` | 1 | Autoincrement id +//! `info_hash` | `c1277613db1d28709b034a017ab2cae4be07ae10` | `BitTorrent` infohash V1 +//! `completed` | 20 | The number of peers that have ever completed downloading the torrent associated to this entry. See [`Entry`](torrust_tracker_torrent_repository::entry::Entry) for more information. +//! +//! > **NOTICE**: The peer list for a torrent is not persisted. Since peer have to re-announce themselves on intervals, the data is be +//! > regenerated again after some minutes. +//! +//! # Torrent whitelist +//! +//! 
Field | Sample data | Description +//! ---|---|--- +//! `id` | 1 | Autoincrement id +//! `info_hash` | `c1277613db1d28709b034a017ab2cae4be07ae10` | `BitTorrent` infohash V1 +//! +//! # Authentication keys +//! +//! Field | Sample data | Description +//! ---|---|--- +//! `id` | 1 | Autoincrement id +//! `key` | `IrweYtVuQPGbG9Jzx1DihcPmJGGpVy82` | Token +//! `valid_until` | 1672419840 | Timestamp for the expiring date +//! +//! > **NOTICE**: All keys must have an expiration date. +pub mod driver; +pub mod error; +pub mod mysql; +pub mod sqlite; + +use std::marker::PhantomData; + +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::PersistentTorrents; + +use self::error::Error; +use crate::core::auth::{self, Key}; + +struct Builder +where + T: Database, +{ + phantom: PhantomData, +} + +impl Builder +where + T: Database + 'static, +{ + /// . + /// + /// # Errors + /// + /// Will return `r2d2::Error` if `db_path` is not able to create a database. + pub(self) fn build(db_path: &str) -> Result, Error> { + Ok(Box::new(T::new(db_path)?)) + } +} + +/// The persistence trait. It contains all the methods to interact with the database. +pub trait Database: Sync + Send { + /// It instantiates a new database driver. + /// + /// # Errors + /// + /// Will return `r2d2::Error` if `db_path` is not able to create a database. + fn new(db_path: &str) -> Result + where + Self: std::marker::Sized; + + // Schema + + /// It generates the database tables. SQL queries are hardcoded in the trait + /// implementation. + /// + /// # Context: Schema + /// + /// # Errors + /// + /// Will return `Error` if unable to create own tables. + fn create_database_tables(&self) -> Result<(), Error>; + + /// It drops the database tables. + /// + /// # Context: Schema + /// + /// # Errors + /// + /// Will return `Err` if unable to drop tables. 
+ fn drop_database_tables(&self) -> Result<(), Error>; + + // Torrent Metrics + + /// It loads the torrent metrics data from the database. + /// + /// It returns an array of tuples with the torrent + /// [`InfoHash`] and the + /// [`downloaded`](torrust_tracker_torrent_repository::entry::Torrent::downloaded) counter + /// which is the number of times the torrent has been downloaded. + /// See [`Entry::downloaded`](torrust_tracker_torrent_repository::entry::Torrent::downloaded). + /// + /// # Context: Torrent Metrics + /// + /// # Errors + /// + /// Will return `Err` if unable to load. + fn load_persistent_torrents(&self) -> Result; + + /// It saves the torrent metrics data into the database. + /// + /// # Context: Torrent Metrics + /// + /// # Errors + /// + /// Will return `Err` if unable to save. + fn save_persistent_torrent(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error>; + + // Whitelist + + /// It loads the whitelisted torrents from the database. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return `Err` if unable to load. + fn load_whitelist(&self) -> Result, Error>; + + /// It checks if the torrent is whitelisted. + /// + /// It returns `Some(InfoHash)` if the torrent is whitelisted, `None` otherwise. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return `Err` if unable to load. + fn get_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result, Error>; + + /// It adds the torrent to the whitelist. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return `Err` if unable to save. + fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; + + /// It checks if the torrent is whitelisted. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return `Err` if unable to load. 
+ fn is_info_hash_whitelisted(&self, info_hash: InfoHash) -> Result { + Ok(self.get_info_hash_from_whitelist(info_hash)?.is_some()) + } + + /// It removes the torrent from the whitelist. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return `Err` if unable to save. + fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; + + // Authentication keys + + /// It loads the expiring authentication keys from the database. + /// + /// # Context: Authentication Keys + /// + /// # Errors + /// + /// Will return `Err` if unable to load. + fn load_keys(&self) -> Result, Error>; + + /// It gets an expiring authentication key from the database. + /// + /// It returns `Some(PeerKey)` if a [`PeerKey`](crate::core::auth::PeerKey) + /// with the input [`Key`] exists, `None` otherwise. + /// + /// # Context: Authentication Keys + /// + /// # Errors + /// + /// Will return `Err` if unable to load. + fn get_key_from_keys(&self, key: &Key) -> Result, Error>; + + /// It adds an expiring authentication key to the database. + /// + /// # Context: Authentication Keys + /// + /// # Errors + /// + /// Will return `Err` if unable to save. + fn add_key_to_keys(&self, auth_key: &auth::PeerKey) -> Result; + + /// It removes an expiring authentication key from the database. + /// + /// # Context: Authentication Keys + /// + /// # Errors + /// + /// Will return `Err` if unable to load. + fn remove_key_from_keys(&self, key: &Key) -> Result; +} diff --git a/src/core/databases/mysql.rs b/src/core/databases/mysql.rs new file mode 100644 index 000000000..28a5f363b --- /dev/null +++ b/src/core/databases/mysql.rs @@ -0,0 +1,253 @@ +//! The `MySQL` database driver. 
+use std::str::FromStr; +use std::time::Duration; + +use r2d2::Pool; +use r2d2_mysql::mysql::prelude::Queryable; +use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; +use r2d2_mysql::MySqlConnectionManager; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::PersistentTorrents; + +use super::driver::Driver; +use super::{Database, Error}; +use crate::core::auth::{self, Key}; +use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; + +const DRIVER: Driver = Driver::MySQL; + +pub struct Mysql { + pool: Pool, +} + +impl Database for Mysql { + /// It instantiates a new `MySQL` database driver. + /// + /// Refer to [`databases::Database::new`](crate::core::databases::Database::new). + /// + /// # Errors + /// + /// Will return `r2d2::Error` if `db_path` is not able to create `MySQL` database. + fn new(db_path: &str) -> Result { + let opts = Opts::from_url(db_path)?; + let builder = OptsBuilder::from_opts(opts); + let manager = MySqlConnectionManager::new(builder); + let pool = r2d2::Pool::builder().build(manager).map_err(|e| (e, DRIVER))?; + + Ok(Self { pool }) + } + + /// Refer to [`databases::Database::create_database_tables`](crate::core::databases::Database::create_database_tables). 
+ fn create_database_tables(&self) -> Result<(), Error> { + let create_whitelist_table = " + CREATE TABLE IF NOT EXISTS whitelist ( + id integer PRIMARY KEY AUTO_INCREMENT, + info_hash VARCHAR(40) NOT NULL UNIQUE + );" + .to_string(); + + let create_torrents_table = " + CREATE TABLE IF NOT EXISTS torrents ( + id integer PRIMARY KEY AUTO_INCREMENT, + info_hash VARCHAR(40) NOT NULL UNIQUE, + completed INTEGER DEFAULT 0 NOT NULL + );" + .to_string(); + + let create_keys_table = format!( + " + CREATE TABLE IF NOT EXISTS `keys` ( + `id` INT NOT NULL AUTO_INCREMENT, + `key` VARCHAR({}) NOT NULL, + `valid_until` INT(10), + PRIMARY KEY (`id`), + UNIQUE (`key`) + );", + i8::try_from(AUTH_KEY_LENGTH).expect("auth::Auth Key Length Should fit within a i8!") + ); + + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + conn.query_drop(&create_torrents_table) + .expect("Could not create torrents table."); + conn.query_drop(&create_keys_table).expect("Could not create keys table."); + conn.query_drop(&create_whitelist_table) + .expect("Could not create whitelist table."); + + Ok(()) + } + + /// Refer to [`databases::Database::drop_database_tables`](crate::core::databases::Database::drop_database_tables). + fn drop_database_tables(&self) -> Result<(), Error> { + let drop_whitelist_table = " + DROP TABLE `whitelist`;" + .to_string(); + + let drop_torrents_table = " + DROP TABLE `torrents`;" + .to_string(); + + let drop_keys_table = " + DROP TABLE `keys`;" + .to_string(); + + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + conn.query_drop(&drop_whitelist_table) + .expect("Could not drop `whitelist` table."); + conn.query_drop(&drop_torrents_table) + .expect("Could not drop `torrents` table."); + conn.query_drop(&drop_keys_table).expect("Could not drop `keys` table."); + + Ok(()) + } + + /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). 
+ fn load_persistent_torrents(&self) -> Result { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let torrents = conn.query_map( + "SELECT info_hash, completed FROM torrents", + |(info_hash_string, completed): (String, u32)| { + let info_hash = InfoHash::from_str(&info_hash_string).unwrap(); + (info_hash, completed) + }, + )?; + + Ok(torrents.iter().copied().collect()) + } + + /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). + fn load_keys(&self) -> Result, Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let keys = conn.query_map( + "SELECT `key`, valid_until FROM `keys`", + |(key, valid_until): (String, Option)| match valid_until { + Some(valid_until) => auth::PeerKey { + key: key.parse::().unwrap(), + valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), + }, + None => auth::PeerKey { + key: key.parse::().unwrap(), + valid_until: None, + }, + }, + )?; + + Ok(keys) + } + + /// Refer to [`databases::Database::load_whitelist`](crate::core::databases::Database::load_whitelist). + fn load_whitelist(&self) -> Result, Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let info_hashes = conn.query_map("SELECT info_hash FROM whitelist", |info_hash: String| { + InfoHash::from_str(&info_hash).unwrap() + })?; + + Ok(info_hashes) + } + + /// Refer to [`databases::Database::save_persistent_torrent`](crate::core::databases::Database::save_persistent_torrent). + fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + const COMMAND : &str = "INSERT INTO torrents (info_hash, completed) VALUES (:info_hash_str, :completed) ON DUPLICATE KEY UPDATE completed = VALUES(completed)"; + + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let info_hash_str = info_hash.to_string(); + + tracing::debug!("{}", info_hash_str); + + Ok(conn.exec_drop(COMMAND, params! { info_hash_str, completed })?) 
+ } + + /// Refer to [`databases::Database::get_info_hash_from_whitelist`](crate::core::databases::Database::get_info_hash_from_whitelist). + fn get_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result, Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let select = conn.exec_first::( + "SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", + params! { "info_hash" => info_hash.to_hex_string() }, + )?; + + let info_hash = select.map(|f| InfoHash::from_str(&f).expect("Failed to decode InfoHash String from DB!")); + + Ok(info_hash) + } + + /// Refer to [`databases::Database::add_info_hash_to_whitelist`](crate::core::databases::Database::add_info_hash_to_whitelist). + fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let info_hash_str = info_hash.to_string(); + + conn.exec_drop( + "INSERT INTO whitelist (info_hash) VALUES (:info_hash_str)", + params! { info_hash_str }, + )?; + + Ok(1) + } + + /// Refer to [`databases::Database::remove_info_hash_from_whitelist`](crate::core::databases::Database::remove_info_hash_from_whitelist). + fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let info_hash = info_hash.to_string(); + + conn.exec_drop("DELETE FROM whitelist WHERE info_hash = :info_hash", params! { info_hash })?; + + Ok(1) + } + + /// Refer to [`databases::Database::get_key_from_keys`](crate::core::databases::Database::get_key_from_keys). + fn get_key_from_keys(&self, key: &Key) -> Result, Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let query = conn.exec_first::<(String, Option), _, _>( + "SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", + params! 
{ "key" => key.to_string() }, + ); + + let key = query?; + + Ok(key.map(|(key, opt_valid_until)| match opt_valid_until { + Some(valid_until) => auth::PeerKey { + key: key.parse::().unwrap(), + valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), + }, + None => auth::PeerKey { + key: key.parse::().unwrap(), + valid_until: None, + }, + })) + } + + /// Refer to [`databases::Database::add_key_to_keys`](crate::core::databases::Database::add_key_to_keys). + fn add_key_to_keys(&self, auth_key: &auth::PeerKey) -> Result { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let key = auth_key.key.to_string(); + let valid_until = match auth_key.valid_until { + Some(valid_until) => valid_until.as_secs().to_string(), + None => todo!(), + }; + + conn.exec_drop( + "INSERT INTO `keys` (`key`, valid_until) VALUES (:key, :valid_until)", + params! { key, valid_until }, + )?; + + Ok(1) + } + + /// Refer to [`databases::Database::remove_key_from_keys`](crate::core::databases::Database::remove_key_from_keys). + fn remove_key_from_keys(&self, key: &Key) -> Result { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! { "key" => key.to_string() })?; + + Ok(1) + } +} diff --git a/src/core/databases/sqlite.rs b/src/core/databases/sqlite.rs new file mode 100644 index 000000000..69470ee04 --- /dev/null +++ b/src/core/databases/sqlite.rs @@ -0,0 +1,287 @@ +//! The `SQLite3` database driver. 
+use std::panic::Location;
+use std::str::FromStr;
+
+use r2d2::Pool;
+use r2d2_sqlite::rusqlite::params;
+use r2d2_sqlite::rusqlite::types::Null;
+use r2d2_sqlite::SqliteConnectionManager;
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::{DurationSinceUnixEpoch, PersistentTorrents};
+
+use super::driver::Driver;
+use super::{Database, Error};
+use crate::core::auth::{self, Key};
+
+const DRIVER: Driver = Driver::Sqlite3;
+
+pub struct Sqlite {
+    pool: Pool<SqliteConnectionManager>,
+}
+
+impl Database for Sqlite {
+    /// It instantiates a new `SQLite3` database driver.
+    ///
+    /// Refer to [`databases::Database::new`](crate::core::databases::Database::new).
+    ///
+    /// # Errors
+    ///
+    /// Will return `r2d2::Error` if `db_path` is not able to create `SqLite` database.
+    fn new(db_path: &str) -> Result<Sqlite, Error> {
+        let cm = SqliteConnectionManager::file(db_path);
+        Pool::new(cm).map_or_else(|err| Err((err, Driver::Sqlite3).into()), |pool| Ok(Sqlite { pool }))
+    }
+
+    /// Refer to [`databases::Database::create_database_tables`](crate::core::databases::Database::create_database_tables).
+ fn create_database_tables(&self) -> Result<(), Error> { + let create_whitelist_table = " + CREATE TABLE IF NOT EXISTS whitelist ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + info_hash TEXT NOT NULL UNIQUE + );" + .to_string(); + + let create_torrents_table = " + CREATE TABLE IF NOT EXISTS torrents ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + info_hash TEXT NOT NULL UNIQUE, + completed INTEGER DEFAULT 0 NOT NULL + );" + .to_string(); + + let create_keys_table = " + CREATE TABLE IF NOT EXISTS keys ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + key TEXT NOT NULL UNIQUE, + valid_until INTEGER + );" + .to_string(); + + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + conn.execute(&create_whitelist_table, [])?; + conn.execute(&create_keys_table, [])?; + conn.execute(&create_torrents_table, [])?; + + Ok(()) + } + + /// Refer to [`databases::Database::drop_database_tables`](crate::core::databases::Database::drop_database_tables). + fn drop_database_tables(&self) -> Result<(), Error> { + let drop_whitelist_table = " + DROP TABLE whitelist;" + .to_string(); + + let drop_torrents_table = " + DROP TABLE torrents;" + .to_string(); + + let drop_keys_table = " + DROP TABLE keys;" + .to_string(); + + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + conn.execute(&drop_whitelist_table, []) + .and_then(|_| conn.execute(&drop_torrents_table, [])) + .and_then(|_| conn.execute(&drop_keys_table, []))?; + + Ok(()) + } + + /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). 
+    fn load_persistent_torrents(&self) -> Result<PersistentTorrents, Error> {
+        let conn = self.pool.get().map_err(|e| (e, DRIVER))?;
+
+        let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?;
+
+        let torrent_iter = stmt.query_map([], |row| {
+            let info_hash_string: String = row.get(0)?;
+            let info_hash = InfoHash::from_str(&info_hash_string).unwrap();
+            let completed: u32 = row.get(1)?;
+            Ok((info_hash, completed))
+        })?;
+
+        Ok(torrent_iter.filter_map(std::result::Result::ok).collect())
+    }
+
+    /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys).
+    fn load_keys(&self) -> Result<Vec<auth::PeerKey>, Error> {
+        let conn = self.pool.get().map_err(|e| (e, DRIVER))?;
+
+        let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?;
+
+        let keys_iter = stmt.query_map([], |row| {
+            let key: String = row.get(0)?;
+            let opt_valid_until: Option<i64> = row.get(1)?;
+
+            match opt_valid_until {
+                Some(valid_until) => Ok(auth::PeerKey {
+                    key: key.parse::<Key>().unwrap(),
+                    valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())),
+                }),
+                None => Ok(auth::PeerKey {
+                    key: key.parse::<Key>().unwrap(),
+                    valid_until: None,
+                }),
+            }
+        })?;
+
+        let keys: Vec<auth::PeerKey> = keys_iter.filter_map(std::result::Result::ok).collect();
+
+        Ok(keys)
+    }
+
+    /// Refer to [`databases::Database::load_whitelist`](crate::core::databases::Database::load_whitelist).
+    fn load_whitelist(&self) -> Result<Vec<InfoHash>, Error> {
+        let conn = self.pool.get().map_err(|e| (e, DRIVER))?;
+
+        let mut stmt = conn.prepare("SELECT info_hash FROM whitelist")?;
+
+        let info_hash_iter = stmt.query_map([], |row| {
+            let info_hash: String = row.get(0)?;
+
+            Ok(InfoHash::from_str(&info_hash).unwrap())
+        })?;
+
+        let info_hashes: Vec<InfoHash> = info_hash_iter.filter_map(std::result::Result::ok).collect();
+
+        Ok(info_hashes)
+    }
+
+    /// Refer to [`databases::Database::save_persistent_torrent`](crate::core::databases::Database::save_persistent_torrent).
+    fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> {
+        let conn = self.pool.get().map_err(|e| (e, DRIVER))?;
+
+        let insert = conn.execute(
+            "INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2",
+            [info_hash.to_string(), completed.to_string()],
+        )?;
+
+        if insert == 0 {
+            Err(Error::InsertFailed {
+                location: Location::caller(),
+                driver: DRIVER,
+            })
+        } else {
+            Ok(())
+        }
+    }
+
+    /// Refer to [`databases::Database::get_info_hash_from_whitelist`](crate::core::databases::Database::get_info_hash_from_whitelist).
+    fn get_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result<Option<InfoHash>, Error> {
+        let conn = self.pool.get().map_err(|e| (e, DRIVER))?;
+
+        let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?;
+
+        let mut rows = stmt.query([info_hash.to_hex_string()])?;
+
+        let query = rows.next()?;
+
+        Ok(query.map(|f| InfoHash::from_str(&f.get_unwrap::<_, String>(0)).unwrap()))
+    }
+
+    /// Refer to [`databases::Database::add_info_hash_to_whitelist`](crate::core::databases::Database::add_info_hash_to_whitelist).
+    fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result<usize, Error> {
+        let conn = self.pool.get().map_err(|e| (e, DRIVER))?;
+
+        let insert = conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", [info_hash.to_string()])?;
+
+        if insert == 0 {
+            Err(Error::InsertFailed {
+                location: Location::caller(),
+                driver: DRIVER,
+            })
+        } else {
+            Ok(insert)
+        }
+    }
+
+    /// Refer to [`databases::Database::remove_info_hash_from_whitelist`](crate::core::databases::Database::remove_info_hash_from_whitelist).
+    fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result<usize, Error> {
+        let conn = self.pool.get().map_err(|e| (e, DRIVER))?;
+
+        let deleted = conn.execute("DELETE FROM whitelist WHERE info_hash = ?", [info_hash.to_string()])?;
+
+        if deleted == 1 {
+            // should only remove a single record.
+            Ok(deleted)
+        } else {
+            Err(Error::DeleteFailed {
+                location: Location::caller(),
+                error_code: deleted,
+                driver: DRIVER,
+            })
+        }
+    }
+
+    /// Refer to [`databases::Database::get_key_from_keys`](crate::core::databases::Database::get_key_from_keys).
+    fn get_key_from_keys(&self, key: &Key) -> Result<Option<auth::PeerKey>, Error> {
+        let conn = self.pool.get().map_err(|e| (e, DRIVER))?;
+
+        let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?;
+
+        let mut rows = stmt.query([key.to_string()])?;
+
+        let key = rows.next()?;
+
+        Ok(key.map(|f| {
+            let valid_until: Option<i64> = f.get(1).unwrap();
+            let key: String = f.get(0).unwrap();
+
+            match valid_until {
+                Some(valid_until) => auth::PeerKey {
+                    key: key.parse::<Key>().unwrap(),
+                    valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())),
+                },
+                None => auth::PeerKey {
+                    key: key.parse::<Key>().unwrap(),
+                    valid_until: None,
+                },
+            }
+        }))
+    }
+
+    /// Refer to [`databases::Database::add_key_to_keys`](crate::core::databases::Database::add_key_to_keys).
+    fn add_key_to_keys(&self, auth_key: &auth::PeerKey) -> Result<usize, Error> {
+        let conn = self.pool.get().map_err(|e| (e, DRIVER))?;
+
+        let insert = match auth_key.valid_until {
+            Some(valid_until) => conn.execute(
+                "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)",
+                [auth_key.key.to_string(), valid_until.as_secs().to_string()],
+            )?,
+            None => conn.execute(
+                "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)",
+                params![auth_key.key.to_string(), Null],
+            )?,
+        };
+
+        if insert == 0 {
+            Err(Error::InsertFailed {
+                location: Location::caller(),
+                driver: DRIVER,
+            })
+        } else {
+            Ok(insert)
+        }
+    }
+
+    /// Refer to [`databases::Database::remove_key_from_keys`](crate::core::databases::Database::remove_key_from_keys).
+    fn remove_key_from_keys(&self, key: &Key) -> Result<usize, Error> {
+        let conn = self.pool.get().map_err(|e| (e, DRIVER))?;
+
+        let deleted = conn.execute("DELETE FROM keys WHERE key = ?", [key.to_string()])?;
+
+        if deleted == 1 {
+            // should only remove a single record.
+            Ok(deleted)
+        } else {
+            Err(Error::DeleteFailed {
+                location: Location::caller(),
+                error_code: deleted,
+                driver: DRIVER,
+            })
+        }
+    }
+}
diff --git a/src/core/error.rs b/src/core/error.rs
new file mode 100644
index 000000000..d89b030c4
--- /dev/null
+++ b/src/core/error.rs
@@ -0,0 +1,55 @@
+//! Error returned by the core `Tracker`.
+//!
+//! Error | Context | Description
+//! ---|---|---
+//! `PeerKeyNotValid` | Authentication | The supplied key is not valid. It may not be registered or expired.
+//! `PeerNotAuthenticated` | Authentication | The peer did not provide the authentication key.
+//! `TorrentNotWhitelisted` | Authorization | The action cannot be performed on a not-whitelisted torrent (it only applies for trackers running in `listed` or `private_listed` modes).
+//!
+use std::panic::Location;
+
+use torrust_tracker_located_error::LocatedError;
+use torrust_tracker_primitives::info_hash::InfoHash;
+
+use super::auth::ParseKeyError;
+use super::databases;
+
+/// Authentication or authorization error returned by the core `Tracker`
+#[derive(thiserror::Error, Debug, Clone)]
+pub enum Error {
+    // Authentication errors
+    #[error("The supplied key: {key:?}, is not valid: {source}")]
+    PeerKeyNotValid {
+        key: super::auth::Key,
+        source: LocatedError<'static, dyn std::error::Error + Send + Sync>,
+    },
+
+    #[error("The peer is not authenticated, {location}")]
+    PeerNotAuthenticated { location: &'static Location<'static> },
+
+    // Authorization errors
+    #[error("The torrent: {info_hash}, is not whitelisted, {location}")]
+    TorrentNotWhitelisted {
+        info_hash: InfoHash,
+        location: &'static Location<'static>,
+    },
+}
+
+/// Errors related to peers keys.
+#[allow(clippy::module_name_repetitions)] +#[derive(thiserror::Error, Debug, Clone)] +pub enum PeerKeyError { + #[error("Invalid peer key duration: {seconds_valid:?}, is not valid")] + DurationOverflow { seconds_valid: u64 }, + + #[error("Invalid key: {key}")] + InvalidKey { + key: String, + source: LocatedError<'static, ParseKeyError>, + }, + + #[error("Can't persist key: {source}")] + DatabaseError { + source: LocatedError<'static, databases::error::Error>, + }, +} diff --git a/src/core/mod.rs b/src/core/mod.rs new file mode 100644 index 000000000..f12eb9a3d --- /dev/null +++ b/src/core/mod.rs @@ -0,0 +1,2221 @@ +//! The core `tracker` module contains the generic `BitTorrent` tracker logic which is independent of the delivery layer. +//! +//! It contains the tracker services and their dependencies. It's a domain layer which does not +//! specify how the end user should connect to the `Tracker`. +//! +//! Typically this module is intended to be used by higher modules like: +//! +//! - A UDP tracker +//! - A HTTP tracker +//! - A tracker REST API +//! +//! ```text +//! Delivery layer Domain layer +//! +//! HTTP tracker | +//! UDP tracker |> Core tracker +//! Tracker REST API | +//! ``` +//! +//! # Table of contents +//! +//! - [Tracker](#tracker) +//! - [Announce request](#announce-request) +//! - [Scrape request](#scrape-request) +//! - [Torrents](#torrents) +//! - [Peers](#peers) +//! - [Configuration](#configuration) +//! - [Services](#services) +//! - [Authentication](#authentication) +//! - [Statistics](#statistics) +//! - [Persistence](#persistence) +//! +//! # Tracker +//! +//! The `Tracker` is the main struct in this module. `The` tracker has some groups of responsibilities: +//! +//! - **Core tracker**: it handles the information about torrents and peers. +//! - **Authentication**: it handles authentication keys which are used by HTTP trackers. +//! - **Authorization**: it handles the permission to perform requests. +//! 
- **Whitelist**: when the tracker runs in `listed` or `private_listed` mode all operations are restricted to whitelisted torrents. +//! - **Statistics**: it keeps and serves the tracker statistics. +//! +//! Refer to [torrust-tracker-configuration](https://docs.rs/torrust-tracker-configuration) crate docs to get more information about the tracker settings. +//! +//! ## Announce request +//! +//! Handling `announce` requests is the most important task for a `BitTorrent` tracker. +//! +//! A `BitTorrent` swarm is a network of peers that are all trying to download the same torrent. +//! When a peer wants to find other peers it announces itself to the swarm via the tracker. +//! The peer sends its data to the tracker so that the tracker can add it to the swarm. +//! The tracker responds to the peer with the list of other peers in the swarm so that +//! the peer can contact them to start downloading pieces of the file from them. +//! +//! Once you have instantiated the `Tracker` you can `announce` a new [`peer::Peer`] with: +//! +//! ```rust,no_run +//! use std::net::SocketAddr; +//! use std::net::IpAddr; +//! use std::net::Ipv4Addr; +//! use std::str::FromStr; +//! +//! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +//! use torrust_tracker_primitives::DurationSinceUnixEpoch; +//! use torrust_tracker_primitives::peer; +//! use torrust_tracker_primitives::info_hash::InfoHash; +//! +//! let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); +//! +//! let peer = peer::Peer { +//! peer_id: PeerId(*b"-qB00000000000000001"), +//! peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), +//! updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), +//! uploaded: NumberOfBytes::new(0), +//! downloaded: NumberOfBytes::new(0), +//! left: NumberOfBytes::new(0), +//! event: AnnounceEvent::Completed, +//! }; +//! +//! let peer_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); +//! ``` +//! +//! 
```text +//! let announce_data = tracker.announce(&info_hash, &mut peer, &peer_ip).await; +//! ``` +//! +//! The `Tracker` returns the list of peers for the torrent with the infohash `3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0`, +//! filtering out the peer that is making the `announce` request. +//! +//! > **NOTICE**: that the peer argument is mutable because the `Tracker` can change the peer IP if the peer is using a loopback IP. +//! +//! The `peer_ip` argument is the resolved peer ip. It's a common practice that trackers ignore the peer ip in the `announce` request params, +//! and resolve the peer ip using the IP of the client making the request. As the tracker is a domain service, the peer IP must be provided +//! for the `Tracker` user, which is usually a higher component with access the the request metadata, for example, connection data, proxy headers, +//! etcetera. +//! +//! The returned struct is: +//! +//! ```rust,no_run +//! use torrust_tracker_primitives::peer; +//! use torrust_tracker_configuration::AnnouncePolicy; +//! +//! pub struct AnnounceData { +//! pub peers: Vec, +//! pub swarm_stats: SwarmMetadata, +//! pub policy: AnnouncePolicy, // the tracker announce policy. +//! } +//! +//! pub struct SwarmMetadata { +//! pub completed: u32, // The number of peers that have ever completed downloading +//! pub seeders: u32, // The number of active peers that have completed downloading (seeders) +//! pub leechers: u32, // The number of active peers that have not completed downloading (leechers) +//! } +//! +//! // Core tracker configuration +//! pub struct AnnounceInterval { +//! // ... +//! pub interval: u32, // Interval in seconds that the client should wait between sending regular announce requests to the tracker +//! pub interval_min: u32, // Minimum announce interval. Clients must not reannounce more frequently than this +//! // ... +//! } +//! ``` +//! +//! 
Refer to `BitTorrent` BEPs and other sites for more information about the `announce` request: +//! +//! - [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +//! - [BEP 23. Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) +//! - [Vuze docs](https://wiki.vuze.com/w/Announce) +//! +//! ## Scrape request +//! +//! The `scrape` request allows clients to query metadata about the swarm in bulk. +//! +//! An `scrape` request includes a list of infohashes whose swarm metadata you want to collect. +//! +//! The returned struct is: +//! +//! ```rust,no_run +//! use torrust_tracker_primitives::info_hash::InfoHash; +//! use std::collections::HashMap; +//! +//! pub struct ScrapeData { +//! pub files: HashMap, +//! } +//! +//! pub struct SwarmMetadata { +//! pub complete: u32, // The number of active peers that have completed downloading (seeders) +//! pub downloaded: u32, // The number of peers that have ever completed downloading +//! pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) +//! } +//! ``` +//! +//! The JSON representation of a sample `scrape` response would be like the following: +//! +//! ```json +//! { +//! 'files': { +//! 'xxxxxxxxxxxxxxxxxxxx': {'complete': 11, 'downloaded': 13772, 'incomplete': 19}, +//! 'yyyyyyyyyyyyyyyyyyyy': {'complete': 21, 'downloaded': 206, 'incomplete': 20} +//! } +//! } +//! ``` +//! +//! `xxxxxxxxxxxxxxxxxxxx` and `yyyyyyyyyyyyyyyyyyyy` are 20-byte infohash arrays. +//! There are two data structures for infohashes: byte arrays and hex strings: +//! +//! ```rust,no_run +//! use torrust_tracker_primitives::info_hash::InfoHash; +//! use std::str::FromStr; +//! +//! let info_hash: InfoHash = [255u8; 20].into(); +//! +//! assert_eq!( +//! info_hash, +//! InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() +//! ); +//! ``` +//! 
Refer to `BitTorrent` BEPs and other sites for more information about the `scrape` request: +//! +//! - [BEP 48. Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) +//! - [BEP 15. UDP Tracker Protocol for `BitTorrent`. Scrape section](https://www.bittorrent.org/beps/bep_0015.html) +//! - [Vuze docs](https://wiki.vuze.com/w/Scrape) +//! +//! ## Torrents +//! +//! The [`torrent`] module contains all the data structures stored by the `Tracker` except for peers. +//! +//! We can represent the data stored in memory internally by the `Tracker` with this JSON object: +//! +//! ```json +//! { +//! "c1277613db1d28709b034a017ab2cae4be07ae10": { +//! "completed": 0, +//! "peers": { +//! "-qB00000000000000001": { +//! "peer_id": "-qB00000000000000001", +//! "peer_addr": "2.137.87.41:1754", +//! "updated": 1672419840, +//! "uploaded": 120, +//! "downloaded": 60, +//! "left": 60, +//! "event": "started" +//! }, +//! "-qB00000000000000002": { +//! "peer_id": "-qB00000000000000002", +//! "peer_addr": "23.17.287.141:2345", +//! "updated": 1679415984, +//! "uploaded": 80, +//! "downloaded": 20, +//! "left": 40, +//! "event": "started" +//! } +//! } +//! } +//! } +//! ``` +//! +//! The `Tracker` maintains an indexed-by-info-hash list of torrents. For each torrent, it stores a torrent `Entry`. +//! The torrent entry has two attributes: +//! +//! - `completed`: which is hte number of peers that have completed downloading the torrent file/s. As they have completed downloading, +//! they have a full version of the torrent data, and they can provide the full data to other peers. That's why they are also known as "seeders". +//! - `peers`: an indexed and orderer list of peer for the torrent. Each peer contains the data received from the peer in the `announce` request. +//! +//! The [`torrent`] module not only contains the original data obtained from peer via `announce` requests, it also contains +//! aggregate data that can be derived from the original data. 
For example: +//! +//! ```rust,no_run +//! pub struct SwarmMetadata { +//! pub complete: u32, // The number of active peers that have completed downloading (seeders) +//! pub downloaded: u32, // The number of peers that have ever completed downloading +//! pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) +//! } +//! +//! ``` +//! +//! > **NOTICE**: that `complete` or `completed` peers are the peers that have completed downloading, but only the active ones are considered "seeders". +//! +//! `SwarmMetadata` struct follows name conventions for `scrape` responses. See [BEP 48](https://www.bittorrent.org/beps/bep_0048.html), while `SwarmMetadata` +//! is used for the rest of cases. +//! +//! Refer to [`torrent`] module for more details about these data structures. +//! +//! ## Peers +//! +//! A `Peer` is the struct used by the `Tracker` to keep peers data: +//! +//! ```rust,no_run +//! use std::net::SocketAddr; + +//! use aquatic_udp_protocol::PeerId; +//! use torrust_tracker_primitives::DurationSinceUnixEpoch; +//! use aquatic_udp_protocol::NumberOfBytes; +//! use aquatic_udp_protocol::AnnounceEvent; +//! +//! pub struct Peer { +//! pub peer_id: PeerId, // The peer ID +//! pub peer_addr: SocketAddr, // Peer socket address +//! pub updated: DurationSinceUnixEpoch, // Last time (timestamp) when the peer was updated +//! pub uploaded: NumberOfBytes, // Number of bytes the peer has uploaded so far +//! pub downloaded: NumberOfBytes, // Number of bytes the peer has downloaded so far +//! pub left: NumberOfBytes, // The number of bytes this peer still has to download +//! pub event: AnnounceEvent, // The event the peer has announced: `started`, `completed`, `stopped` +//! } +//! ``` +//! +//! Notice that most of the attributes are obtained from the `announce` request. +//! For example, an HTTP announce request would contain the following `GET` parameters: +//! +//! +//! +//! 
The `Tracker` keeps an in-memory ordered data structure with all the torrents and a list of peers for each torrent, together with some swarm metrics. +//! +//! We can represent the data stored in memory with this JSON object: +//! +//! ```json +//! { +//! "c1277613db1d28709b034a017ab2cae4be07ae10": { +//! "completed": 0, +//! "peers": { +//! "-qB00000000000000001": { +//! "peer_id": "-qB00000000000000001", +//! "peer_addr": "2.137.87.41:1754", +//! "updated": 1672419840, +//! "uploaded": 120, +//! "downloaded": 60, +//! "left": 60, +//! "event": "started" +//! }, +//! "-qB00000000000000002": { +//! "peer_id": "-qB00000000000000002", +//! "peer_addr": "23.17.287.141:2345", +//! "updated": 1679415984, +//! "uploaded": 80, +//! "downloaded": 20, +//! "left": 40, +//! "event": "started" +//! } +//! } +//! } +//! } +//! ``` +//! +//! That JSON object does not exist, it's only a representation of the `Tracker` torrents data. +//! +//! `c1277613db1d28709b034a017ab2cae4be07ae10` is the torrent infohash and `completed` contains the number of peers +//! that have a full version of the torrent data, also known as seeders. +//! +//! Refer to [`peer`] module for more information about peers. +//! +//! # Configuration +//! +//! You can control the behavior of this module with the module settings: +//! +//! ```toml +//! [logging] +//! threshold = "debug" +//! +//! [core] +//! inactive_peer_cleanup_interval = 600 +//! listed = false +//! private = false +//! tracker_usage_statistics = true +//! +//! [core.announce_policy] +//! interval = 120 +//! interval_min = 120 +//! +//! [core.database] +//! driver = "sqlite3" +//! path = "./storage/tracker/lib/database/sqlite3.db" +//! +//! [core.net] +//! on_reverse_proxy = false +//! external_ip = "2.137.87.41" +//! +//! [core.tracker_policy] +//! max_peer_timeout = 900 +//! persistent_torrent_completed_stat = false +//! remove_peerless_torrents = true +//! ``` +//! +//! 
Refer to the [`configuration` module documentation](https://docs.rs/torrust-tracker-configuration) to get more information about all options. +//! +//! # Services +//! +//! Services are domain services on top of the core tracker. Right now there are two types of service: +//! +//! - For statistics +//! - For torrents +//! +//! Services usually format the data inside the tracker to make it easier to consume by other parts. +//! They also decouple the internal data structure, used by the tracker, from the way we deliver that data to the consumers. +//! The internal data structure is designed for performance or low memory consumption. And it should be changed +//! without affecting the external consumers. +//! +//! Services can include extra features like pagination, for example. +//! +//! Refer to [`services`] module for more information about services. +//! +//! # Authentication +//! +//! One of the core `Tracker` responsibilities is to create and keep authentication keys. Auth keys are used by HTTP trackers +//! when the tracker is running in `private` or `private_listed` mode. +//! +//! HTTP tracker's clients need to obtain an auth key before starting requesting the tracker. Once the get one they have to include +//! a `PATH` param with the key in all the HTTP requests. For example, when a peer wants to `announce` itself it has to use the +//! HTTP tracker endpoint `GET /announce/:key`. +//! +//! The common way to obtain the keys is by using the tracker API directly or via other applications like the [Torrust Index](https://github.com/torrust/torrust-index). +//! +//! To learn more about tracker authentication, refer to the following modules : +//! +//! - [`auth`] module. +//! - [`core`](crate::core) module. +//! - [`http`](crate::servers::http) module. +//! +//! # Statistics +//! +//! The `Tracker` keeps metrics for some events: +//! +//! ```rust,no_run +//! pub struct Metrics { +//! // IP version 4 +//! +//! // HTTP tracker +//! 
pub tcp4_connections_handled: u64, +//! pub tcp4_announces_handled: u64, +//! pub tcp4_scrapes_handled: u64, +//! +//! // UDP tracker +//! pub udp4_connections_handled: u64, +//! pub udp4_announces_handled: u64, +//! pub udp4_scrapes_handled: u64, +//! +//! // IP version 6 +//! +//! // HTTP tracker +//! pub tcp6_connections_handled: u64, +//! pub tcp6_announces_handled: u64, +//! pub tcp6_scrapes_handled: u64, +//! +//! // UDP tracker +//! pub udp6_connections_handled: u64, +//! pub udp6_announces_handled: u64, +//! pub udp6_scrapes_handled: u64, +//! } +//! ``` +//! +//! The metrics maintained by the `Tracker` are: +//! +//! - `connections_handled`: number of connections handled by the tracker +//! - `announces_handled`: number of `announce` requests handled by the tracker +//! - `scrapes_handled`: number of `scrape` handled requests by the tracker +//! +//! > **NOTICE**: as the HTTP tracker does not have an specific `connection` request like the UDP tracker, `connections_handled` are +//! > increased on every `announce` and `scrape` requests. +//! +//! The tracker exposes an event sender API that allows the tracker users to send events. When a higher application service handles a +//! `connection` , `announce` or `scrape` requests, it notifies the `Tracker` by sending statistics events. +//! +//! For example, the HTTP tracker would send an event like the following when it handles an `announce` request received from a peer using IP version 4. +//! +//! ```text +//! tracker.send_stats_event(statistics::Event::Tcp4Announce).await +//! ``` +//! +//! Refer to [`statistics`] module for more information about statistics. +//! +//! # Persistence +//! +//! Right now the `Tracker` is responsible for storing and load data into and +//! from the database, when persistence is enabled. +//! +//! There are three types of persistent object: +//! +//! - Authentication keys (only expiring keys) +//! - Torrent whitelist +//! - Torrent metrics +//! +//! 
Refer to [`databases`] module for more information about persistence.
+pub mod auth;
+pub mod databases;
+pub mod error;
+pub mod services;
+pub mod statistics;
+pub mod torrent;
+
+pub mod peer_tests;
+
+use std::cmp::max;
+use std::collections::HashMap;
+use std::net::IpAddr;
+use std::panic::Location;
+use std::sync::Arc;
+use std::time::Duration;
+
+use auth::PeerKey;
+use databases::driver::Driver;
+use derive_more::Constructor;
+use error::PeerKeyError;
+use tokio::sync::mpsc::error::SendError;
+use torrust_tracker_clock::clock::Time;
+use torrust_tracker_configuration::v2_0_0::database;
+use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT};
+use torrust_tracker_located_error::Located;
+use torrust_tracker_primitives::info_hash::InfoHash;
+use torrust_tracker_primitives::swarm_metadata::SwarmMetadata;
+use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics;
+use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch};
+use torrust_tracker_torrent_repository::entry::EntrySync;
+use torrust_tracker_torrent_repository::repository::Repository;
+
+use self::auth::Key;
+use self::error::Error;
+use self::torrent::Torrents;
+use crate::core::databases::Database;
+use crate::CurrentClock;
+
+/// The domain layer tracker service.
+///
+/// Its main responsibility is to handle the `announce` and `scrape` requests.
+/// But it's also a container for the `Tracker` configuration, persistence,
+/// authentication and other services.
+///
+/// > **NOTICE**: the `Tracker` is not responsible for handling the network layer.
+/// > Typically, the `Tracker` is used by a higher application service that handles
+/// > the network layer.
+pub struct Tracker {
+    /// The tracker configuration.
+    config: Core,
+
+    /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite)
+    /// or [`MySQL`](crate::core::databases::mysql)
+    database: Arc<Box<dyn Database>>,
+
+    /// Tracker users' keys. Only for private trackers.
+    keys: tokio::sync::RwLock<std::collections::HashMap<Key, auth::PeerKey>>,
+
+    /// The list of allowed torrents. Only for listed trackers.
+    whitelist: tokio::sync::RwLock<std::collections::HashSet<InfoHash>>,
+
+    /// The in-memory torrents repository.
+    torrents: Arc<Torrents>,
+
+    /// Service to send stats events.
+    stats_event_sender: Option<Box<dyn statistics::EventSender>>,
+
+    /// The in-memory stats repo.
+    stats_repository: statistics::Repo,
+}
+
+/// Structure that holds the data returned by the `announce` request.
+#[derive(Clone, Debug, PartialEq, Constructor, Default)]
+pub struct AnnounceData {
+    /// The list of peers that are downloading the same torrent.
+    /// It excludes the peer that made the request.
+    pub peers: Vec<Arc<peer::Peer>>,
+    /// Swarm statistics
+    pub stats: SwarmMetadata,
+    pub policy: AnnouncePolicy,
+}
+
+/// How many peers the peer announcing wants in the announce response.
+#[derive(Clone, Debug, PartialEq, Default)]
+pub enum PeersWanted {
+    /// The peer wants as many peers as possible in the announce response.
+    #[default]
+    All,
+    /// The peer only wants a certain amount of peers in the announce response.
+    Only { amount: usize },
+}
+
+impl PeersWanted {
+    #[must_use]
+    pub fn only(limit: u32) -> Self {
+        let amount: usize = match limit.try_into() {
+            Ok(amount) => amount,
+            Err(_) => TORRENT_PEERS_LIMIT,
+        };
+
+        Self::Only { amount }
+    }
+
+    fn limit(&self) -> usize {
+        match self {
+            PeersWanted::All => TORRENT_PEERS_LIMIT,
+            PeersWanted::Only { amount } => *amount,
+        }
+    }
+}
+
+impl From<i32> for PeersWanted {
+    fn from(value: i32) -> Self {
+        if value > 0 {
+            match value.try_into() {
+                Ok(peers_wanted) => Self::Only { amount: peers_wanted },
+                Err(_) => Self::All,
+            }
+        } else {
+            Self::All
+        }
+    }
+}
+
+/// Structure that holds the data returned by the `scrape` request.
+#[derive(Debug, PartialEq, Default)]
+pub struct ScrapeData {
+    /// A map of infohashes and swarm metadata for each torrent.
+    pub files: HashMap<InfoHash, SwarmMetadata>,
+}
+
+impl ScrapeData {
+    /// Creates a new empty `ScrapeData` with no files (torrents).
+    #[must_use]
+    pub fn empty() -> Self {
+        let files: HashMap<InfoHash, SwarmMetadata> = HashMap::new();
+        Self { files }
+    }
+
+    /// Creates a new `ScrapeData` with zeroed metadata for each torrent.
+    #[must_use]
+    pub fn zeroed(info_hashes: &Vec<InfoHash>) -> Self {
+        let mut scrape_data = Self::empty();
+
+        for info_hash in info_hashes {
+            scrape_data.add_file(info_hash, SwarmMetadata::zeroed());
+        }
+
+        scrape_data
+    }
+
+    /// Adds a torrent to the `ScrapeData`.
+    pub fn add_file(&mut self, info_hash: &InfoHash, swarm_metadata: SwarmMetadata) {
+        self.files.insert(*info_hash, swarm_metadata);
+    }
+
+    /// Adds a torrent to the `ScrapeData` with zeroed metadata.
+    pub fn add_file_with_zeroed_metadata(&mut self, info_hash: &InfoHash) {
+        self.files.insert(*info_hash, SwarmMetadata::zeroed());
+    }
+}
+
+/// This type contains the info needed to add a new tracker key.
+///
+/// You can upload a pre-generated key or let the app generate a new one.
+/// You can also set an expiration date or leave it empty (`None`) if you want
+/// to create a permanent key that does not expire.
+#[derive(Debug)]
+pub struct AddKeyRequest {
+    /// The pre-generated key. Use `None` to generate a random key.
+    pub opt_key: Option<String>,
+
+    /// How long the key will be valid in seconds. Use `None` for permanent keys.
+    pub opt_seconds_valid: Option<u64>,
+}
+
+impl Tracker {
+    /// `Tracker` constructor.
+    ///
+    /// # Errors
+    ///
+    /// Will return a `databases::error::Error` if unable to connect to database. The `Tracker` is responsible for the persistence.
+ pub fn new( + config: &Core, + stats_event_sender: Option>, + stats_repository: statistics::Repo, + ) -> Result { + let driver = match config.database.driver { + database::Driver::Sqlite3 => Driver::Sqlite3, + database::Driver::MySQL => Driver::MySQL, + }; + + let database = Arc::new(databases::driver::build(&driver, &config.database.path)?); + + Ok(Tracker { + config: config.clone(), + keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), + whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()), + torrents: Arc::default(), + stats_event_sender, + stats_repository, + database, + }) + } + + /// Returns `true` is the tracker is in public mode. + pub fn is_public(&self) -> bool { + !self.config.private + } + + /// Returns `true` is the tracker is in private mode. + pub fn is_private(&self) -> bool { + self.config.private + } + + /// Returns `true` is the tracker is in whitelisted mode. + pub fn is_listed(&self) -> bool { + self.config.listed + } + + /// Returns `true` if the tracker requires authentication. + pub fn requires_authentication(&self) -> bool { + self.is_private() + } + + /// Returns `true` is the tracker is in whitelisted mode. + pub fn is_behind_reverse_proxy(&self) -> bool { + self.config.net.on_reverse_proxy + } + + pub fn get_announce_policy(&self) -> AnnouncePolicy { + self.config.announce_policy + } + + pub fn get_maybe_external_ip(&self) -> Option { + self.config.net.external_ip + } + + /// It handles an announce request. + /// + /// # Context: Tracker + /// + /// BEP 03: [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html). + pub fn announce( + &self, + info_hash: &InfoHash, + peer: &mut peer::Peer, + remote_client_ip: &IpAddr, + peers_wanted: &PeersWanted, + ) -> AnnounceData { + // code-review: maybe instead of mutating the peer we could just return + // a tuple with the new peer and the announce data: (Peer, AnnounceData). 
+ // It could even be a different struct: `StoredPeer` or `PublicPeer`. + + // code-review: in the `scrape` function we perform an authorization check. + // We check if the torrent is whitelisted. Should we also check authorization here? + // I think so because the `Tracker` has the responsibility for checking authentication and authorization. + // The `Tracker` has delegated that responsibility to the handlers + // (because we want to return a friendly error response) but that does not mean we should + // double-check authorization at this domain level too. + // I would propose to return a `Result` here. + // Besides, regarding authentication the `Tracker` is also responsible for authentication but + // we are actually handling authentication at the handlers level. So I would extract that + // responsibility into another authentication service. + + tracing::debug!("Before: {peer:?}"); + peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip)); + tracing::debug!("After: {peer:?}"); + + let stats = self.upsert_peer_and_get_stats(info_hash, peer); + + let peers = self.get_peers_for(info_hash, peer, peers_wanted.limit()); + + AnnounceData { + peers, + stats, + policy: self.get_announce_policy(), + } + } + + /// It handles a scrape request. + /// + /// # Context: Tracker + /// + /// BEP 48: [Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). + pub async fn scrape(&self, info_hashes: &Vec) -> ScrapeData { + let mut scrape_data = ScrapeData::empty(); + + for info_hash in info_hashes { + let swarm_metadata = match self.authorize(info_hash).await { + Ok(()) => self.get_swarm_metadata(info_hash), + Err(_) => SwarmMetadata::zeroed(), + }; + scrape_data.add_file(info_hash, swarm_metadata); + } + + scrape_data + } + + /// It returns the data for a `scrape` response. 
+ fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { + match self.torrents.get(info_hash) { + Some(torrent_entry) => torrent_entry.get_swarm_metadata(), + None => SwarmMetadata::default(), + } + } + + /// It loads the torrents from database into memory. It only loads the torrent entry list with the number of seeders for each torrent. + /// Peers data is not persisted. + /// + /// # Context: Tracker + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. + pub fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { + let persistent_torrents = self.database.load_persistent_torrents()?; + + self.torrents.import_persistent(&persistent_torrents); + + Ok(()) + } + + /// # Context: Tracker + /// + /// Get torrent peers for a given torrent and client. + /// + /// It filters out the client making the request. + fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { + match self.torrents.get(info_hash) { + None => vec![], + Some(entry) => entry.get_peers_for_client(&peer.peer_addr, Some(max(limit, TORRENT_PEERS_LIMIT))), + } + } + + /// # Context: Tracker + /// + /// Get torrent peers for a given torrent. + pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { + match self.torrents.get(info_hash) { + None => vec![], + Some(entry) => entry.get_peers(Some(TORRENT_PEERS_LIMIT)), + } + } + + /// It updates the torrent entry in memory, it also stores in the database + /// the torrent info data which is persistent, and finally return the data + /// needed for a `announce` request response. 
+ /// + /// # Context: Tracker + pub fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { + let swarm_metadata_before = match self.torrents.get_swarm_metadata(info_hash) { + Some(swarm_metadata) => swarm_metadata, + None => SwarmMetadata::zeroed(), + }; + + self.torrents.upsert_peer(info_hash, peer); + + let swarm_metadata_after = match self.torrents.get_swarm_metadata(info_hash) { + Some(swarm_metadata) => swarm_metadata, + None => SwarmMetadata::zeroed(), + }; + + if swarm_metadata_before != swarm_metadata_after { + self.persist_stats(info_hash, &swarm_metadata_after); + } + + swarm_metadata_after + } + + /// It stores the torrents stats into the database (if persistency is enabled). + /// + /// # Context: Tracker + fn persist_stats(&self, info_hash: &InfoHash, swarm_metadata: &SwarmMetadata) { + if self.config.tracker_policy.persistent_torrent_completed_stat { + let completed = swarm_metadata.downloaded; + let info_hash = *info_hash; + + drop(self.database.save_persistent_torrent(&info_hash, completed)); + } + } + + /// It calculates and returns the general `Tracker` + /// [`TorrentsMetrics`] + /// + /// # Context: Tracker + /// + /// # Panics + /// Panics if unable to get the torrent metrics. + pub fn get_torrents_metrics(&self) -> TorrentsMetrics { + self.torrents.get_metrics() + } + + /// Remove inactive peers and (optionally) peerless torrents. + /// + /// # Context: Tracker + pub fn cleanup_torrents(&self) { + let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))) + .unwrap_or_default(); + + self.torrents.remove_inactive_peers(current_cutoff); + + if self.config.tracker_policy.remove_peerless_torrents { + self.torrents.remove_peerless_torrents(&self.config.tracker_policy); + } + } + + /// It authenticates the peer `key` against the `Tracker` authentication + /// key list. 
+ /// + /// # Errors + /// + /// Will return an error if the the authentication key cannot be verified. + /// + /// # Context: Authentication + pub async fn authenticate(&self, key: &Key) -> Result<(), auth::Error> { + if self.is_private() { + self.verify_auth_key(key).await + } else { + Ok(()) + } + } + + /// Adds new peer keys to the tracker. + /// + /// Keys can be pre-generated or randomly created. They can also be permanent or expire. + /// + /// # Errors + /// + /// Will return an error if: + /// + /// - The key duration overflows the duration type maximum value. + /// - The provided pre-generated key is invalid. + /// - The key could not been persisted due to database issues. + pub async fn add_peer_key(&self, add_key_req: AddKeyRequest) -> Result { + // code-review: all methods related to keys should be moved to a new independent "keys" service. + + match add_key_req.opt_key { + // Upload pre-generated key + Some(pre_existing_key) => { + if let Some(seconds_valid) = add_key_req.opt_seconds_valid { + // Expiring key + let Some(valid_until) = CurrentClock::now_add(&Duration::from_secs(seconds_valid)) else { + return Err(PeerKeyError::DurationOverflow { seconds_valid }); + }; + + let key = pre_existing_key.parse::(); + + match key { + Ok(key) => match self.add_auth_key(key, Some(valid_until)).await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { + source: Located(err).into(), + }), + }, + Err(err) => Err(PeerKeyError::InvalidKey { + key: pre_existing_key, + source: Located(err).into(), + }), + } + } else { + // Permanent key + let key = pre_existing_key.parse::(); + + match key { + Ok(key) => match self.add_permanent_auth_key(key).await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { + source: Located(err).into(), + }), + }, + Err(err) => Err(PeerKeyError::InvalidKey { + key: pre_existing_key, + source: Located(err).into(), + }), + } + } + } + // Generate a new random key + None => match 
add_key_req.opt_seconds_valid { + // Expiring key + Some(seconds_valid) => match self.generate_auth_key(Some(Duration::from_secs(seconds_valid))).await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { + source: Located(err).into(), + }), + }, + // Permanent key + None => match self.generate_permanent_auth_key().await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { + source: Located(err).into(), + }), + }, + }, + } + } + + /// It generates a new permanent authentication key. + /// + /// Authentication keys are used by HTTP trackers. + /// + /// # Context: Authentication + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the database. + pub async fn generate_permanent_auth_key(&self) -> Result { + self.generate_auth_key(None).await + } + + /// It generates a new expiring authentication key. + /// + /// Authentication keys are used by HTTP trackers. + /// + /// # Context: Authentication + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the database. + /// + /// # Arguments + /// + /// * `lifetime` - The duration in seconds for the new key. The key will be + /// no longer valid after `lifetime` seconds. + pub async fn generate_auth_key(&self, lifetime: Option) -> Result { + let auth_key = auth::generate_key(lifetime); + + self.database.add_key_to_keys(&auth_key)?; + self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); + Ok(auth_key) + } + + /// It adds a pre-generated permanent authentication key. + /// + /// Authentication keys are used by HTTP trackers. + /// + /// # Context: Authentication + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the + /// database. For example, if the key already exist. + /// + /// # Arguments + /// + /// * `key` - The pre-generated key. 
+ pub async fn add_permanent_auth_key(&self, key: Key) -> Result { + self.add_auth_key(key, None).await + } + + /// It adds a pre-generated authentication key. + /// + /// Authentication keys are used by HTTP trackers. + /// + /// # Context: Authentication + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the + /// database. For example, if the key already exist. + /// + /// # Arguments + /// + /// * `key` - The pre-generated key. + /// * `lifetime` - The duration in seconds for the new key. The key will be + /// no longer valid after `lifetime` seconds. + pub async fn add_auth_key( + &self, + key: Key, + valid_until: Option, + ) -> Result { + let auth_key = PeerKey { key, valid_until }; + + // code-review: should we return a friendly error instead of the DB + // constrain error when the key already exist? For now, it's returning + // the specif error for each DB driver when a UNIQUE constrain fails. + self.database.add_key_to_keys(&auth_key)?; + self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); + Ok(auth_key) + } + + /// It removes an authentication key. + /// + /// # Context: Authentication + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `key` to the database. + pub async fn remove_auth_key(&self, key: &Key) -> Result<(), databases::error::Error> { + self.database.remove_key_from_keys(key)?; + self.keys.write().await.remove(key); + Ok(()) + } + + /// It verifies an authentication key. + /// + /// # Context: Authentication + /// + /// # Errors + /// + /// Will return a `key::Error` if unable to get any `auth_key`. 
+ async fn verify_auth_key(&self, key: &Key) -> Result<(), auth::Error> { + match self.keys.read().await.get(key) { + None => Err(auth::Error::UnableToReadKey { + location: Location::caller(), + key: Box::new(key.clone()), + }), + Some(key) => match self.config.private_mode { + Some(private_mode) => { + if private_mode.check_keys_expiration { + return auth::verify_key_expiration(key); + } + + Ok(()) + } + None => auth::verify_key_expiration(key), + }, + } + } + + /// The `Tracker` stores the authentication keys in memory and in the database. + /// In case you need to restart the `Tracker` you can load the keys from the database + /// into memory with this function. Keys are automatically stored in the database when they + /// are generated. + /// + /// # Context: Authentication + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to `load_keys` from the database. + pub async fn load_keys_from_database(&self) -> Result<(), databases::error::Error> { + let keys_from_database = self.database.load_keys()?; + let mut keys = self.keys.write().await; + + keys.clear(); + + for key in keys_from_database { + keys.insert(key.key.clone(), key); + } + + Ok(()) + } + + /// Right now, there is only authorization when the `Tracker` runs in + /// `listed` or `private_listed` modes. + /// + /// # Context: Authorization + /// + /// # Errors + /// + /// Will return an error if the tracker is running in `listed` mode + /// and the infohash is not whitelisted. + pub async fn authorize(&self, info_hash: &InfoHash) -> Result<(), Error> { + if !self.is_listed() { + return Ok(()); + } + + if self.is_info_hash_whitelisted(info_hash).await { + return Ok(()); + } + + Err(Error::TorrentNotWhitelisted { + info_hash: *info_hash, + location: Location::caller(), + }) + } + + /// It adds a torrent to the whitelist. + /// Adding torrents is not relevant to public trackers. 
+ /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. + pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + self.add_torrent_to_database_whitelist(info_hash)?; + self.add_torrent_to_memory_whitelist(info_hash).await; + Ok(()) + } + + /// It adds a torrent to the whitelist if it has not been whitelisted previously + fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; + + if is_whitelisted { + return Ok(()); + } + + self.database.add_info_hash_to_whitelist(*info_hash)?; + + Ok(()) + } + + pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { + self.whitelist.write().await.insert(*info_hash) + } + + /// It removes a torrent from the whitelist. + /// Removing torrents is not relevant to public trackers. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. + pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + self.remove_torrent_from_database_whitelist(info_hash)?; + self.remove_torrent_from_memory_whitelist(info_hash).await; + Ok(()) + } + + /// It removes a torrent from the whitelist in the database. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. 
+ pub fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; + + if !is_whitelisted { + return Ok(()); + } + + self.database.remove_info_hash_from_whitelist(*info_hash)?; + + Ok(()) + } + + /// It removes a torrent from the whitelist in memory. + /// + /// # Context: Whitelist + pub async fn remove_torrent_from_memory_whitelist(&self, info_hash: &InfoHash) -> bool { + self.whitelist.write().await.remove(info_hash) + } + + /// It checks if a torrent is whitelisted. + /// + /// # Context: Whitelist + pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { + self.whitelist.read().await.contains(info_hash) + } + + /// It loads the whitelist from the database. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. + pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> { + let whitelisted_torrents_from_database = self.database.load_whitelist()?; + let mut whitelist = self.whitelist.write().await; + + whitelist.clear(); + + for info_hash in whitelisted_torrents_from_database { + let _: bool = whitelist.insert(info_hash); + } + + Ok(()) + } + + /// It return the `Tracker` [`statistics::Metrics`]. + /// + /// # Context: Statistics + pub async fn get_stats(&self) -> tokio::sync::RwLockReadGuard<'_, statistics::Metrics> { + self.stats_repository.get_stats().await + } + + /// It allows to send a statistic events which eventually will be used to update [`statistics::Metrics`]. + /// + /// # Context: Statistics + pub async fn send_stats_event(&self, event: statistics::Event) -> Option>> { + match &self.stats_event_sender { + None => None, + Some(stats_event_sender) => stats_event_sender.send_event(event).await, + } + } + + /// It drops the database tables. 
+ /// + /// # Errors + /// + /// Will return `Err` if unable to drop tables. + pub fn drop_database_tables(&self) -> Result<(), databases::error::Error> { + // todo: this is only used for testing. WE have to pass the database + // reference directly to the tests instead of via the tracker. + self.database.drop_database_tables() + } +} + +#[must_use] +fn assign_ip_address_to_peer(remote_client_ip: &IpAddr, tracker_external_ip: Option) -> IpAddr { + if let Some(host_ip) = tracker_external_ip.filter(|_| remote_client_ip.is_loopback()) { + host_ip + } else { + *remote_client_ip + } +} + +#[cfg(test)] +mod tests { + + mod the_tracker { + + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::str::FromStr; + use std::sync::Arc; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; + use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + use torrust_tracker_test_helpers::configuration; + + use crate::core::peer::Peer; + use crate::core::services::tracker_factory; + use crate::core::{TorrentsMetrics, Tracker}; + use crate::shared::bit_torrent::info_hash::fixture::gen_seeded_infohash; + + fn public_tracker() -> Tracker { + tracker_factory(&configuration::ephemeral_public()) + } + + fn private_tracker() -> Tracker { + tracker_factory(&configuration::ephemeral_private()) + } + + fn whitelisted_tracker() -> Tracker { + tracker_factory(&configuration::ephemeral_listed()) + } + + pub fn tracker_persisting_torrents_in_database() -> Tracker { + let mut configuration = configuration::ephemeral(); + configuration.core.tracker_policy.persistent_torrent_completed_stat = true; + tracker_factory(&configuration) + } + + fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() + } + + // The client peer IP + fn peer_ip() -> IpAddr { + IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) + } + + /// Sample peer 
whose state is not relevant for the tests + fn sample_peer() -> Peer { + complete_peer() + } + + /// Sample peer when for tests that need more than one peer + fn sample_peer_1() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000001"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), + event: AnnounceEvent::Completed, + } + } + + /// Sample peer when for tests that need more than one peer + fn sample_peer_2() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000002"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8082), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), + event: AnnounceEvent::Completed, + } + } + + fn seeder() -> Peer { + complete_peer() + } + + fn leecher() -> Peer { + incomplete_peer() + } + + fn started_peer() -> Peer { + incomplete_peer() + } + + fn completed_peer() -> Peer { + complete_peer() + } + + /// A peer that counts as `complete` is swarm metadata + /// IMPORTANT!: it only counts if the it has been announce at least once before + /// announcing the `AnnounceEvent::Completed` event. 
+ fn complete_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } + + /// A peer that counts as `incomplete` is swarm metadata + fn incomplete_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(1000), // Still bytes to download + event: AnnounceEvent::Started, + } + } + + #[tokio::test] + async fn should_collect_torrent_metrics() { + let tracker = public_tracker(); + + let torrents_metrics = tracker.get_torrents_metrics(); + + assert_eq!( + torrents_metrics, + TorrentsMetrics { + complete: 0, + downloaded: 0, + incomplete: 0, + torrents: 0 + } + ); + } + + #[tokio::test] + async fn it_should_return_the_peers_for_a_given_torrent() { + let tracker = public_tracker(); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + tracker.upsert_peer_and_get_stats(&info_hash, &peer); + + let peers = tracker.get_torrent_peers(&info_hash); + + assert_eq!(peers, vec![Arc::new(peer)]); + } + + /// It generates a peer id from a number where the number is the last + /// part of the peer ID. For example, for `12` it returns + /// `-qB00000000000000012`. + fn numeric_peer_id(two_digits_value: i32) -> PeerId { + // Format idx as a string with leading zeros, ensuring it has exactly 2 digits + let idx_str = format!("{two_digits_value:02}"); + + // Create the base part of the peer ID. 
+ let base = b"-qB00000000000000000"; + + // Concatenate the base with idx bytes, ensuring the total length is 20 bytes. + let mut peer_id_bytes = [0u8; 20]; + peer_id_bytes[..base.len()].copy_from_slice(base); + peer_id_bytes[base.len() - idx_str.len()..].copy_from_slice(idx_str.as_bytes()); + + PeerId(peer_id_bytes) + } + + #[tokio::test] + async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { + let tracker = public_tracker(); + + let info_hash = sample_info_hash(); + + for idx in 1..=75 { + let peer = Peer { + peer_id: numeric_peer_id(idx), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + }; + + tracker.upsert_peer_and_get_stats(&info_hash, &peer); + } + + let peers = tracker.get_torrent_peers(&info_hash); + + assert_eq!(peers.len(), 74); + } + + #[tokio::test] + async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { + let tracker = public_tracker(); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + tracker.upsert_peer_and_get_stats(&info_hash, &peer); + + let peers = tracker.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); + + assert_eq!(peers, vec![]); + } + + #[tokio::test] + async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { + let tracker = public_tracker(); + + let info_hash = sample_info_hash(); + + let excluded_peer = sample_peer(); + + tracker.upsert_peer_and_get_stats(&info_hash, &excluded_peer); + + // Add 74 peers + for idx in 2..=75 { + let peer = Peer { + peer_id: numeric_peer_id(idx), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + 
uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + }; + + tracker.upsert_peer_and_get_stats(&info_hash, &peer); + } + + let peers = tracker.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); + + assert_eq!(peers.len(), 74); + } + + #[tokio::test] + async fn it_should_return_the_torrent_metrics() { + let tracker = public_tracker(); + + tracker.upsert_peer_and_get_stats(&sample_info_hash(), &leecher()); + + let torrent_metrics = tracker.get_torrents_metrics(); + + assert_eq!( + torrent_metrics, + TorrentsMetrics { + complete: 0, + downloaded: 0, + incomplete: 1, + torrents: 1, + } + ); + } + + #[tokio::test] + async fn it_should_get_many_the_torrent_metrics() { + let tracker = public_tracker(); + + let start_time = std::time::Instant::now(); + for i in 0..1_000_000 { + tracker.upsert_peer_and_get_stats(&gen_seeded_infohash(&i), &leecher()); + } + let result_a = start_time.elapsed(); + + let start_time = std::time::Instant::now(); + let torrent_metrics = tracker.get_torrents_metrics(); + let result_b = start_time.elapsed(); + + assert_eq!( + (torrent_metrics), + (TorrentsMetrics { + complete: 0, + downloaded: 0, + incomplete: 1_000_000, + torrents: 1_000_000, + }), + "{result_a:?} {result_b:?}" + ); + } + + mod for_all_config_modes { + + mod handling_an_announce_request { + + use std::sync::Arc; + + use crate::core::tests::the_tracker::{ + peer_ip, public_tracker, sample_info_hash, sample_peer, sample_peer_1, sample_peer_2, + }; + use crate::core::PeersWanted; + + mod should_assign_the_ip_to_the_peer { + + use std::net::{IpAddr, Ipv4Addr}; + + use crate::core::assign_ip_address_to_peer; + + #[test] + fn using_the_source_ip_instead_of_the_ip_in_the_announce_request() { + let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + assert_eq!(peer_ip, remote_ip); + } + + mod 
and_when_the_client_ip_is_a_ipv4_loopback_ip { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::str::FromStr; + + use crate::core::assign_ip_address_to_peer; + + #[test] + fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + assert_eq!(peer_ip, remote_ip); + } + + #[test] + fn it_should_use_the_external_tracker_ip_in_tracker_configuration_if_it_is_defined() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv6_ip( + ) { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let tracker_external_ip = + IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + } + + mod and_when_client_ip_is_a_ipv6_loopback_ip { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::str::FromStr; + + use crate::core::assign_ip_address_to_peer; + + #[test] + fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + assert_eq!(peer_ip, remote_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_tracker_configuration_if_it_is_defined() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let tracker_external_ip = + IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, 
Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv4_ip( + ) { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + } + } + + #[tokio::test] + async fn it_should_return_the_announce_data_with_an_empty_peer_list_when_it_is_the_first_announced_peer() { + let tracker = public_tracker(); + + let mut peer = sample_peer(); + + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); + + assert_eq!(announce_data.peers, vec![]); + } + + #[tokio::test] + async fn it_should_return_the_announce_data_with_the_previously_announced_peers() { + let tracker = public_tracker(); + + let mut previously_announced_peer = sample_peer_1(); + tracker.announce( + &sample_info_hash(), + &mut previously_announced_peer, + &peer_ip(), + &PeersWanted::All, + ); + + let mut peer = sample_peer_2(); + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); + + assert_eq!(announce_data.peers, vec![Arc::new(previously_announced_peer)]); + } + + mod it_should_update_the_swarm_stats_for_the_torrent { + + use crate::core::tests::the_tracker::{ + completed_peer, leecher, peer_ip, public_tracker, sample_info_hash, seeder, started_peer, + }; + use crate::core::PeersWanted; + + #[tokio::test] + async fn when_the_peer_is_a_seeder() { + let tracker = public_tracker(); + + let mut peer = seeder(); + + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); + + assert_eq!(announce_data.stats.complete, 1); + } + + #[tokio::test] + async fn when_the_peer_is_a_leecher() { + let tracker = public_tracker(); + + let mut 
peer = leecher(); + + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); + + assert_eq!(announce_data.stats.incomplete, 1); + } + + #[tokio::test] + async fn when_a_previously_announced_started_peer_has_completed_downloading() { + let tracker = public_tracker(); + + // We have to announce with "started" event because peer does not count if peer was not previously known + let mut started_peer = started_peer(); + tracker.announce(&sample_info_hash(), &mut started_peer, &peer_ip(), &PeersWanted::All); + + let mut completed_peer = completed_peer(); + let announce_data = + tracker.announce(&sample_info_hash(), &mut completed_peer, &peer_ip(), &PeersWanted::All); + + assert_eq!(announce_data.stats.downloaded, 1); + } + } + } + + mod handling_a_scrape_request { + + use std::net::{IpAddr, Ipv4Addr}; + + use torrust_tracker_primitives::info_hash::InfoHash; + + use crate::core::tests::the_tracker::{complete_peer, incomplete_peer, public_tracker}; + use crate::core::{PeersWanted, ScrapeData, SwarmMetadata}; + + #[tokio::test] + async fn it_should_return_a_zeroed_swarm_metadata_for_the_requested_file_if_the_tracker_does_not_have_that_torrent( + ) { + let tracker = public_tracker(); + + let info_hashes = vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()]; + + let scrape_data = tracker.scrape(&info_hashes).await; + + let mut expected_scrape_data = ScrapeData::empty(); + + expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[0]); + + assert_eq!(scrape_data, expected_scrape_data); + } + + #[tokio::test] + async fn it_should_return_the_swarm_metadata_for_the_requested_file_if_the_tracker_has_that_torrent() { + let tracker = public_tracker(); + + let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); + + // Announce a "complete" peer for the torrent + let mut complete_peer = complete_peer(); + tracker.announce( + &info_hash, + &mut complete_peer, + &IpAddr::V4(Ipv4Addr::new(126, 0, 
0, 10)), + &PeersWanted::All, + ); + + // Announce an "incomplete" peer for the torrent + let mut incomplete_peer = incomplete_peer(); + tracker.announce( + &info_hash, + &mut incomplete_peer, + &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11)), + &PeersWanted::All, + ); + + // Scrape + let scrape_data = tracker.scrape(&vec![info_hash]).await; + + // The expected swarm metadata for the file + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file( + &info_hash, + SwarmMetadata { + complete: 0, // the "complete" peer does not count because it was not previously known + downloaded: 0, + incomplete: 1, // the "incomplete" peer we have just announced + }, + ); + + assert_eq!(scrape_data, expected_scrape_data); + } + + #[tokio::test] + async fn it_should_allow_scraping_for_multiple_torrents() { + let tracker = public_tracker(); + + let info_hashes = vec![ + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1".parse::().unwrap(), + ]; + + let scrape_data = tracker.scrape(&info_hashes).await; + + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[0]); + expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[1]); + + assert_eq!(scrape_data, expected_scrape_data); + } + } + } + + mod configured_as_whitelisted { + + mod handling_authorization { + use crate::core::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; + + #[tokio::test] + async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { + let tracker = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + let result = tracker.add_torrent_to_whitelist(&info_hash).await; + assert!(result.is_ok()); + + let result = tracker.authorize(&info_hash).await; + assert!(result.is_ok()); + } + + #[tokio::test] + async fn it_should_not_authorize_the_announce_and_scrape_actions_on_not_whitelisted_torrents() { + let tracker = 
whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + let result = tracker.authorize(&info_hash).await; + assert!(result.is_err()); + } + } + + mod handling_the_torrent_whitelist { + use crate::core::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; + + #[tokio::test] + async fn it_should_add_a_torrent_to_the_whitelist() { + let tracker = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + assert!(tracker.is_info_hash_whitelisted(&info_hash).await); + } + + #[tokio::test] + async fn it_should_remove_a_torrent_from_the_whitelist() { + let tracker = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + tracker.remove_torrent_from_whitelist(&info_hash).await.unwrap(); + + assert!(!tracker.is_info_hash_whitelisted(&info_hash).await); + } + + mod persistence { + use crate::core::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; + + #[tokio::test] + async fn it_should_load_the_whitelist_from_the_database() { + let tracker = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + // Remove torrent from the in-memory whitelist + tracker.whitelist.write().await.remove(&info_hash); + assert!(!tracker.is_info_hash_whitelisted(&info_hash).await); + + tracker.load_whitelist_from_database().await.unwrap(); + + assert!(tracker.is_info_hash_whitelisted(&info_hash).await); + } + } + } + + mod handling_an_announce_request {} + + mod handling_an_scrape_request { + + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + + use crate::core::tests::the_tracker::{ + complete_peer, incomplete_peer, peer_ip, sample_info_hash, whitelisted_tracker, + }; + use crate::core::{PeersWanted, ScrapeData}; + + #[test] + fn 
it_should_be_able_to_build_a_zeroed_scrape_data_for_a_list_of_info_hashes() { + // Zeroed scrape data is used when the authentication for the scrape request fails. + + let sample_info_hash = sample_info_hash(); + + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file_with_zeroed_metadata(&sample_info_hash); + + assert_eq!(ScrapeData::zeroed(&vec![sample_info_hash]), expected_scrape_data); + } + + #[tokio::test] + async fn it_should_return_the_zeroed_swarm_metadata_for_the_requested_file_if_it_is_not_whitelisted() { + let tracker = whitelisted_tracker(); + + let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); + + let mut peer = incomplete_peer(); + tracker.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); + + // Announce twice to force non zeroed swarm metadata + let mut peer = complete_peer(); + tracker.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); + + let scrape_data = tracker.scrape(&vec![info_hash]).await; + + // The expected zeroed swarm metadata for the file + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file(&info_hash, SwarmMetadata::zeroed()); + + assert_eq!(scrape_data, expected_scrape_data); + } + } + } + + mod configured_as_private { + + mod handling_authentication { + use std::str::FromStr; + use std::time::Duration; + + use crate::core::auth::{self}; + use crate::core::tests::the_tracker::private_tracker; + + #[tokio::test] + async fn it_should_fail_authenticating_a_peer_when_it_uses_an_unregistered_key() { + let tracker = private_tracker(); + + let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let result = tracker.authenticate(&unregistered_key).await; + + assert!(result.is_err()); + } + + #[tokio::test] + async fn it_should_fail_verifying_an_unregistered_authentication_key() { + let tracker = private_tracker(); + + let unregistered_key = 
auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + assert!(tracker.verify_auth_key(&unregistered_key).await.is_err()); + } + + #[tokio::test] + async fn it_should_remove_an_authentication_key() { + let tracker = private_tracker(); + + let expiring_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); + + let result = tracker.remove_auth_key(&expiring_key.key()).await; + + assert!(result.is_ok()); + assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_err()); + } + + #[tokio::test] + async fn it_should_load_authentication_keys_from_the_database() { + let tracker = private_tracker(); + + let expiring_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); + + // Remove the newly generated key in memory + tracker.keys.write().await.remove(&expiring_key.key()); + + let result = tracker.load_keys_from_database().await; + + assert!(result.is_ok()); + assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_ok()); + } + + mod with_expiring_and { + + mod randomly_generated_keys { + use std::time::Duration; + + use torrust_tracker_clock::clock::Time; + use torrust_tracker_configuration::v2_0_0::core::PrivateMode; + + use crate::core::auth::Key; + use crate::core::tests::the_tracker::private_tracker; + use crate::CurrentClock; + + #[tokio::test] + async fn it_should_generate_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); + + assert_eq!( + peer_key.valid_until, + Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) + ); + } + + #[tokio::test] + async fn it_should_authenticate_a_peer_with_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); + + let result = tracker.authenticate(&peer_key.key()).await; + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn 
it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { + let mut tracker = private_tracker(); + + tracker.config.private_mode = Some(PrivateMode { + check_keys_expiration: false, + }); + + let past_timestamp = Duration::ZERO; + + let peer_key = tracker + .add_auth_key(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), Some(past_timestamp)) + .await + .unwrap(); + + assert!(tracker.authenticate(&peer_key.key()).await.is_ok()); + } + } + + mod pre_generated_keys { + use std::time::Duration; + + use torrust_tracker_clock::clock::Time; + use torrust_tracker_configuration::v2_0_0::core::PrivateMode; + + use crate::core::auth::Key; + use crate::core::tests::the_tracker::private_tracker; + use crate::core::AddKeyRequest; + use crate::CurrentClock; + + #[tokio::test] + async fn it_should_add_a_pre_generated_key() { + let tracker = private_tracker(); + + let peer_key = tracker + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: Some(100), + }) + .await + .unwrap(); + + assert_eq!( + peer_key.valid_until, + Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) + ); + } + + #[tokio::test] + async fn it_should_authenticate_a_peer_with_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: Some(100), + }) + .await + .unwrap(); + + let result = tracker.authenticate(&peer_key.key()).await; + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { + let mut tracker = private_tracker(); + + tracker.config.private_mode = Some(PrivateMode { + check_keys_expiration: false, + }); + + let peer_key = tracker + .add_peer_key(AddKeyRequest { + opt_key: 
Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: Some(0), + }) + .await + .unwrap(); + + assert!(tracker.authenticate(&peer_key.key()).await.is_ok()); + } + } + } + + mod with_permanent_and { + + mod randomly_generated_keys { + use crate::core::tests::the_tracker::private_tracker; + + #[tokio::test] + async fn it_should_generate_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker.generate_permanent_auth_key().await.unwrap(); + + assert_eq!(peer_key.valid_until, None); + } + + #[tokio::test] + async fn it_should_authenticate_a_peer_with_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker.generate_permanent_auth_key().await.unwrap(); + + let result = tracker.authenticate(&peer_key.key()).await; + + assert!(result.is_ok()); + } + } + + mod pre_generated_keys { + use crate::core::auth::Key; + use crate::core::tests::the_tracker::private_tracker; + use crate::core::AddKeyRequest; + + #[tokio::test] + async fn it_should_add_a_pre_generated_key() { + let tracker = private_tracker(); + + let peer_key = tracker + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: None, + }) + .await + .unwrap(); + + assert_eq!(peer_key.valid_until, None); + } + + #[tokio::test] + async fn it_should_authenticate_a_peer_with_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: None, + }) + .await + .unwrap(); + + let result = tracker.authenticate(&peer_key.key()).await; + + assert!(result.is_ok()); + } + } + } + } + + mod handling_an_announce_request {} + + mod handling_an_scrape_request {} + } + + mod configured_as_private_and_whitelisted { + + mod handling_an_announce_request {} + + mod handling_an_scrape_request {} + } + + mod handling_torrent_persistence { + + use 
aquatic_udp_protocol::AnnounceEvent; + use torrust_tracker_torrent_repository::entry::EntrySync; + use torrust_tracker_torrent_repository::repository::Repository; + + use crate::core::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database}; + + #[tokio::test] + async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { + let tracker = tracker_persisting_torrents_in_database(); + + let info_hash = sample_info_hash(); + + let mut peer = sample_peer(); + + peer.event = AnnounceEvent::Started; + let swarm_stats = tracker.upsert_peer_and_get_stats(&info_hash, &peer); + assert_eq!(swarm_stats.downloaded, 0); + + peer.event = AnnounceEvent::Completed; + let swarm_stats = tracker.upsert_peer_and_get_stats(&info_hash, &peer); + assert_eq!(swarm_stats.downloaded, 1); + + // Remove the newly updated torrent from memory + tracker.torrents.remove(&info_hash); + + tracker.load_torrents_from_database().unwrap(); + + let torrent_entry = tracker.torrents.get(&info_hash).expect("it should be able to get entry"); + + // It persists the number of completed peers. 
+ assert_eq!(torrent_entry.get_swarm_metadata().downloaded, 1); + + // It does not persist the peers + assert!(torrent_entry.peers_is_empty()); + } + } + } +} diff --git a/src/core/peer_tests.rs b/src/core/peer_tests.rs new file mode 100644 index 000000000..b60ca3f6d --- /dev/null +++ b/src/core/peer_tests.rs @@ -0,0 +1,47 @@ +#![cfg(test)] + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +use torrust_tracker_clock::clock::stopped::Stopped as _; +use torrust_tracker_clock::clock::{self, Time}; +use torrust_tracker_primitives::peer; + +use crate::CurrentClock; + +#[test] +fn it_should_be_serializable() { + clock::Stopped::local_set_to_unix_epoch(); + + let torrent_peer = peer::Peer { + peer_id: PeerId(*b"-qB0000-000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: CurrentClock::now(), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), + event: AnnounceEvent::Started, + }; + + let raw_json = serde_json::to_string(&torrent_peer).unwrap(); + + let expected_raw_json = r#" + { + "peer_id": { + "id": "0x2d7142303030302d303030303030303030303030", + "client": "qBittorrent" + }, + "peer_addr":"126.0.0.1:8080", + "updated":0, + "uploaded":0, + "downloaded":0, + "left":0, + "event":"Started" + } + "#; + + assert_eq!( + serde_json::from_str::(&raw_json).unwrap(), + serde_json::from_str::(expected_raw_json).unwrap() + ); +} diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs new file mode 100644 index 000000000..166f40df4 --- /dev/null +++ b/src/core/services/mod.rs @@ -0,0 +1,33 @@ +//! Tracker domain services. Core and statistics services. +//! +//! There are two types of service: +//! +//! - [Core tracker services](crate::core::services::torrent): related to the tracker main functionalities like getting info about torrents. +//! 
- [Services for statistics](crate::core::services::statistics): related to tracker metrics. Aggregate data about the tracker server. +pub mod statistics; +pub mod torrent; + +use std::sync::Arc; + +use torrust_tracker_configuration::Configuration; + +use crate::core::Tracker; + +/// It returns a new tracker building its dependencies. +/// +/// # Panics +/// +/// Will panic if tracker cannot be instantiated. +#[must_use] +pub fn tracker_factory(config: &Configuration) -> Tracker { + // Initialize statistics + let (stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + + // Initialize Torrust tracker + match Tracker::new(&Arc::new(config).core, stats_event_sender, stats_repository) { + Ok(tracker) => tracker, + Err(error) => { + panic!("{}", error) + } + } +} diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs new file mode 100644 index 000000000..ee1c0c4fa --- /dev/null +++ b/src/core/services/statistics/mod.rs @@ -0,0 +1,116 @@ +//! Statistics services. +//! +//! It includes: +//! +//! - A [`factory`](crate::core::services::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. +//! - A [`get_metrics`] service to get the [`tracker metrics`](crate::core::statistics::Metrics). +//! +//! Tracker metrics are collected using a Publisher-Subscribe pattern. +//! +//! The factory function builds two structs: +//! +//! - An statistics [`EventSender`](crate::core::statistics::EventSender) +//! - An statistics [`Repo`](crate::core::statistics::Repo) +//! +//! ```text +//! let (stats_event_sender, stats_repository) = factory(tracker_usage_statistics); +//! ``` +//! +//! The statistics repository is responsible for storing the metrics in memory. +//! The statistics event sender allows sending events related to metrics. +//! There is an event listener that is receiving all the events and processing them with an event handler. +//! 
Then, the event handler updates the metrics depending on the received event. +//! +//! For example, if you send the event [`Event::Udp4Connect`](crate::core::statistics::Event::Udp4Connect): +//! +//! ```text +//! let result = event_sender.send_event(Event::Udp4Connect).await; +//! ``` +//! +//! Eventually the counter for UDP connections from IPv4 peers will be increased. +//! +//! ```rust,no_run +//! pub struct Metrics { +//! // ... +//! pub udp4_connections_handled: u64, // This will be incremented +//! // ... +//! } +//! ``` +pub mod setup; + +use std::sync::Arc; + +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + +use crate::core::statistics::Metrics; +use crate::core::Tracker; + +/// All the metrics collected by the tracker. +#[derive(Debug, PartialEq)] +pub struct TrackerMetrics { + /// Domain level metrics. + /// + /// General metrics for all torrents (number of seeders, leechers, etcetera) + pub torrents_metrics: TorrentsMetrics, + + /// Application level metrics. Usage statistics/metrics. 
+ /// + /// Metrics about how the tracker is been used (number of udp announce requests, number of http scrape requests, etcetera) + pub protocol_metrics: Metrics, +} + +/// It returns all the [`TrackerMetrics`] +pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { + let torrents_metrics = tracker.get_torrents_metrics(); + let stats = tracker.get_stats().await; + + TrackerMetrics { + torrents_metrics, + protocol_metrics: Metrics { + tcp4_connections_handled: stats.tcp4_connections_handled, + tcp4_announces_handled: stats.tcp4_announces_handled, + tcp4_scrapes_handled: stats.tcp4_scrapes_handled, + tcp6_connections_handled: stats.tcp6_connections_handled, + tcp6_announces_handled: stats.tcp6_announces_handled, + tcp6_scrapes_handled: stats.tcp6_scrapes_handled, + udp4_connections_handled: stats.udp4_connections_handled, + udp4_announces_handled: stats.udp4_announces_handled, + udp4_scrapes_handled: stats.udp4_scrapes_handled, + udp6_connections_handled: stats.udp6_connections_handled, + udp6_announces_handled: stats.udp6_announces_handled, + udp6_scrapes_handled: stats.udp6_scrapes_handled, + }, + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + use torrust_tracker_test_helpers::configuration; + + use crate::core; + use crate::core::services::statistics::{get_metrics, TrackerMetrics}; + use crate::core::services::tracker_factory; + + pub fn tracker_configuration() -> Configuration { + configuration::ephemeral() + } + + #[tokio::test] + async fn the_statistics_service_should_return_the_tracker_metrics() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + + let tracker_metrics = get_metrics(tracker.clone()).await; + + assert_eq!( + tracker_metrics, + TrackerMetrics { + torrents_metrics: TorrentsMetrics::default(), + protocol_metrics: core::statistics::Metrics::default(), + } + ); + } +} diff --git 
a/src/core/services/statistics/setup.rs b/src/core/services/statistics/setup.rs new file mode 100644 index 000000000..37603852b --- /dev/null +++ b/src/core/services/statistics/setup.rs @@ -0,0 +1,49 @@ +//! Setup for the tracker statistics. +//! +//! The [`factory`] function builds the structs needed for handling the tracker metrics. +use crate::core::statistics; + +/// It builds the structs needed for handling the tracker metrics. +/// +/// It returns: +/// +/// - An statistics [`EventSender`](crate::core::statistics::EventSender) that allows you to send events related to statistics. +/// - An statistics [`Repo`](crate::core::statistics::Repo) which is an in-memory repository for the tracker metrics. +/// +/// When the input argument `tracker_usage_statistics`is false the setup does not run the event listeners, consequently the statistics +/// events are sent are received but not dispatched to the handler. +#[must_use] +pub fn factory(tracker_usage_statistics: bool) -> (Option>, statistics::Repo) { + let mut stats_event_sender = None; + + let mut stats_tracker = statistics::Keeper::new(); + + if tracker_usage_statistics { + stats_event_sender = Some(stats_tracker.run_event_listener()); + } + + (stats_event_sender, stats_tracker.repository) +} + +#[cfg(test)] +mod test { + use super::factory; + + #[tokio::test] + async fn should_not_send_any_event_when_statistics_are_disabled() { + let tracker_usage_statistics = false; + + let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); + + assert!(stats_event_sender.is_none()); + } + + #[tokio::test] + async fn should_send_events_when_statistics_are_enabled() { + let tracker_usage_statistics = true; + + let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); + + assert!(stats_event_sender.is_some()); + } +} diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs new file mode 100644 index 000000000..3b014982d --- /dev/null +++ 
b/src/core/services/torrent.rs @@ -0,0 +1,306 @@ +//! Core tracker domain services. +//! +//! There are two services: +//! +//! - [`get_torrent_info`]: it returns all the data about one torrent. +//! - [`get_torrents`]: it returns data about some torrent in bulk excluding the peer list. +use std::sync::Arc; + +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::peer; +use torrust_tracker_torrent_repository::entry::EntrySync; +use torrust_tracker_torrent_repository::repository::Repository; + +use crate::core::Tracker; + +/// It contains all the information the tracker has about a torrent +#[derive(Debug, PartialEq)] +pub struct Info { + /// The infohash of the torrent this data is related to + pub info_hash: InfoHash, + /// The total number of seeders for this torrent. Peer that actively serving a full copy of the torrent data + pub seeders: u64, + /// The total number of peers that have ever complete downloading this torrent + pub completed: u64, + /// The total number of leechers for this torrent. Peers that actively downloading this torrent + pub leechers: u64, + /// The swarm: the list of peers that are actively trying to download or serving this torrent + pub peers: Option>, +} + +/// It contains only part of the information the tracker has about a torrent +/// +/// It contains the same data as [Info] but without the list of peers in the swarm. +#[derive(Debug, PartialEq, Clone)] +pub struct BasicInfo { + /// The infohash of the torrent this data is related to + pub info_hash: InfoHash, + /// The total number of seeders for this torrent. Peer that actively serving a full copy of the torrent data + pub seeders: u64, + /// The total number of peers that have ever complete downloading this torrent + pub completed: u64, + /// The total number of leechers for this torrent. 
Peers that actively downloading this torrent + pub leechers: u64, +} + +/// It returns all the information the tracker has about one torrent in a [Info] struct. +pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Option { + let torrent_entry_option = tracker.torrents.get(info_hash); + + let torrent_entry = torrent_entry_option?; + + let stats = torrent_entry.get_swarm_metadata(); + + let peers = torrent_entry.get_peers(None); + + let peers = Some(peers.iter().map(|peer| (**peer)).collect()); + + Some(Info { + info_hash: *info_hash, + seeders: u64::from(stats.complete), + completed: u64::from(stats.downloaded), + leechers: u64::from(stats.incomplete), + peers, + }) +} + +/// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list. +pub async fn get_torrents_page(tracker: Arc, pagination: Option<&Pagination>) -> Vec { + let mut basic_infos: Vec = vec![]; + + for (info_hash, torrent_entry) in tracker.torrents.get_paginated(pagination) { + let stats = torrent_entry.get_swarm_metadata(); + + basic_infos.push(BasicInfo { + info_hash, + seeders: u64::from(stats.complete), + completed: u64::from(stats.downloaded), + leechers: u64::from(stats.incomplete), + }); + } + + basic_infos +} + +/// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list. 
+pub async fn get_torrents(tracker: Arc, info_hashes: &[InfoHash]) -> Vec { + let mut basic_infos: Vec = vec![]; + + for info_hash in info_hashes { + if let Some(stats) = tracker.torrents.get(info_hash).map(|t| t.get_swarm_metadata()) { + basic_infos.push(BasicInfo { + info_hash: *info_hash, + seeders: u64::from(stats.complete), + completed: u64::from(stats.downloaded), + leechers: u64::from(stats.incomplete), + }); + } + } + + basic_infos +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + + fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), + event: AnnounceEvent::Started, + } + } + + mod getting_a_torrent_info { + + use std::str::FromStr; + use std::sync::Arc; + + use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; + + use crate::core::services::torrent::tests::sample_peer; + use crate::core::services::torrent::{get_torrent_info, Info}; + use crate::core::services::tracker_factory; + + pub fn tracker_configuration() -> Configuration { + configuration::ephemeral() + } + + #[tokio::test] + async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + + let torrent_info = get_torrent_info( + tracker.clone(), + &InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(), + ) + .await; + + assert!(torrent_info.is_none()); + } + + #[tokio::test] + async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { + let 
tracker = Arc::new(tracker_factory(&tracker_configuration())); + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + tracker.upsert_peer_and_get_stats(&info_hash, &sample_peer()); + + let torrent_info = get_torrent_info(tracker.clone(), &info_hash).await.unwrap(); + + assert_eq!( + torrent_info, + Info { + info_hash: InfoHash::from_str(&hash).unwrap(), + seeders: 1, + completed: 0, + leechers: 0, + peers: Some(vec![sample_peer()]), + } + ); + } + } + + mod searching_for_torrents { + + use std::str::FromStr; + use std::sync::Arc; + + use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; + + use crate::core::services::torrent::tests::sample_peer; + use crate::core::services::torrent::{get_torrents_page, BasicInfo, Pagination}; + use crate::core::services::tracker_factory; + + pub fn tracker_configuration() -> Configuration { + configuration::ephemeral() + } + + #[tokio::test] + async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; + + assert_eq!(torrents, vec![]); + } + + #[tokio::test] + async fn should_return_a_summarized_info_for_all_torrents() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + + tracker.upsert_peer_and_get_stats(&info_hash, &sample_peer()); + + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; + + assert_eq!( + torrents, + vec![BasicInfo { + info_hash: InfoHash::from_str(&hash).unwrap(), + seeders: 1, + completed: 0, + leechers: 0, + }] + ); + } + + #[tokio::test] + async fn 
should_allow_limiting_the_number_of_torrents_in_the_result() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + + let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash1 = InfoHash::from_str(&hash1).unwrap(); + let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); + let info_hash2 = InfoHash::from_str(&hash2).unwrap(); + + tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()); + tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()); + + let offset = 0; + let limit = 1; + + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::new(offset, limit))).await; + + assert_eq!(torrents.len(), 1); + } + + #[tokio::test] + async fn should_allow_using_pagination_in_the_result() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + + let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash1 = InfoHash::from_str(&hash1).unwrap(); + let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); + let info_hash2 = InfoHash::from_str(&hash2).unwrap(); + + tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()); + tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()); + + let offset = 1; + let limit = 4000; + + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::new(offset, limit))).await; + + assert_eq!(torrents.len(), 1); + assert_eq!( + torrents, + vec![BasicInfo { + info_hash: InfoHash::from_str(&hash1).unwrap(), + seeders: 1, + completed: 0, + leechers: 0, + }] + ); + } + + #[tokio::test] + async fn should_return_torrents_ordered_by_info_hash() { + let tracker = Arc::new(tracker_factory(&tracker_configuration())); + + let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash1 = InfoHash::from_str(&hash1).unwrap(); + tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()); + + let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); + let info_hash2 = 
InfoHash::from_str(&hash2).unwrap(); + tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()); + + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; + + assert_eq!( + torrents, + vec![ + BasicInfo { + info_hash: InfoHash::from_str(&hash2).unwrap(), + seeders: 1, + completed: 0, + leechers: 0, + }, + BasicInfo { + info_hash: InfoHash::from_str(&hash1).unwrap(), + seeders: 1, + completed: 0, + leechers: 0, + } + ] + ); + } + } +} diff --git a/src/core/statistics.rs b/src/core/statistics.rs new file mode 100644 index 000000000..c9681d23c --- /dev/null +++ b/src/core/statistics.rs @@ -0,0 +1,488 @@ +//! Structs to collect and keep tracker metrics. +//! +//! The tracker collects metrics such as: +//! +//! - Number of connections handled +//! - Number of `announce` requests handled +//! - Number of `scrape` request handled +//! +//! These metrics are collected for each connection type: UDP and HTTP and +//! also for each IP version used by the peers: IPv4 and IPv6. +//! +//! > Notice: that UDP tracker have an specific `connection` request. For the HTTP metrics the counter counts one connection for each `announce` or `scrape` request. +//! +//! The data is collected by using an `event-sender -> event listener` model. +//! +//! The tracker uses an [`statistics::EventSender`](crate::core::statistics::EventSender) instance to send an event. +//! The [`statistics::Keeper`](crate::core::statistics::Keeper) listens to new events and uses the [`statistics::Repo`](crate::core::statistics::Repo) to upgrade and store metrics. +//! +//! See the [`statistics::Event`](crate::core::statistics::Event) enum to check which events are available. +use std::sync::Arc; + +use futures::future::BoxFuture; +use futures::FutureExt; +#[cfg(test)] +use mockall::{automock, predicate::str}; +use tokio::sync::mpsc::error::SendError; +use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; + +const CHANNEL_BUFFER_SIZE: usize = 65_535; + +/// An statistics event. 
It is used to collect tracker metrics. +/// +/// - `Tcp` prefix means the event was triggered by the HTTP tracker +/// - `Udp` prefix means the event was triggered by the UDP tracker +/// - `4` or `6` prefixes means the IP version used by the peer +/// - Finally the event suffix is the type of request: `announce`, `scrape` or `connection` +/// +/// > NOTE: HTTP trackers do not use `connection` requests. +#[derive(Debug, PartialEq, Eq)] +pub enum Event { + // code-review: consider one single event for request type with data: Event::Announce { scheme: HTTPorUDP, ip_version: V4orV6 } + // Attributes are enums too. + Tcp4Announce, + Tcp4Scrape, + Tcp6Announce, + Tcp6Scrape, + Udp4Connect, + Udp4Announce, + Udp4Scrape, + Udp6Connect, + Udp6Announce, + Udp6Scrape, +} + +/// Metrics collected by the tracker. +/// +/// - Number of connections handled +/// - Number of `announce` requests handled +/// - Number of `scrape` request handled +/// +/// These metrics are collected for each connection type: UDP and HTTP +/// and also for each IP version used by the peers: IPv4 and IPv6. +#[derive(Debug, PartialEq, Default)] +pub struct Metrics { + /// Total number of TCP (HTTP tracker) connections from IPv4 peers. + /// Since the HTTP tracker spec does not require a handshake, this metric + /// increases for every HTTP request. + pub tcp4_connections_handled: u64, + /// Total number of TCP (HTTP tracker) `announce` requests from IPv4 peers. + pub tcp4_announces_handled: u64, + /// Total number of TCP (HTTP tracker) `scrape` requests from IPv4 peers. + pub tcp4_scrapes_handled: u64, + /// Total number of TCP (HTTP tracker) connections from IPv6 peers. + pub tcp6_connections_handled: u64, + /// Total number of TCP (HTTP tracker) `announce` requests from IPv6 peers. + pub tcp6_announces_handled: u64, + /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. + pub tcp6_scrapes_handled: u64, + /// Total number of UDP (UDP tracker) connections from IPv4 peers. 
+ pub udp4_connections_handled: u64, + /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. + pub udp4_announces_handled: u64, + /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. + pub udp4_scrapes_handled: u64, + /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. + pub udp6_connections_handled: u64, + /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. + pub udp6_announces_handled: u64, + /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. + pub udp6_scrapes_handled: u64, +} + +/// The service responsible for keeping tracker metrics (listening to statistics events and handle them). +/// +/// It actively listen to new statistics events. When it receives a new event +/// it accordingly increases the counters. +pub struct Keeper { + pub repository: Repo, +} + +impl Default for Keeper { + fn default() -> Self { + Self::new() + } +} + +impl Keeper { + #[must_use] + pub fn new() -> Self { + Self { repository: Repo::new() } + } + + #[must_use] + pub fn new_active_instance() -> (Box, Repo) { + let mut stats_tracker = Self::new(); + + let stats_event_sender = stats_tracker.run_event_listener(); + + (stats_event_sender, stats_tracker.repository) + } + + pub fn run_event_listener(&mut self) -> Box { + let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); + + let stats_repository = self.repository.clone(); + + tokio::spawn(async move { event_listener(receiver, stats_repository).await }); + + Box::new(Sender { sender }) + } +} + +async fn event_listener(mut receiver: mpsc::Receiver, stats_repository: Repo) { + while let Some(event) = receiver.recv().await { + event_handler(event, &stats_repository).await; + } +} + +async fn event_handler(event: Event, stats_repository: &Repo) { + match event { + // TCP4 + Event::Tcp4Announce => { + stats_repository.increase_tcp4_announces().await; + stats_repository.increase_tcp4_connections().await; + } + 
Event::Tcp4Scrape => { + stats_repository.increase_tcp4_scrapes().await; + stats_repository.increase_tcp4_connections().await; + } + + // TCP6 + Event::Tcp6Announce => { + stats_repository.increase_tcp6_announces().await; + stats_repository.increase_tcp6_connections().await; + } + Event::Tcp6Scrape => { + stats_repository.increase_tcp6_scrapes().await; + stats_repository.increase_tcp6_connections().await; + } + + // UDP4 + Event::Udp4Connect => { + stats_repository.increase_udp4_connections().await; + } + Event::Udp4Announce => { + stats_repository.increase_udp4_announces().await; + } + Event::Udp4Scrape => { + stats_repository.increase_udp4_scrapes().await; + } + + // UDP6 + Event::Udp6Connect => { + stats_repository.increase_udp6_connections().await; + } + Event::Udp6Announce => { + stats_repository.increase_udp6_announces().await; + } + Event::Udp6Scrape => { + stats_repository.increase_udp6_scrapes().await; + } + } + + tracing::debug!("stats: {:?}", stats_repository.get_stats().await); +} + +/// A trait to allow sending statistics events +#[cfg_attr(test, automock)] +pub trait EventSender: Sync + Send { + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; +} + +/// An [`statistics::EventSender`](crate::core::statistics::EventSender) implementation. +/// +/// It uses a channel sender to send the statistic events. The channel is created by a +/// [`statistics::Keeper`](crate::core::statistics::Keeper) +pub struct Sender { + sender: mpsc::Sender, +} + +impl EventSender for Sender { + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { + async move { Some(self.sender.send(event).await) }.boxed() + } +} + +/// A repository for the tracker metrics. 
+#[derive(Clone)] +pub struct Repo { + pub stats: Arc>, +} + +impl Default for Repo { + fn default() -> Self { + Self::new() + } +} + +impl Repo { + #[must_use] + pub fn new() -> Self { + Self { + stats: Arc::new(RwLock::new(Metrics::default())), + } + } + + pub async fn get_stats(&self) -> RwLockReadGuard<'_, Metrics> { + self.stats.read().await + } + + pub async fn increase_tcp4_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp4_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp4_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp4_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp4_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp4_scrapes_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp6_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp6_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp6_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp6_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp6_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp6_scrapes_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_scrapes_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_connections_handled += 1; + 
drop(stats_lock); + } + + pub async fn increase_udp6_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_scrapes_handled += 1; + drop(stats_lock); + } +} + +#[cfg(test)] +mod tests { + + mod stats_tracker { + use crate::core::statistics::{Event, Keeper, Metrics}; + + #[tokio::test] + async fn should_contain_the_tracker_statistics() { + let stats_tracker = Keeper::new(); + + let stats = stats_tracker.repository.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, Metrics::default().tcp4_announces_handled); + } + + #[tokio::test] + async fn should_create_an_event_sender_to_send_statistical_events() { + let mut stats_tracker = Keeper::new(); + + let event_sender = stats_tracker.run_event_listener(); + + let result = event_sender.send_event(Event::Udp4Connect).await; + + assert!(result.is_some()); + } + } + + mod event_handler { + use crate::core::statistics::{event_handler, Event, Repo}; + + #[tokio::test] + async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Tcp4Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_announce_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Tcp4Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp4_scrapes_counter_when_it_receives_a_tcp4_scrape_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Tcp4Scrape, &stats_repository).await; + + let stats = 
stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_scrapes_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_scrape_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Tcp4Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_announces_counter_when_it_receives_a_tcp6_announce_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Tcp6Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_announce_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Tcp6Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_scrapes_counter_when_it_receives_a_tcp6_scrape_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Tcp6Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_scrapes_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_scrape_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Tcp6Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_connections_counter_when_it_receives_a_udp4_connect_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Udp4Connect, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + 
assert_eq!(stats.udp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_announces_counter_when_it_receives_a_udp4_announce_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Udp4Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_scrapes_counter_when_it_receives_a_udp4_scrape_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Udp4Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_scrapes_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_connections_counter_when_it_receives_a_udp6_connect_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Udp6Connect, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_announces_counter_when_it_receives_a_udp6_announce_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Udp6Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_scrapes_counter_when_it_receives_a_udp6_scrape_event() { + let stats_repository = Repo::new(); + + event_handler(Event::Udp6Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_scrapes_handled, 1); + } + } +} diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs new file mode 100644 index 000000000..38311864b --- /dev/null +++ b/src/core/torrent/mod.rs @@ -0,0 +1,30 @@ +//! Structs to store the swarm data. +//! +//! There are to main data structures: +//! +//! 
- A torrent [`Entry`](torrust_tracker_torrent_repository::entry::Entry): it contains all the information stored by the tracker for one torrent. +//! - The [`SwarmMetadata`](torrust_tracker_primitives::swarm_metadata::SwarmMetadata): it contains aggregate information that can be derived from the torrent entries. +//! +//! A "swarm" is a network of peers that are trying to download the same torrent. +//! +//! The torrent entry contains the "swarm" data, which is basically the list of peers in the swarm. +//! That's the most valuable information the peers want to get from the tracker, because it allows them to +//! start downloading the torrent from those peers. +//! +//! The "swarm metadata" contains aggregate data derived from the torrent entries. There are two types of data: +//! +//! - For **active peers**: metrics related to the current active peers in the swarm. +//! - **Historical data**: since the tracker started running. +//! +//! The tracker collects metrics for: +//! +//! - The number of peers that have completed downloading the torrent since the tracker started collecting metrics. +//! - The number of peers that have completed downloading the torrent and are still active, that means they are actively participating in the network, +//! by announcing themselves periodically to the tracker. Since they have completed downloading they have a full copy of the torrent data. Peers with a +//! full copy of the data are called "seeders". +//! - The number of peers that have NOT completed downloading the torrent and are still active, that means they are actively participating in the network. +//! Peers that do not have a full copy of the torrent data are called "leechers". +//!
+use torrust_tracker_torrent_repository::TorrentsSkipMapMutexStd; + +pub type Torrents = TorrentsSkipMapMutexStd; // Currently Used diff --git a/src/database.rs b/src/database.rs deleted file mode 100644 index fbec824a0..000000000 --- a/src/database.rs +++ /dev/null @@ -1,150 +0,0 @@ -use crate::{InfoHash, AUTH_KEY_LENGTH}; -use log::debug; -use r2d2_sqlite::{SqliteConnectionManager, rusqlite}; -use r2d2::{Pool}; -use r2d2_sqlite::rusqlite::NO_PARAMS; -use crate::key_manager::AuthKey; -use std::str::FromStr; - -pub struct SqliteDatabase { - pool: Pool -} - -impl SqliteDatabase { - pub fn new(db_path: &str) -> Result { - let sqlite_connection_manager = SqliteConnectionManager::file(db_path); - let sqlite_pool = r2d2::Pool::new(sqlite_connection_manager).expect("Failed to create r2d2 SQLite connection pool."); - let sqlite_database = SqliteDatabase { - pool: sqlite_pool - }; - - if let Err(error) = SqliteDatabase::create_database_tables(&sqlite_database.pool) { - return Err(error) - }; - - Ok(sqlite_database) - } - - pub fn create_database_tables(pool: &Pool) -> Result { - let create_whitelist_table = " - CREATE TABLE IF NOT EXISTS whitelist ( - id integer PRIMARY KEY AUTOINCREMENT, - info_hash VARCHAR(20) NOT NULL UNIQUE - );".to_string(); - - let create_keys_table = format!(" - CREATE TABLE IF NOT EXISTS keys ( - id integer PRIMARY KEY AUTOINCREMENT, - key VARCHAR({}) NOT NULL UNIQUE, - valid_until INT(10) NOT NULL - );", AUTH_KEY_LENGTH as i8); - - let conn = pool.get().unwrap(); - match conn.execute(&create_whitelist_table, NO_PARAMS) { - Ok(updated) => { - match conn.execute(&create_keys_table, NO_PARAMS) { - Ok(updated2) => Ok(updated + updated2), - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } - - pub async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { - let conn = self.pool.get().unwrap(); - let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; 
- let mut rows = stmt.query(&[info_hash])?; - - if let Some(row) = rows.next()? { - let info_hash: String = row.get(0).unwrap(); - - // should never be able to fail - Ok(InfoHash::from_str(&info_hash).unwrap()) - } else { - Err(rusqlite::Error::QueryReturnedNoRows) - } - } - - pub async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().unwrap(); - match conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", &[info_hash.to_string()]) { - Ok(updated) => { - if updated > 0 { return Ok(updated) } - Err(rusqlite::Error::ExecuteReturnedResults) - }, - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } - - pub async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().unwrap(); - match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", &[info_hash.to_string()]) { - Ok(updated) => { - if updated > 0 { return Ok(updated) } - Err(rusqlite::Error::ExecuteReturnedResults) - }, - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } - - pub async fn get_key_from_keys(&self, key: &str) -> Result { - let conn = self.pool.get().unwrap(); - let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; - let mut rows = stmt.query(&[key.to_string()])?; - - if let Some(row) = rows.next()? 
{ - let key: String = row.get(0).unwrap(); - let valid_until_i64: i64 = row.get(1).unwrap(); - - Ok(AuthKey { - key, - valid_until: Some(valid_until_i64 as u64) - }) - } else { - Err(rusqlite::Error::QueryReturnedNoRows) - } - } - - pub async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result { - let conn = self.pool.get().unwrap(); - match conn.execute("INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", - &[auth_key.key.to_string(), auth_key.valid_until.unwrap().to_string()] - ) { - Ok(updated) => { - if updated > 0 { return Ok(updated) } - Err(rusqlite::Error::ExecuteReturnedResults) - }, - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } - - pub async fn remove_key_from_keys(&self, key: String) -> Result { - let conn = self.pool.get().unwrap(); - match conn.execute("DELETE FROM keys WHERE key = ?", &[key]) { - Ok(updated) => { - if updated > 0 { return Ok(updated) } - Err(rusqlite::Error::ExecuteReturnedResults) - }, - Err(e) => { - debug!("{:?}", e); - Err(e) - } - } - } -} diff --git a/src/http_api_server.rs b/src/http_api_server.rs deleted file mode 100644 index 5f7339036..000000000 --- a/src/http_api_server.rs +++ /dev/null @@ -1,238 +0,0 @@ -use crate::tracker::{TorrentTracker}; -use serde::{Deserialize, Serialize}; -use std::cmp::min; -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; -use warp::{filters, reply, reply::Reply, serve, Filter, Server}; -use super::common::*; - -#[derive(Deserialize, Debug)] -struct TorrentInfoQuery { - offset: Option, - limit: Option, -} - -#[derive(Serialize)] -struct Torrent<'a> { - info_hash: &'a InfoHash, - #[serde(flatten)] - data: &'a crate::tracker::TorrentEntry, - seeders: u32, - completed: u32, - leechers: u32, - - #[serde(skip_serializing_if = "Option::is_none")] - peers: Option>, -} - -#[derive(Serialize, Debug)] -#[serde(tag = "status", rename_all = "snake_case")] -enum ActionStatus<'a> { - Ok, - Err { reason: std::borrow::Cow<'a, str> }, -} - -impl warp::reject::Reject for 
ActionStatus<'static> {} - -fn authenticate(tokens: HashMap) -> impl Filter + Clone { - #[derive(Deserialize)] - struct AuthToken { - token: Option, - } - - let tokens: HashSet = tokens.into_iter().map(|(_, v)| v).collect(); - - let tokens = Arc::new(tokens); - warp::filters::any::any() - .map(move || tokens.clone()) - .and(filters::query::query::()) - .and_then(|tokens: Arc>, token: AuthToken| { - async move { - match token.token { - Some(token) => { - if !tokens.contains(&token) { - return Err(warp::reject::custom(ActionStatus::Err { reason: "token not valid".into() })) - } - - Ok(()) - } - None => Err(warp::reject::custom(ActionStatus::Err { reason: "unauthorized".into() })) - } - } - }) - .untuple_one() -} - -pub fn build_server(tracker: Arc) -> Server + Clone + Send + Sync + 'static> { - // GET /api/torrents?offset=:u32&limit=:u32 - // View torrent list - let t1 = tracker.clone(); - let view_torrent_list = filters::method::get() - .and(filters::path::path("torrents")) - .and(filters::path::end()) - .and(filters::query::query()) - .map(move |limits| { - let tracker = t1.clone(); - (limits, tracker) - }) - .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| { - async move { - let offset = limits.offset.unwrap_or(0); - let limit = min(limits.limit.unwrap_or(1000), 4000); - - let db = tracker.get_torrents().await; - let results: Vec<_> = db - .iter() - .map(|(info_hash, torrent_entry)| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - Torrent { - info_hash, - data: torrent_entry, - seeders, - completed, - leechers, - peers: None, - } - }) - .skip(offset as usize) - .take(limit as usize) - .collect(); - - Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) - } - }); - - // GET /api/torrent/:infohash - // View torrent info - let t2 = tracker.clone(); - let view_torrent_info = filters::method::get() - .and(filters::path::path("torrent")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { 
- let tracker = t2.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| { - async move { - let db = tracker.get_torrents().await; - let torrent_entry_option = db.get(&info_hash); - - if torrent_entry_option.is_none() { - return Err(warp::reject::custom(ActionStatus::Err { reason: "torrent does not exist".into() })) - } - - let torrent_entry = torrent_entry_option.unwrap(); - let (seeders, completed, leechers) = torrent_entry.get_stats(); - - let peers: Vec<_> = torrent_entry - .get_peers_iter() - .take(1000) - .map(|(peer_id, peer_info)| (peer_id.clone(), peer_info.clone())) - .collect(); - - Ok(reply::json(&Torrent { - info_hash: &info_hash, - data: torrent_entry, - seeders, - completed, - leechers, - peers: Some(peers), - })) - } - }); - - // DELETE /api/whitelist/:info_hash - // Delete info hash from whitelist - let t3 = tracker.clone(); - let delete_torrent = filters::method::delete() - .and(filters::path::path("whitelist")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { - let tracker = t3.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| { - async move { - match tracker.remove_torrent_from_whitelist(&info_hash).await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to remove torrent from whitelist".into() })) - } - } - }); - - // POST /api/whitelist/:info_hash - // Add info hash to whitelist - let t4 = tracker.clone(); - let add_torrent = filters::method::post() - .and(filters::path::path("whitelist")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { - let tracker = t4.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| { - async move { - match tracker.add_torrent_to_whitelist(&info_hash).await { - Ok(..) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(..) 
=> Err(warp::reject::custom(ActionStatus::Err { reason: "failed to whitelist torrent".into() })) - } - } - }); - - // POST /api/key/:seconds_valid - // Generate new key - let t5 = tracker.clone(); - let create_key = filters::method::post() - .and(filters::path::path("key")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |seconds_valid: u64| { - let tracker = t5.clone(); - (seconds_valid, tracker) - }) - .and_then(|(seconds_valid, tracker): (u64, Arc)| { - async move { - match tracker.generate_auth_key(seconds_valid).await { - Ok(auth_key) => Ok(warp::reply::json(&auth_key)), - Err(..) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to generate key".into() })) - } - } - }); - - // DELETE /api/key/:key - // Delete key - let t6 = tracker.clone(); - let delete_key = filters::method::delete() - .and(filters::path::path("key")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |key: String| { - let tracker = t6.clone(); - (key, tracker) - }) - .and_then(|(key, tracker): (String, Arc)| { - async move { - match tracker.remove_auth_key(key).await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { reason: "failed to delete key".into() })) - } - } - }); - - let api_routes = - filters::path::path("api") - .and(view_torrent_list - .or(delete_torrent) - .or(view_torrent_info) - .or(add_torrent) - .or(create_key) - .or(delete_key) - ); - - let server = api_routes.and(authenticate(tracker.config.http_api.as_ref().unwrap().access_tokens.clone())); - - serve(server) -} diff --git a/src/http_server.rs b/src/http_server.rs deleted file mode 100644 index bf1f7f88d..000000000 --- a/src/http_server.rs +++ /dev/null @@ -1,369 +0,0 @@ -use std::collections::{HashMap}; -use crate::tracker::{TorrentTracker}; -use serde::{Deserialize, Serialize}; -use std::convert::Infallible; -use std::error::Error; -use std::io::Write; -use std::net::{IpAddr, SocketAddr}; -use 
std::sync::Arc; -use std::str::FromStr; -use log::{debug}; -use warp::{filters, reply::Reply, Filter}; -use warp::http::Response; -use crate::{TorrentError, TorrentPeer, TorrentStats}; -use crate::key_manager::AuthKey; -use crate::utils::url_encode_bytes; -use super::common::*; - -#[derive(Deserialize, Debug)] -pub struct AnnounceRequest { - pub downloaded: NumberOfBytes, - pub uploaded: NumberOfBytes, - pub key: String, - pub peer_id: String, - pub port: u16, - pub info_hash: String, - pub left: NumberOfBytes, - pub event: Option, - pub compact: Option, -} - -impl AnnounceRequest { - pub fn is_compact(&self) -> bool { - self.compact.unwrap_or(0) == 1 - } -} - -#[derive(Deserialize, Debug)] -pub struct ScrapeRequest { - pub info_hash: String, -} - -#[derive(Serialize)] -struct Peer { - peer_id: String, - ip: IpAddr, - port: u16, -} - -#[derive(Serialize)] -struct AnnounceResponse { - interval: u32, - //tracker_id: String, - complete: u32, - incomplete: u32, - peers: Vec -} - -impl AnnounceResponse { - pub fn write(&self) -> String { - serde_bencode::to_string(&self).unwrap() - } - - pub fn write_compact(&self) -> Result, Box> { - let mut peers_v4: Vec = Vec::new(); - let mut peers_v6: Vec = Vec::new(); - - for peer in &self.peers { - match peer.ip { - IpAddr::V4(ip) => { - peers_v4.write(&u32::from(ip).to_be_bytes())?; - peers_v4.write(&peer.port.to_be_bytes())?; - } - IpAddr::V6(ip) => { - peers_v6.write(&u128::from(ip).to_be_bytes())?; - peers_v6.write(&peer.port.to_be_bytes())?; - } - } - } - - debug!("{:?}", String::from_utf8_lossy(peers_v4.as_slice())); - debug!("{:?}", String::from_utf8_lossy(peers_v6.as_slice())); - - let mut bytes: Vec = Vec::new(); - bytes.write(b"d8:intervali")?; - bytes.write(&self.interval.to_string().as_bytes())?; - bytes.write(b"e8:completei")?; - bytes.write(&self.complete.to_string().as_bytes())?; - bytes.write(b"e10:incompletei")?; - bytes.write(&self.incomplete.to_string().as_bytes())?; - bytes.write(b"e5:peers")?; - 
bytes.write(&peers_v4.len().to_string().as_bytes())?; - bytes.write(b":")?; - bytes.write(peers_v4.as_slice())?; - bytes.write(b"e6:peers6")?; - bytes.write(&peers_v6.len().to_string().as_bytes())?; - bytes.write(b":")?; - bytes.write(peers_v6.as_slice())?; - bytes.write(b"e")?; - - debug!("{:?}", String::from_utf8_lossy(bytes.as_slice())); - Ok(bytes) - } -} - -#[derive(Serialize)] -struct ScrapeResponse { - files: HashMap -} - -impl ScrapeResponse { - pub fn write(&self) -> String { - serde_bencode::to_string(&self).unwrap() - } -} - -#[derive(Serialize)] -struct ScrapeResponseEntry { - complete: u32, - downloaded: u32, - incomplete: u32, -} - -#[derive(Serialize)] -struct ErrorResponse { - failure_reason: String -} - -impl warp::Reply for ErrorResponse { - fn into_response(self) -> warp::reply::Response { - Response::new(format!("{}", serde_bencode::to_string(&self).unwrap()).into()) - } -} - -#[derive(Clone)] -pub struct HttpServer { - tracker: Arc, -} - -impl HttpServer { - pub fn new(tracker: Arc) -> HttpServer { - HttpServer { - tracker - } - } - - // &self did not work here - pub fn routes(http_server: Arc) -> impl Filter + Clone + Send + Sync + 'static { - // optional tracker key - let opt_key = warp::path::param::() - .map(Some) - .or_else(|_| async { - // Ok(None) - Ok::<(Option,), std::convert::Infallible>((None,)) - }); - - // GET /announce?key=:String - // Announce peer - let hs1 = http_server.clone(); - let announce_route = - filters::path::path("announce") - .and(filters::method::get()) - .and(warp::addr::remote()) - .and(opt_key) - .and(filters::query::raw()) - .and(filters::query::query()) - .map(move |remote_addr, key, raw_query, query| { - debug!("Request: {}", raw_query); - (remote_addr, key, raw_query, query, hs1.clone()) - }) - .and_then(move |(remote_addr, key, raw_query, mut query, http_server): (Option, Option, String, AnnounceRequest, Arc)| { - async move { - if remote_addr.is_none() { return HttpServer::send_error("could not get remote 
address") } - - // query.info_hash somehow receives a corrupt string - // so we have to get the info_hash manually from the raw query - let info_hashes = HttpServer::info_hashes_from_raw_query(&raw_query); - if info_hashes.len() < 1 { return HttpServer::send_error("info_hash not found") } - query.info_hash = info_hashes[0].to_string(); - debug!("{:?}", query.info_hash); - - if let Some(err) = http_server.authenticate_request(&query.info_hash, key).await { return err } - - http_server.handle_announce(query, remote_addr.unwrap()).await - } - }); - - // GET /scrape?key=:String - // Get torrent info - let hs2 = http_server.clone(); - let scrape_route = - filters::path::path("scrape") - .and(filters::method::get()) - .and(opt_key) - .and(filters::query::raw()) - .map(move |key, raw_query| { - debug!("Request: {}", raw_query); - (key, raw_query, hs2.clone()) - }) - .and_then(move |(key, raw_query, http_server): (Option, String, Arc)| { - async move { - let info_hashes = HttpServer::info_hashes_from_raw_query(&raw_query); - if info_hashes.len() < 1 { return HttpServer::send_error("info_hash not found") } - if info_hashes.len() > 50 { return HttpServer::send_error("exceeded the max of 50 info_hashes") } - debug!("{:?}", info_hashes); - - // todo: verify all info_hashes before scrape - if let Some(err) = http_server.authenticate_request(&info_hashes[0].to_string(), key).await { return err } - - http_server.handle_scrape(info_hashes).await - } - }); - - // all routes - warp::any().and(announce_route.or(scrape_route)) - } - - fn info_hashes_from_raw_query(raw_query: &str) -> Vec { - let split_raw_query: Vec<&str> = raw_query.split("&").collect(); - let mut info_hashes: Vec = Vec::new(); - - for v in split_raw_query { - if v.contains("info_hash") { - let raw_info_hash = v.split("=").collect::>()[1]; - let info_hash_bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); - let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes)); - if let Ok(ih) = 
info_hash { - info_hashes.push(ih); - } - } - } - - info_hashes - } - - fn send_announce_response(query: &AnnounceRequest, torrent_stats: TorrentStats, peers: Vec, interval: u32) -> Result { - let http_peers: Vec = peers.iter().map(|peer| Peer { - peer_id: String::from_utf8_lossy(&peer.peer_id.0).to_string(), - ip: peer.peer_addr.ip(), - port: peer.peer_addr.port() - }).collect(); - - let res = AnnounceResponse { - interval, - complete: torrent_stats.seeders, - incomplete: torrent_stats.leechers, - peers: http_peers - }; - - // check for compact response request - let response = match query.compact { - None => Response::new(res.write().into()), - Some(int) => { - if int == 1 { - let res_compact = res.write_compact(); - match res_compact { - Ok(response) => Response::new(response.into()), - Err(e) => { - debug!("{}", e); - HttpServer::send_error("server error").unwrap() - } - } - } else { - Response::new(res.write().into()) - } - } - }; - - Ok(response) - } - - fn send_error(msg: &str) -> Result { - Ok(ErrorResponse { - failure_reason: msg.to_string() - }.into_response()) - } - - async fn authenticate_request(&self, info_hash_str: &str, key: Option) -> Option> { - let info_hash= InfoHash::from_str(info_hash_str); - if info_hash.is_err() { return Some(HttpServer::send_error("invalid info_hash")) } - - let auth_key = match key { - None => None, - Some(v) => AuthKey::from_string(&v) - }; - - if let Err(e) = self.tracker.authenticate_request(&info_hash.unwrap(), &auth_key).await { - return match e { - TorrentError::TorrentNotWhitelisted => { - debug!("Info_hash not whitelisted."); - Some(HttpServer::send_error("torrent not whitelisted")) - } - TorrentError::PeerKeyNotValid => { - debug!("Peer key not valid."); - Some(HttpServer::send_error("peer key not valid")) - } - TorrentError::PeerNotAuthenticated => { - debug!("Peer not authenticated."); - Some(HttpServer::send_error("peer not authenticated")) - } - } - } - None - } - - async fn handle_announce(&self, query: 
AnnounceRequest, remote_addr: SocketAddr) -> Result { - let info_hash = match InfoHash::from_str(&query.info_hash) { - Ok(v) => v, - Err(_) => { - return HttpServer::send_error("info_hash is invalid") - } - }; - - let peer = TorrentPeer::from_http_announce_request(&query, remote_addr, self.tracker.config.get_ext_ip()); - - match self.tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await { - Err(e) => { - debug!("{:?}", e); - HttpServer::send_error("server error") - } - Ok(torrent_stats) => { - // get all peers excluding the client_addr - let peers = self.tracker.get_torrent_peers(&info_hash, &peer.peer_addr).await; - if peers.is_none() { - debug!("No peers found after announce."); - return HttpServer::send_error("peer is invalid") - } - - // todo: add http announce interval config option - // success response - let announce_interval = self.tracker.config.http_tracker.as_ref().unwrap().announce_interval; - HttpServer::send_announce_response(&query, torrent_stats, peers.unwrap(), announce_interval) - } - } - } - - async fn handle_scrape(&self, info_hashes: Vec) -> Result { - let mut res = ScrapeResponse { - files: HashMap::new() - }; - let db = self.tracker.get_torrents().await; - - for info_hash in info_hashes.iter() { - let scrape_entry = match db.get(&info_hash) { - Some(torrent_info) => { - let (seeders, completed, leechers) = torrent_info.get_stats(); - - ScrapeResponseEntry { - complete: seeders, - downloaded: completed, - incomplete: leechers - } - } - None => { - ScrapeResponseEntry { - complete: 0, - downloaded: 0, - incomplete: 0 - } - } - }; - - if let Ok(encoded_info_hash) = url_encode_bytes(&info_hash.0) { - res.files.insert(encoded_info_hash, scrape_entry); - } - } - - Ok(Response::new(res.write().into())) - } -} diff --git a/src/key_manager.rs b/src/key_manager.rs deleted file mode 100644 index b1f16f1dc..000000000 --- a/src/key_manager.rs +++ /dev/null @@ -1,125 +0,0 @@ -use super::common::AUTH_KEY_LENGTH; -use 
crate::utils::current_time; -use rand::{thread_rng, Rng}; -use rand::distributions::Alphanumeric; -use serde::Serialize; -use log::debug; -use derive_more::{Display, Error}; - -pub fn generate_auth_key(seconds_valid: u64) -> AuthKey { - let key: String = thread_rng() - .sample_iter(&Alphanumeric) - .take(AUTH_KEY_LENGTH) - .map(char::from) - .collect(); - - debug!("Generated key: {}, valid for: {} seconds", key, seconds_valid); - - AuthKey { - key, - valid_until: Some(current_time() + seconds_valid), - } -} - -pub fn verify_auth_key(auth_key: &AuthKey) -> Result<(), Error> { - let current_time = current_time(); - if auth_key.valid_until.is_none() { return Err(Error::KeyInvalid) } - if auth_key.valid_until.unwrap() < current_time { return Err(Error::KeyExpired) } - - Ok(()) -} - -#[derive(Serialize, Debug, Eq, PartialEq, Clone)] -pub struct AuthKey { - pub key: String, - pub valid_until: Option, -} - -impl AuthKey { - pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { - if let Ok(key) = String::from_utf8(Vec::from(key_buffer)) { - Some(AuthKey { - key, - valid_until: None, - }) - } else { - None - } - } - - pub fn from_string(key: &str) -> Option { - if key.len() != AUTH_KEY_LENGTH { - None - } else { - Some(AuthKey { - key: key.to_string(), - valid_until: None, - }) - } - } -} - -#[derive(Debug, Display, PartialEq, Error)] -#[allow(dead_code)] -pub enum Error { - #[display(fmt = "Key could not be verified.")] - KeyVerificationError, - #[display(fmt = "Key is invalid.")] - KeyInvalid, - #[display(fmt = "Key has expired.")] - KeyExpired -} - -impl From for Error { - fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { - eprintln!("{}", e); - Error::KeyVerificationError - } -} - -#[cfg(test)] -mod tests { - use crate::key_manager; - - #[test] - fn auth_key_from_buffer() { - let auth_key = key_manager::AuthKey::from_buffer( - [ - 89, 90, 83, 108, - 52, 108, 77, 90, - 117, 112, 82, 117, - 79, 112, 83, 82, - 67, 51, 107, 114, - 73, 75, 82, 53, - 66, 80, 66, 
49, - 52, 110, 114, 74] - ); - - assert!(auth_key.is_some()); - assert_eq!(auth_key.unwrap().key, "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"); - } - - #[test] - fn auth_key_from_string() { - let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key = key_manager::AuthKey::from_string(key_string); - - assert!(auth_key.is_some()); - assert_eq!(auth_key.unwrap().key, key_string); - } - - #[test] - fn generate_valid_auth_key() { - let auth_key = key_manager::generate_auth_key(9999); - - assert!(key_manager::verify_auth_key(&auth_key).is_ok()); - } - - #[test] - fn generate_expired_auth_key() { - let mut auth_key = key_manager::generate_auth_key(0); - auth_key.valid_until = Some(0); - - assert!(key_manager::verify_auth_key(&auth_key).is_err()); - } -} diff --git a/src/lib.rs b/src/lib.rs index 375c0f903..5d7c92ae2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,19 +1,522 @@ -pub mod config; -pub mod udp_server; -pub mod http_server; -pub mod tracker; -pub mod http_api_server; -pub mod common; -pub mod response; -pub mod utils; -pub mod database; -pub mod key_manager; -pub mod logging; +//! **Torrust Tracker** is a modern and feature-rich (private) [`BitTorrent`](https://www.bittorrent.org/) tracker. +//! +//! [`BitTorrent`](https://en.wikipedia.org/wiki/BitTorrent) is a protocol for distributing files using a peer-to-peer network. +//! +//! Peers in the networks need to know where they can find other peers with the files they are looking for. +//! +//! Tracker are services that allow peers to quickly find other peers. Client peers announce their existence to a tracker, +//! and the tracker responds to the peer with a list of other peers in the swarm. +//! +//! You can learn more about `BitTorrent` and `BitTorrent` Trackers on these sites: +//! +//! - +//! - +//! - +//! +//! Torrust Tracker is a `BitTorrent` tracker with a focus on: +//! +//! - Performance +//! - Robustness +//! - Extensibility +//! - Security +//! - Usability +//! 
- And with a community-driven development +//! +//! # Table of contents +//! +//! - [Features](#features) +//! - [Services](#services) +//! - [Installation](#installation) +//! - [Minimum requirements](#minimum-requirements) +//! - [Prerequisites](#prerequisites) +//! - [Install from sources](#install-from-sources) +//! - [Run with docker](#run-with-docker) +//! - [Configuration](#configuration) +//! - [Usage](#usage) +//! - [API](#api) +//! - [HTTP Tracker](#http-tracker) +//! - [UDP Tracker](#udp-tracker) +//! - [Components](#components) +//! - [Implemented BEPs](#implemented-beps) +//! - [Contributing](#contributing) +//! - [Documentation](#documentation) +//! +//! # Features +//! +//! - Multiple UDP server and HTTP(S) server blocks for socket binding possible +//! - Full IPv4 and IPv6 support for both UDP and HTTP(S) +//! - Private and Whitelisted mode +//! - Built-in API +//! - Peer authentication using time-bound keys +//! - Database persistence for authentication keys, whitelist and completed peers counter +//! - DB Support for `SQLite` and `MySQl` +//! +//! # Services +//! +//! From the end-user perspective the Torrust Tracker exposes three different services. +//! +//! - A REST [`API`](crate::servers::apis) +//! - One or more [`UDP`](crate::servers::udp) trackers +//! - One or more [`HTTP`](crate::servers::http) trackers +//! +//! # Installation +//! +//! ## Minimum requirements +//! +//! - Rust Stable `1.68` +//! - You might have problems compiling with a machine with low resources. +//! +//! It has been tested with: +//! +//! Docker containers with: +//! +//! - 6 CPUs +//! - 7.5G of ram +//! - 2GB of swap +//! +//! [VM](https://github.com/torrust/torrust-tracker/issues/321) with: +//! +//! - 1 core of Intel Xeon Processor (Icelake) +//! - 1G of ram +//! - 25G of disk +//! - Debian 11 +//! - no swap by default +//! +//! Adding swap may help with compilation. See issue [#321](https://github.com/torrust/torrust-tracker/issues/321). +//! +//! 
## Prerequisites +//! +//! The tracker has some system dependencies: +//! +//! Since we are using the `openssl` crate with the [vendored feature](https://docs.rs/openssl/latest/openssl/#vendored), +//! enabled, you will need to install the following dependencies: +//! +//! ```text +//! sudo apt-get install pkg-config libssl-dev make +//! ``` +//! +//! If you are using `SQLite3` as database driver, you will need to install the +//! following dependency: +//! +//! ```text +//! sudo apt-get install libsqlite3-dev +//! ``` +//! +//! > **NOTICE**: those are the commands for `Ubuntu`. If you are using a +//! > different OS, you will need to install the equivalent packages. Please +//! > refer to the documentation of your OS. +//! +//! With the default configuration you will need to create the `storage` directory: +//! +//! ```text +//! ./storage/ +//! └── tracker +//! ├── etc +//! ├── lib +//! │   ├── database +//! │   │   └── sqlite3.db +//! │   └── tls +//! └── log +//! ``` +//! +//! The default configuration expects a directory `./storage/tracker/lib/database` to be writable by the tracker process. +//! +//! By default the tracker uses `SQLite` and the database file name `sqlite3.db`. +//! +//! You only need the `tls` directory in case you are setting up SSL for the HTTP tracker or the tracker API. +//! Visit [`HTTP`](crate::servers::http) or [`API`](crate::servers::apis) if you want to know how you can use HTTPS. +//! +//! ## Install from sources +//! +//! First, you need to create a folder to clone the repository. +//! +//! ```text +//! cd /tmp +//! mkdir torrust +//! ``` +//! +//! ```text +//! git clone https://github.com/torrust/torrust-tracker.git \ +//! && cd torrust-tracker \ +//! && cargo build --release \ +//! && mkdir -p ./storage/tracker/etc \ +//! && mkdir -p ./storage/tracker/lib/database \ +//! && mkdir -p ./storage/tracker/lib/tls \ +//! && mkdir -p ./storage/tracker/log +//! ``` +//! +//! 
To run the tracker we will have to use the command "cargo run" this will +//! compile and after being compiled it will start running the tracker. +//! +//! ```text +//! cargo run +//! ``` +//! +//! ## Run with docker +//! +//! You can run the tracker with a pre-built docker image. Please refer to the +//! [tracker docker documentation](https://github.com/torrust/torrust-tracker/blob/develop/docs/containers.md). +//! +//! # Configuration +//! +//! In order to run the tracker you need to provide the configuration. If you +//! run the tracker without providing the configuration, the tracker will +//! generate the default configuration the first time you run it. It will +//! generate a `tracker.toml` file with in the root directory. +//! +//! The default configuration is: +//! +//! ```toml +//! [logging] +//! threshold = "info" +//! +//! [core] +//! inactive_peer_cleanup_interval = 600 +//! listed = false +//! private = false +//! tracker_usage_statistics = true +//! +//! [core.announce_policy] +//! interval = 120 +//! interval_min = 120 +//! +//! [core.database] +//! driver = "sqlite3" +//! path = "./storage/tracker/lib/database/sqlite3.db" +//! +//! [core.net] +//! external_ip = "0.0.0.0" +//! on_reverse_proxy = false +//! +//! [core.tracker_policy] +//! max_peer_timeout = 900 +//! persistent_torrent_completed_stat = false +//! remove_peerless_torrents = true +//! +//! [health_check_api] +//! bind_address = "127.0.0.1:1313" +//!``` +//! +//! The default configuration includes one disabled UDP server, one disabled +//! HTTP server and the enabled API. +//! +//! For more information about each service and options you can visit the +//! documentation for the [torrust-tracker-configuration crate](https://docs.rs/torrust-tracker-configuration). +//! +//! Alternatively to the `tracker.toml` file you can use one environment +//! variable `TORRUST_TRACKER_CONFIG_TOML` to pass the configuration to the tracker: +//! +//! ```text +//! 
TORRUST_TRACKER_CONFIG_TOML=$(cat ./share/default/config/tracker.development.sqlite3.toml) ./target/release/torrust-tracker +//! ``` +//! +//! In the previous example you are just setting the env var with the contents +//! of the `tracker.toml` file. +//! +//! The env var contains the same data as the `tracker.toml`. It's particularly +//! useful in you are [running the tracker with docker](https://github.com/torrust/torrust-tracker/blob/develop/docs/containers.md). +//! +//! > NOTICE: The `TORRUST_TRACKER_CONFIG_TOML` env var has priority over the `tracker.toml` file. +//! +//! By default, if you don’t specify any `tracker.toml` file, the application +//! will use `./share/default/config/tracker.development.sqlite3.toml`. +//! +//! > IMPORTANT: Every time you change the configuration you need to restart the +//! > service. +//! +//! # Usage +//! +//! Running the tracker with the default configuration and enabling the UDP and +//! HTTP trackers will expose the services on these URLs: +//! +//! - REST API: +//! - UDP tracker: +//! - HTTP tracker: +//! +//! ## API +//! +//! In order to use the tracker API you need to enable it in the configuration: +//! +//! ```toml +//! [http_api] +//! bind_address = "127.0.0.1:1212" +//! +//! [http_api.access_tokens] +//! admin = "MyAccessToken" +//! ``` +//! +//! By default it's enabled on port `1212`. You also need to add access tokens in the configuration: +//! +//! ```toml +//! [http_api.access_tokens] +//! admin = "MyAccessToken" +//! LABEL = "YOUR_TOKEN" +//! ``` +//! +//! All tokens give full access the the API. Once you have defined you token you can make request adding the token as a `GET` parameter. For example: +//! +//! +//! +//! That endpoint will give you the tracker metrics: +//! +//! ```json +//! { +//! "torrents": 0, +//! "seeders": 0, +//! "completed": 0, +//! "leechers": 0, +//! "tcp4_connections_handled": 0, +//! "tcp4_announces_handled": 0, +//! "tcp4_scrapes_handled": 0, +//! 
"tcp6_connections_handled": 0, +//! "tcp6_announces_handled": 0, +//! "tcp6_scrapes_handled": 0, +//! "udp4_connections_handled": 0, +//! "udp4_announces_handled": 0, +//! "udp4_scrapes_handled": 0, +//! "udp6_connections_handled": 0, +//! "udp6_announces_handled": 0, +//! "udp6_scrapes_handled": 0 +//! } +//! ``` +//! +//! Refer to the [`API`](crate::servers::apis) documentation for more information about the [`API`](crate::servers::apis) endpoints. +//! +//! ## HTTP tracker +//! +//! The HTTP tracker implements two type of requests: +//! +//! - Announce: +//! - Scrape: +//! +//! In you are using the tracker in `private` or `private_listed` mode you will need to append the authentication key: +//! +//! - Announce: +//! - Scrape: +//! +//! In order to use the HTTP tracker you need to enable at least one server in the configuration: +//! +//! ```toml +//! [[http_trackers]] +//! bind_address = "0.0.0.0:7070" +//! ``` +//! +//! Refer to the [`HTTP`](crate::servers::http) documentation for more information about the [`HTTP`](crate::servers::http) tracker. +//! +//! ### Announce +//! +//! The `announce` request allows a peer to announce itself and obtain a list of peer for an specific torrent. +//! +//! A sample `announce` request: +//! +//! +//! +//! If you want to know more about the `announce` request: +//! +//! - [BEP 03. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +//! - [BEP 23. Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) +//! - [Vuze announce docs](https://wiki.vuze.com/w/Announce) +//! +//! ### Scrape +//! +//! The `scrape` request allows a peer to get swarm metadata for multiple torrents at the same time. +//! +//! A sample `scrape` request for only one torrent: +//! +//! +//! +//! The response contains the swarm metadata for that torrent: +//! +//! - `complete`: the number of active peers that have completed downloading, also known as seeders. 
Peers from which other peers can get a full copy of the torrent. +//! - `downloaded`: the number of peers that have ever completed downloading. +//! - `incomplete`: the number of active peers that have not completed downloading, also known as leechers. +//! +//! The `scrape` response is a bencoded byte array like the following: +//! +//! ```text +//! d5:filesd20:xxxxxxxxxxxxxxxxxxxxd8:completei11e10:downloadedi13772e10:incompletei19e20:yyyyyyyyyyyyyyyyyyyyd8:completei21e10:downloadedi206e10:incompletei20eee +//! ``` +//! +//! If you save the response as a file and you open it with a program that can handle binary data you would see: +//! +//! ```text +//! 00000000: 6435 3a66 696c 6573 6432 303a 8100 0000 d5:filesd20:.... +//! 00000010: 0000 0000 0000 0000 0000 0000 0000 0000 ................ +//! 00000020: 6438 3a63 6f6d 706c 6574 6569 3165 3130 d8:completei1e10 +//! 00000030: 3a64 6f77 6e6c 6f61 6465 6469 3065 3130 :downloadedi0e10 +//! 00000040: 3a69 6e63 6f6d 706c 6574 6569 3065 6565 :incompletei0eee +//! 00000050: 65 e +//! ``` +//! +//! `BitTorrent` uses a data formatting specification called [Bencode](https://en.wikipedia.org/wiki/Bencode). +//! +//! If you want to know more about the `scrape` request: +//! +//! - [BEP 48. Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) +//! - [Vuze scrape docs](https://wiki.vuze.com/w/Scrape) +//! +//! ### Authentication keys +//! +//! If the tracker is running in `private` or `private_listed` mode you will need to provide a valid authentication key. +//! +//! Right now the only way to add new keys is via the REST [`API`](crate::servers::apis). The endpoint `POST /api/vi/key/:duration_in_seconds` +//! will return an expiring key that will be valid for `duration_in_seconds` seconds. +//! +//! Using `curl` you can create a 2-minute valid auth key: +//! +//! ```text +//! $ curl -X POST "http://127.0.0.1:1212/api/v1/key/120?token=MyAccessToken" +//! ``` +//! +//! Response: +//! +//! 
```json +//! { +//! "key": "nvCFlJCq7fz7Qx6KoKTDiMZvns8l5Kw7", +//! "valid_until": 1679334334, +//! "expiry_time": "2023-03-20 17:45:34.712077008 UTC" +//! } +//! ``` +//! +//! You can also use the Torrust Tracker together with the [Torrust Index](https://github.com/torrust/torrust-index). If that's the case, +//! the Index will create the keys by using the tracker [API](crate::servers::apis). +//! +//! ## UDP tracker +//! +//! The UDP tracker also implements two type of requests: +//! +//! - Announce: +//! - Scrape: +//! +//! In order to use the UDP tracker you need to enable at least one server in the configuration: +//! +//! ```toml +//! [[udp_trackers]] +//! bind_address = "0.0.0.0:6969" +//! ``` +//! +//! Refer to the [`UDP`](crate::servers::udp) documentation for more information about the [`UDP`](crate::servers::udp) tracker. +//! +//! If you want to know more about the UDP tracker protocol: +//! +//! - [BEP 15. UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html) +//! +//! # Components +//! +//! Torrust Tracker has four main components: +//! +//! - The core tracker [`core`] +//! - The tracker REST [`API`](crate::servers::apis) +//! - The [`UDP`](crate::servers::udp) tracker +//! - The [`HTTP`](crate::servers::http) tracker +//! +//! ![Torrust Tracker Components](https://raw.githubusercontent.com/torrust/torrust-tracker/main/docs/media/torrust-tracker-components.png) +//! +//! ## Core tracker +//! +//! The core tracker is the main containing the tracker generic tracker logic. +//! +//! The core tracker handles: +//! +//! - Authentication with keys +//! - Authorization using a torrent whitelist +//! - Statistics +//! - Persistence +//! +//! See [`core`] for more details on the [`core`] module. +//! +//! ## Tracker API +//! +//! The tracker exposes a REST API. The API has four resource groups: +//! +//! - Authentication keys: to handle the keys for the HTTP tracker +//! 
- Statistics: to get the tracker metrics like requests counters +//! - Torrents: to get peers for a torrent +//! - Whitelist: to handle the torrent whitelist when the tracker runs on `listed` or `private_listed` mode +//! +//! See [`API`](crate::servers::apis) for more details on the REST API. +//! +//! ## UDP tracker +//! +//! UDP trackers are trackers with focus on performance. By Using UDP instead of HTTP the tracker removed the overhead +//! of opening and closing TCP connections. It also reduces the response size. +//! +//! You can find more information about UDP tracker on: +//! +//! - [Wikipedia: UDP tracker](https://en.wikipedia.org/wiki/UDP_tracker) +//! - [BEP 15: UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html) +//! +//! See [`UDP`](crate::servers::udp) for more details on the UDP tracker. +//! +//! ## HTTP tracker +//! +//! HTTP tracker was the original tracker specification defined on the [BEP 3]((https://www.bittorrent.org/beps/bep_0003.html)). +//! +//! See [`HTTP`](crate::servers::http) for more details on the HTTP tracker. +//! +//! You can find more information about UDP tracker on: +//! +//! - [Wikipedia: `BitTorrent` tracker](https://en.wikipedia.org/wiki/BitTorrent_tracker) +//! - [BEP 3: The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +//! +//! # Implemented BEPs +//! +//! BEP stands for `BitTorrent` Enhancement Proposal. BEPs are documents providing information to the `BitTorrent` +//! community or describing a new feature for the `BitTorrent` protocols. +//! +//! You can find all BEPs on +//! +//! Torrust Tracker implements these BEPs: +//! +//! - [BEP 3](https://www.bittorrent.org/beps/bep_0003.html): The `BitTorrent` Protocol +//! - [BEP 7](https://www.bittorrent.org/beps/bep_0007.html): IPv6 Support +//! - [BEP 15](https://www.bittorrent.org/beps/bep_0015.html): UDP Tracker Protocol for `BitTorrent` +//! 
- [BEP 23](https://www.bittorrent.org/beps/bep_0023.html): Tracker Returns Compact Peer Lists +//! - [BEP 27](https://www.bittorrent.org/beps/bep_0027.html): Private Torrents +//! - [BEP 48](https://www.bittorrent.org/beps/bep_0048.html): Tracker Protocol Extension: Scrape +//! +//! # Contributing +//! +//! If you want to contribute to this documentation you can [open a new pull request](https://github.com/torrust/torrust-tracker/pulls). +//! +//! # Documentation +//! +//! You can find this documentation on [docs.rs](https://docs.rs/torrust-tracker/). +//! +//! If you want to contribute to this documentation you can [open a new pull request](https://github.com/torrust/torrust-tracker/pulls). +//! +//! In addition to the production code documentation you can find a lot of +//! examples on the integration and unit tests. -pub use self::config::*; -pub use self::udp_server::*; -pub use self::http_server::*; -pub use self::tracker::*; -pub use self::http_api_server::*; -pub use self::common::*; -pub use self::response::*; +use torrust_tracker_clock::{clock, time_extent}; + +pub mod app; +pub mod bootstrap; +pub mod console; +pub mod core; +pub mod servers; +pub mod shared; + +#[macro_use] +extern crate lazy_static; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; + +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type DefaultTimeExtentMaker = time_extent::WorkingTimeExtentMaker; + +/// Stopped version, for testing. 
+#[cfg(test)] +#[allow(dead_code)] +pub(crate) type DefaultTimeExtentMaker = time_extent::StoppedTimeExtentMaker; diff --git a/src/logging.rs b/src/logging.rs deleted file mode 100644 index 580e35094..000000000 --- a/src/logging.rs +++ /dev/null @@ -1,39 +0,0 @@ -use log::info; -use crate::Configuration; - -pub fn setup_logging(cfg: &Configuration) { - let log_level = match &cfg.log_level { - None => log::LevelFilter::Info, - Some(level) => { - match level.as_str() { - "off" => log::LevelFilter::Off, - "trace" => log::LevelFilter::Trace, - "debug" => log::LevelFilter::Debug, - "info" => log::LevelFilter::Info, - "warn" => log::LevelFilter::Warn, - "error" => log::LevelFilter::Error, - _ => { - panic!("Unknown log level encountered: '{}'", level.as_str()); - } - } - } - }; - - if let Err(_err) = fern::Dispatch::new() - .format(|out, message, record| { - out.finish(format_args!( - "{} [{}][{}] {}", - chrono::Local::now().format("%+"), - record.target(), - record.level(), - message - )) - }) - .level(log_level) - .chain(std::io::stdout()) - .apply() - { - panic!("Failed to initialize logging.") - } - info!("logging initialized."); -} diff --git a/src/main.rs b/src/main.rs index 74a905c0d..e0b7bc4ab 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,116 +1,19 @@ -use log::{info}; -use torrust_tracker::{http_api_server, Configuration, TorrentTracker, UdpServer, HttpTrackerConfig, UdpTrackerConfig, HttpApiConfig, logging}; -use std::sync::Arc; -use tokio::task::JoinHandle; -use torrust_tracker::http_server::HttpServer; +use torrust_tracker::{app, bootstrap}; #[tokio::main] async fn main() { - let config = match Configuration::load_from_file() { - Ok(config) => Arc::new(config), - Err(error) => { - panic!("{}", error) - } - }; - - logging::setup_logging(&config); - - // the singleton torrent tracker that gets passed to the HTTP and UDP server - let tracker = Arc::new(TorrentTracker::new(config.clone())); - - // start torrent cleanup job (periodically removes old peers) - 
let _torrent_cleanup_job = start_torrent_cleanup_job(config.clone(), tracker.clone()).unwrap(); - - // start HTTP API server - if let Some(http_api_config) = &config.http_api { - if http_api_config.enabled { - let _api_server = start_api_server(&http_api_config, tracker.clone()); - } - }; + let (config, tracker) = bootstrap::app::setup(); - // check which tracker to run, UDP (Default) or HTTP - let _tracker_server = if let Some(http_config) = &config.http_tracker { - if http_config.enabled { - start_http_tracker_server(http_config, tracker.clone()) - } else { - start_udp_tracker_server(&config.udp_tracker, tracker.clone()).await - } - } else { - start_udp_tracker_server(&config.udp_tracker, tracker.clone()).await - }; - - let ctrl_c = tokio::signal::ctrl_c(); + let jobs = app::start(&config, tracker).await; + // handle the signals tokio::select! { - _ = _tracker_server => { panic!("Tracker server exited.") }, - _ = ctrl_c => { info!("Torrust shutting down..") } - } -} - -fn start_torrent_cleanup_job(config: Arc, tracker: Arc) -> Option> { - let weak_tracker = std::sync::Arc::downgrade(&tracker); - let interval = config.cleanup_interval.unwrap_or(600); + _ = tokio::signal::ctrl_c() => { + tracing::info!("Torrust shutting down ..."); - return Some(tokio::spawn(async move { - let interval = std::time::Duration::from_secs(interval); - let mut interval = tokio::time::interval(interval); - interval.tick().await; // first tick is immediate... 
- // periodically call tracker.cleanup_torrents() - loop { - interval.tick().await; - if let Some(tracker) = weak_tracker.upgrade() { - tracker.cleanup_torrents().await; - } else { - break; - } + // Await for all jobs to shutdown + futures::future::join_all(jobs).await; + tracing::info!("Torrust successfully shutdown."); } - })) -} - -fn start_api_server(config: &HttpApiConfig, tracker: Arc) -> JoinHandle<()> { - info!("Starting HTTP API server on: {}", config.bind_address); - let bind_addr = config.bind_address.parse::().unwrap(); - - tokio::spawn(async move { - let server = http_api_server::build_server(tracker); - server.bind(bind_addr).await; - }) -} - -fn start_http_tracker_server(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { - info!("Starting HTTP server on: {}", config.bind_address); - let http_tracker = Arc::new(HttpServer::new(tracker)); - let bind_addr = config.bind_address.parse::().unwrap(); - let ssl_enabled = config.ssl_enabled; - let ssl_cert_path = config.ssl_cert_path.clone(); - let ssl_key_path = config.ssl_key_path.clone(); - - tokio::spawn(async move { - // run with tls if ssl_enabled and cert and key path are set - if ssl_enabled { - info!("SSL enabled."); - warp::serve(HttpServer::routes(http_tracker)) - .tls() - .cert_path(ssl_cert_path.as_ref().unwrap()) - .key_path(ssl_key_path.as_ref().unwrap()) - .run(bind_addr).await; - } else { - warp::serve(HttpServer::routes(http_tracker)) - .run(bind_addr).await; - } - }) -} - -async fn start_udp_tracker_server(config: &UdpTrackerConfig, tracker: Arc) -> JoinHandle<()> { - info!("Starting UDP server on: {}", config.bind_address); - let udp_server = UdpServer::new(tracker).await.unwrap_or_else(|e| { - panic!("Could not start UDP server: {}", e); - }); - - info!("Starting UDP tracker server.."); - tokio::spawn(async move { - if let Err(e) = udp_server.accept_packets().await { - panic!("Could not start UDP server: {}", e); - } - }) + } } diff --git a/src/response.rs b/src/response.rs 
deleted file mode 100644 index 9734e3769..000000000 --- a/src/response.rs +++ /dev/null @@ -1,126 +0,0 @@ -use std; -use std::io::{Write}; -use std::net::{SocketAddr}; -use byteorder::{NetworkEndian, WriteBytesExt}; -use super::common::*; -use std::io; -use crate::TorrentPeer; - -#[derive(PartialEq, Eq, Clone, Debug)] -pub enum UdpResponse { - Connect(UdpConnectionResponse), - Announce(UdpAnnounceResponse), - Scrape(UdpScrapeResponse), - Error(UdpErrorResponse), -} - -#[derive(PartialEq, Eq, Clone, Debug)] -pub struct UdpConnectionResponse { - pub action: Actions, - pub transaction_id: TransactionId, - pub connection_id: ConnectionId, -} - -#[derive(PartialEq, Eq, Clone, Debug)] -pub struct UdpAnnounceResponse { - pub action: Actions, - pub transaction_id: TransactionId, - pub interval: u32, - pub leechers: u32, - pub seeders: u32, - pub peers: Vec, -} - -#[derive(PartialEq, Eq, Clone, Debug)] -pub struct UdpScrapeResponse { - pub action: Actions, - pub transaction_id: TransactionId, - pub torrent_stats: Vec, -} - -#[derive(PartialEq, Eq, Clone, Debug)] -pub struct UdpScrapeResponseEntry { - pub seeders: i32, - pub completed: i32, - pub leechers: i32, -} - -#[derive(PartialEq, Eq, Clone, Debug)] -pub struct UdpErrorResponse { - pub action: Actions, - pub transaction_id: TransactionId, - pub message: String, -} - -impl From for UdpResponse { - fn from(r: UdpConnectionResponse) -> Self { - Self::Connect(r) - } -} - -impl From for UdpResponse { - fn from(r: UdpAnnounceResponse) -> Self { - Self::Announce(r) - } -} - -impl From for UdpResponse { - fn from(r: UdpScrapeResponse) -> Self { - Self::Scrape(r) - } -} - -impl From for UdpResponse { - fn from(r: UdpErrorResponse) -> Self { - Self::Error(r) - } -} - -impl UdpResponse { - pub fn write_to_bytes(self, bytes: &mut impl Write) -> Result<(), io::Error> { - match self { - UdpResponse::Connect(r) => { - bytes.write_i32::(0)?; // 0 = connect - bytes.write_i32::(r.transaction_id.0)?; - 
bytes.write_i64::(r.connection_id.0)?; - }, - UdpResponse::Announce(r) => { - bytes.write_i32::(1)?; // 1 = announce - bytes.write_i32::(r.transaction_id.0)?; - bytes.write_u32::(r.interval)?; - bytes.write_u32::(r.leechers)?; - bytes.write_u32::(r.seeders)?; - - for peer in r.peers { - match peer.peer_addr { - SocketAddr::V4(socket_addr) => { - bytes.write_all(&socket_addr.ip().octets())?; - bytes.write_u16::(socket_addr.port())?; - } - SocketAddr::V6(socket_addr) => { - bytes.write_all(&socket_addr.ip().octets())?; - bytes.write_u16::(socket_addr.port())?; - } - } - } - }, - UdpResponse::Scrape(r) => { - bytes.write_i32::(2)?; // 2 = scrape - bytes.write_i32::(r.transaction_id.0)?; - - for torrent_stat in r.torrent_stats { - bytes.write_i32::(torrent_stat.seeders)?; - bytes.write_i32::(torrent_stat.completed)?; - bytes.write_i32::(torrent_stat.leechers)?; - } - }, - UdpResponse::Error(r) => { - bytes.write_i32::(3)?; - bytes.write_i32::(r.transaction_id.0)?; - bytes.write_all(r.message.as_bytes())?; - }, - } - - Ok(()) - } -} diff --git a/src/servers/apis/mod.rs b/src/servers/apis/mod.rs new file mode 100644 index 000000000..0451b46c0 --- /dev/null +++ b/src/servers/apis/mod.rs @@ -0,0 +1,181 @@ +//! The tracker REST API with all its versions. +//! +//! > **NOTICE**: This API should not be exposed directly to the internet, it is +//! > intended for internal use only. +//! +//! Endpoints for the latest API: [v1]. +//! +//! All endpoints require an authorization token which must be set in the +//! configuration before running the tracker. The default configuration uses +//! `?token=MyAccessToken`. Refer to [Authentication](#authentication) for more +//! information. +//! +//! # Table of contents +//! +//! - [Configuration](#configuration) +//! - [Authentication](#authentication) +//! - [Versioning](#versioning) +//! - [Endpoints](#endpoints) +//! - [Documentation](#documentation) +//! +//! # Configuration +//! +//! 
The configuration file has a [`[http_api]`](torrust_tracker_configuration::HttpApi) +//! section that can be used to enable the API. +//! +//! ```toml +//! [http_api] +//! bind_address = "0.0.0.0:1212" +//! +//! [http_api.tsl_config] +//! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" +//! ssl_key_path = "./storage/tracker/lib/tls/localhost.key" +//! +//! [http_api.access_tokens] +//! admin = "MyAccessToken" +//! ``` +//! +//! Refer to [`torrust-tracker-configuration`](torrust_tracker_configuration) +//! for more information about the API configuration. +//! +//! When you run the tracker with the API enabled, you will see the following message: +//! +//! ```text +//! Loading configuration from config file ./tracker.toml +//! 2023-03-28T12:19:24.963054069+01:00 [torrust_tracker::bootstrap::logging][INFO] Logging initialized +//! ... +//! 2023-03-28T12:19:24.964138723+01:00 [torrust_tracker::bootstrap::jobs::tracker_apis][INFO] Starting Torrust APIs server on: http://0.0.0.0:1212 +//! ``` +//! +//! The API server will be available on the address specified in the configuration. +//! +//! You can test the API by loading the following URL on a browser: +//! +//! +//! +//! Or using `curl`: +//! +//! ```bash +//! $ curl -s "http://0.0.0.0:1212/api/v1/stats?token=MyAccessToken" +//! ``` +//! +//! The response will be a JSON object. For example, the [tracker statistics +//! endpoint](crate::servers::apis::v1::context::stats#get-tracker-statistics): +//! +//! ```json +//! { +//! "torrents": 0, +//! "seeders": 0, +//! "completed": 0, +//! "leechers": 0, +//! "tcp4_connections_handled": 0, +//! "tcp4_announces_handled": 0, +//! "tcp4_scrapes_handled": 0, +//! "tcp6_connections_handled": 0, +//! "tcp6_announces_handled": 0, +//! "tcp6_scrapes_handled": 0, +//! "udp4_connections_handled": 0, +//! "udp4_announces_handled": 0, +//! "udp4_scrapes_handled": 0, +//! "udp6_connections_handled": 0, +//! "udp6_announces_handled": 0, +//! "udp6_scrapes_handled": 0 +//! } +//! 
``` +//! +//! # Authentication +//! +//! The API supports authentication using a GET parameter token. +//! +//! +//! +//! You can set as many tokens as you want in the configuration file: +//! +//! ```toml +//! [http_api.access_tokens] +//! admin = "MyAccessToken" +//! ``` +//! +//! The token label is used to identify the token. All tokens have full access +//! to the API. +//! +//! Refer to [`torrust-tracker-configuration`](torrust_tracker_configuration) +//! for more information about the API configuration and to the +//! [`auth`](crate::servers::apis::v1::middlewares::auth) middleware for more +//! information about the authentication process. +//! +//! # Setup SSL (optional) +//! +//! The API server supports SSL. You can enable it by adding the `tsl_config` +//! section to the configuration. +//! +//! ```toml +//! [http_api] +//! bind_address = "0.0.0.0:1212" +//! +//! [http_api.tsl_config] +//! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" +//! ssl_key_path = "./storage/tracker/lib/tls/localhost.key" +//! +//! [http_api.access_tokens] +//! admin = "MyAccessToken" +//! ``` +//! +//! > **NOTICE**: If you are using a reverse proxy like NGINX, you can skip this +//! > step and use NGINX for the SSL instead. See +//! > [other alternatives to Nginx/certbot](https://github.com/torrust/torrust-tracker/discussions/131) +//! +//! > **NOTICE**: You can generate a self-signed certificate for localhost using +//! > OpenSSL. See [Let's Encrypt](https://letsencrypt.org/docs/certificates-for-localhost/). +//! > That's particularly useful for testing purposes. Once you have the certificate +//! > you need to set the [`ssl_cert_path`](torrust_tracker_configuration::HttpApi::tsl_config.ssl_cert_path) +//! > and [`ssl_key_path`](torrust_tracker_configuration::HttpApi::tsl_config.ssl_key_path) +//! > options in the configuration file with the paths to the certificate +//! > (`localhost.crt`) and key (`localhost.key`) files. +//! +//! # Versioning +//! +//! 
The API is versioned and each version has its own module. +//! The API server runs all the API versions on the same server using +//! the same port. Currently there is only one API version: [v1] +//! but a version [`v2`](https://github.com/torrust/torrust-tracker/issues/144) +//! is planned. +//! +//! # Endpoints +//! +//! Refer to the [v1] module for the list of available +//! API endpoints. +//! +//! # Documentation +//! +//! If you want to contribute to this documentation you can [open a new pull request](https://github.com/torrust/torrust-tracker/pulls). +//! +//! > **NOTICE**: we are using [curl](https://curl.se/) in the API examples. +//! > And you have to use quotes around the URL in order to avoid unexpected +//! > errors. For example: `curl "http://127.0.0.1:1212/api/v1/stats?token=MyAccessToken"`. +pub mod routes; +pub mod server; +pub mod v1; + +use serde::{Deserialize, Serialize}; + +pub const API_LOG_TARGET: &str = "API"; + +/// The info hash URL path parameter. +/// +/// Some API endpoints require an info hash as a path parameter. +/// +/// For example: `http://localhost:1212/api/v1/torrent/{info_hash}`. +/// +/// The info hash represents the value collected from the URL path parameter. +/// It does not include validation as this is done by the API endpoint handler, +/// in order to provide a more specific error message. +#[derive(Deserialize)] +pub struct InfoHashParam(pub String); + +/// The version of the HTTP Api. +#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] +pub enum Version { + /// The `v1` version of the HTTP Api. + V1, +} diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs new file mode 100644 index 000000000..327cab0c5 --- /dev/null +++ b/src/servers/apis/routes.rs @@ -0,0 +1,88 @@ +//! API routes. +//! +//! It loads all the API routes for all API versions and adds the authentication +//! middleware to them. +//! +//! All the API routes have the `/api` prefix and the version number as the +//! 
first path segment. For example: `/api/v1/torrents`. +use std::sync::Arc; +use std::time::Duration; + +use axum::error_handling::HandleErrorLayer; +use axum::http::HeaderName; +use axum::response::Response; +use axum::routing::get; +use axum::{middleware, BoxError, Router}; +use hyper::{Request, StatusCode}; +use torrust_tracker_configuration::{AccessTokens, DEFAULT_TIMEOUT}; +use tower::timeout::TimeoutLayer; +use tower::ServiceBuilder; +use tower_http::compression::CompressionLayer; +use tower_http::propagate_header::PropagateHeaderLayer; +use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; +use tower_http::trace::{DefaultMakeSpan, TraceLayer}; +use tracing::{instrument, Level, Span}; + +use super::v1; +use super::v1::context::health_check::handlers::health_check_handler; +use super::v1::middlewares::auth::State; +use crate::core::Tracker; +use crate::servers::apis::API_LOG_TARGET; + +/// Add all API routes to the router. +#[allow(clippy::needless_pass_by_value)] +#[instrument(skip(tracker, access_tokens))] +pub fn router(tracker: Arc, access_tokens: Arc) -> Router { + let router = Router::new(); + + let api_url_prefix = "/api"; + + let router = v1::routes::add(api_url_prefix, router, tracker.clone()); + + let state = State { access_tokens }; + + router + .layer(middleware::from_fn_with_state(state, v1::middlewares::auth::auth)) + .route(&format!("{api_url_prefix}/health_check"), get(health_check_handler)) + .layer(CompressionLayer::new()) + .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) + .layer(PropagateHeaderLayer::new(HeaderName::from_static("x-request-id"))) + .layer( + TraceLayer::new_for_http() + .make_span_with(DefaultMakeSpan::new().level(Level::INFO)) + .on_request(|request: &Request, _span: &Span| { + let method = request.method().to_string(); + let uri = request.uri().to_string(); + let request_id = request + .headers() + .get("x-request-id") + .map(|v| v.to_str().unwrap_or_default()) + .unwrap_or_default(); + + tracing::span!( 
+ target: API_LOG_TARGET, + tracing::Level::INFO, "request", method = %method, uri = %uri, request_id = %request_id); + }) + .on_response(|response: &Response, latency: Duration, _span: &Span| { + let status_code = response.status(); + let request_id = response + .headers() + .get("x-request-id") + .map(|v| v.to_str().unwrap_or_default()) + .unwrap_or_default(); + let latency_ms = latency.as_millis(); + + tracing::span!( + target: API_LOG_TARGET, + tracing::Level::INFO, "response", latency = %latency_ms, status = %status_code, request_id = %request_id); + }), + ) + .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) + .layer( + ServiceBuilder::new() + // this middleware goes above `TimeoutLayer` because it will receive + // errors returned by `TimeoutLayer` + .layer(HandleErrorLayer::new(|_: BoxError| async { StatusCode::REQUEST_TIMEOUT })) + .layer(TimeoutLayer::new(DEFAULT_TIMEOUT)), + ) +} diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs new file mode 100644 index 000000000..31220f497 --- /dev/null +++ b/src/servers/apis/server.rs @@ -0,0 +1,330 @@ +//! Logic to run the HTTP API server. +//! +//! It contains two main structs: `ApiServer` and `Launcher`, +//! and two main functions: `start` and `start_tls`. +//! +//! The `ApiServer` struct is responsible for: +//! - Starting and stopping the server. +//! - Storing the configuration. +//! +//! `ApiServer` relies on a launcher to start the actual server. +/// +/// 1. `ApiServer::start` -> spawns new asynchronous task. +/// 2. `Launcher::start` -> starts the server on the spawned task. +/// +/// The `Launcher` struct is responsible for: +/// +/// - Knowing how to start the server with graceful shutdown. +/// +/// For the time being the `ApiServer` and `Launcher` are only used in tests +/// where we need to start and stop the server multiple times. 
In production +/// code and the main application uses the `start` and `start_tls` functions +/// to start the servers directly since we do not need to control the server +/// when it's running. In the future we might need to control the server, +/// for example, to restart it to apply new configuration changes, to remotely +/// shutdown the server, etc. +use std::net::SocketAddr; +use std::sync::Arc; + +use axum_server::tls_rustls::RustlsConfig; +use axum_server::Handle; +use derive_more::derive::Display; +use derive_more::Constructor; +use futures::future::BoxFuture; +use thiserror::Error; +use tokio::sync::oneshot::{Receiver, Sender}; +use torrust_tracker_configuration::AccessTokens; +use tracing::{instrument, Level}; + +use super::routes::router; +use crate::bootstrap::jobs::Started; +use crate::core::Tracker; +use crate::servers::apis::API_LOG_TARGET; +use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; +use crate::servers::logging::STARTED_ON; +use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; +use crate::servers::signals::{graceful_shutdown, Halted}; + +/// Errors that can occur when starting or stopping the API server. +#[derive(Debug, Error)] +pub enum Error { + #[error("Error when starting or stopping the API server")] + FailedToStartOrStop(String), +} + +/// An alias for the `ApiServer` struct with the `Stopped` state. +#[allow(clippy::module_name_repetitions)] +pub type StoppedApiServer = ApiServer; + +/// An alias for the `ApiServer` struct with the `Running` state. +#[allow(clippy::module_name_repetitions)] +pub type RunningApiServer = ApiServer; + +/// A struct responsible for starting and stopping an API server with a +/// specific configuration and keeping track of the started server. +/// +/// It's a state machine that can be in one of two +/// states: `Stopped` or `Running`. 
+#[allow(clippy::module_name_repetitions)] +#[derive(Debug, Display)] +pub struct ApiServer +where + S: std::fmt::Debug + std::fmt::Display, +{ + pub state: S, +} + +/// The `Stopped` state of the `ApiServer` struct. +#[derive(Debug, Display)] +#[display("Stopped: {launcher}")] +pub struct Stopped { + launcher: Launcher, +} + +/// The `Running` state of the `ApiServer` struct. +#[derive(Debug, Display)] +#[display("Running (with local address): {local_addr}")] +pub struct Running { + pub local_addr: SocketAddr, + pub halt_task: tokio::sync::oneshot::Sender, + pub task: tokio::task::JoinHandle, +} + +impl Running { + #[must_use] + pub fn new( + local_addr: SocketAddr, + halt_task: tokio::sync::oneshot::Sender, + task: tokio::task::JoinHandle, + ) -> Self { + Self { + local_addr, + halt_task, + task, + } + } +} + +impl ApiServer { + #[must_use] + pub fn new(launcher: Launcher) -> Self { + Self { + state: Stopped { launcher }, + } + } + + /// Starts the API server with the given configuration. + /// + /// # Errors + /// + /// It would return an error if no `SocketAddr` is returned after launching the server. + /// + /// # Panics + /// + /// It would panic if the bound socket address cannot be sent back to this starter. 
+ #[instrument(skip(self, tracker, form, access_tokens), err, ret(Display, level = Level::INFO))] + pub async fn start( + self, + tracker: Arc, + form: ServiceRegistrationForm, + access_tokens: Arc, + ) -> Result, Error> { + let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); + let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); + + let launcher = self.state.launcher; + + let task = tokio::spawn(async move { + tracing::debug!(target: API_LOG_TARGET, "Starting with launcher in spawned task ..."); + + let _task = launcher.start(tracker, access_tokens, tx_start, rx_halt).await; + + tracing::debug!(target: API_LOG_TARGET, "Started with launcher in spawned task"); + + launcher + }); + + let api_server = match rx_start.await { + Ok(started) => { + form.send(ServiceRegistration::new(started.address, check_fn)) + .expect("it should be able to send service registration"); + + ApiServer { + state: Running::new(started.address, tx_halt, task), + } + } + Err(err) => { + let msg = format!("Unable to start API server: {err}"); + tracing::error!("{}", msg); + panic!("{}", msg); + } + }; + + Ok(api_server) + } +} + +impl ApiServer { + /// Stops the API server. + /// + /// # Errors + /// + /// It would return an error if the channel for the task killer signal was closed. + #[instrument(skip(self), err, ret(Display, level = Level::INFO))] + pub async fn stop(self) -> Result, Error> { + self.state + .halt_task + .send(Halted::Normal) + .map_err(|_| Error::FailedToStartOrStop("Task killer channel was closed.".to_string()))?; + + let launcher = self.state.task.await.map_err(|e| Error::FailedToStartOrStop(e.to_string()))?; + + Ok(ApiServer { + state: Stopped { launcher }, + }) + } +} + +/// Checks the Health by connecting to the API service endpoint. +/// +/// # Errors +/// +/// This function will return an error if unable to connect. +/// Or if the request returns an error code. 
+#[must_use] +#[instrument(skip())] +pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { + let url = format!("http://{binding}/api/health_check"); // DevSkim: ignore DS137138 + + let info = format!("checking api health check at: {url}"); + + let job = tokio::spawn(async move { + match reqwest::get(url).await { + Ok(response) => Ok(response.status().to_string()), + Err(err) => Err(err.to_string()), + } + }); + ServiceHealthCheckJob::new(*binding, info, job) +} + +/// A struct responsible for starting the API server. +#[derive(Constructor, Debug)] +pub struct Launcher { + bind_to: SocketAddr, + tls: Option, +} + +impl std::fmt::Display for Launcher { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if self.tls.is_some() { + write!(f, "(with socket): {}, using TLS", self.bind_to,) + } else { + write!(f, "(with socket): {}, without TLS", self.bind_to,) + } + } +} + +impl Launcher { + /// Starts the API server with graceful shutdown. + /// + /// If TLS is enabled in the configuration, it will start the server with + /// TLS. See [`torrust-tracker-configuration`](torrust_tracker_configuration) + /// for more information about configuration. + /// + /// # Panics + /// + /// Will panic if unable to bind to the socket, or unable to get the address of the bound socket. + /// Will also panic if unable to send message regarding the bound socket address. 
+ #[instrument(skip(self, tracker, access_tokens, tx_start, rx_halt))] + pub fn start( + &self, + tracker: Arc, + access_tokens: Arc, + tx_start: Sender, + rx_halt: Receiver, + ) -> BoxFuture<'static, ()> { + let router = router(tracker, access_tokens); + let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); + let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); + + let handle = Handle::new(); + + tokio::task::spawn(graceful_shutdown( + handle.clone(), + rx_halt, + format!("Shutting down tracker API server on socket address: {address}"), + )); + + let tls = self.tls.clone(); + let protocol = if tls.is_some() { "https" } else { "http" }; + + tracing::info!(target: API_LOG_TARGET, "Starting on {protocol}://{}", address); + + let running = Box::pin(async { + match tls { + Some(tls) => custom_axum_server::from_tcp_rustls_with_timeouts(socket, tls) + .handle(handle) + // The TimeoutAcceptor is commented because TSL does not work with it. 
+ // See: https://github.com/torrust/torrust-index/issues/204#issuecomment-2115529214 + //.acceptor(TimeoutAcceptor) + .serve(router.into_make_service_with_connect_info::()) + .await + .expect("Axum server for tracker API crashed."), + None => custom_axum_server::from_tcp_with_timeouts(socket) + .handle(handle) + .acceptor(TimeoutAcceptor) + .serve(router.into_make_service_with_connect_info::()) + .await + .expect("Axum server for tracker API crashed."), + } + }); + + tracing::info!(target: API_LOG_TARGET, "{STARTED_ON} {protocol}://{}", address); + + tx_start + .send(Started { address }) + .expect("the HTTP(s) Tracker API service should not be dropped"); + + running + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use torrust_tracker_test_helpers::configuration::ephemeral_public; + + use crate::bootstrap::app::initialize_with_configuration; + use crate::bootstrap::jobs::make_rust_tls; + use crate::servers::apis::server::{ApiServer, Launcher}; + use crate::servers::registar::Registar; + + #[tokio::test] + async fn it_should_be_able_to_start_and_stop() { + let cfg = Arc::new(ephemeral_public()); + let config = &cfg.http_api.clone().unwrap(); + + let tracker = initialize_with_configuration(&cfg); + + let bind_to = config.bind_address; + + let tls = make_rust_tls(&config.tsl_config) + .await + .map(|tls| tls.expect("tls config failed")); + + let access_tokens = Arc::new(config.access_tokens.clone()); + + let stopped = ApiServer::new(Launcher::new(bind_to, tls)); + + let register = &Registar::default(); + + let started = stopped + .start(tracker, register.give_form(), access_tokens) + .await + .expect("it should start the server"); + let stopped = started.stop().await.expect("it should stop the server"); + + assert_eq!(stopped.state.launcher.bind_to, bind_to); + } +} diff --git a/src/servers/apis/v1/context/auth_key/forms.rs b/src/servers/apis/v1/context/auth_key/forms.rs new file mode 100644 index 000000000..5dfea6e80 --- /dev/null +++ 
b/src/servers/apis/v1/context/auth_key/forms.rs @@ -0,0 +1,22 @@ +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, DefaultOnNull}; + +/// This type contains the info needed to add a new tracker key. +/// +/// You can upload a pre-generated key or let the app generate a new one. +/// You can also set an expiration date or leave it empty (`None`) if you want +/// to create a permanent key that does not expire. +#[serde_as] +#[derive(Serialize, Deserialize, Debug)] +pub struct AddKeyForm { + /// The pre-generated key. Use `None` (null in json) to generate a random key. + #[serde_as(deserialize_as = "DefaultOnNull")] + #[serde(rename = "key")] + pub opt_key: Option, + + /// How long the key will be valid in seconds. Use `None` (null in json) for + /// permanent keys. + #[serde_as(deserialize_as = "DefaultOnNull")] + #[serde(rename = "seconds_valid")] + pub opt_seconds_valid: Option, +} diff --git a/src/servers/apis/v1/context/auth_key/handlers.rs b/src/servers/apis/v1/context/auth_key/handlers.rs new file mode 100644 index 000000000..fed3ad301 --- /dev/null +++ b/src/servers/apis/v1/context/auth_key/handlers.rs @@ -0,0 +1,135 @@ +//! API handlers for the [`auth_key`](crate::servers::apis::v1::context::auth_key) API context. +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; + +use axum::extract::{self, Path, State}; +use axum::response::Response; +use serde::Deserialize; + +use super::forms::AddKeyForm; +use super::responses::{ + auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, failed_to_reload_keys_response, + invalid_auth_key_duration_response, invalid_auth_key_response, +}; +use crate::core::auth::Key; +use crate::core::{AddKeyRequest, Tracker}; +use crate::servers::apis::v1::context::auth_key::resources::AuthKey; +use crate::servers::apis::v1::responses::{invalid_auth_key_param_response, ok_response}; + +/// It handles the request to add a new authentication key. 
+/// +/// It returns these types of responses: +/// +/// - `200` with a json [`AuthKey`] +/// resource. If the key was generated successfully. +/// - `400` with an error if the key couldn't be added because of an invalid +/// request. +/// - `500` with serialized error in debug format. If the key couldn't be +/// generated. +/// +/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#generate-a-new-authentication-key) +/// for more information about this endpoint. +pub async fn add_auth_key_handler( + State(tracker): State>, + extract::Json(add_key_form): extract::Json, +) -> Response { + match tracker + .add_peer_key(AddKeyRequest { + opt_key: add_key_form.opt_key.clone(), + opt_seconds_valid: add_key_form.opt_seconds_valid, + }) + .await + { + Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), + Err(err) => match err { + crate::core::error::PeerKeyError::DurationOverflow { seconds_valid } => { + invalid_auth_key_duration_response(seconds_valid) + } + crate::core::error::PeerKeyError::InvalidKey { key, source } => invalid_auth_key_response(&key, source), + crate::core::error::PeerKeyError::DatabaseError { source } => failed_to_generate_key_response(source), + }, + } +} + +/// It handles the request to generate a new authentication key. +/// +/// It returns two types of responses: +/// +/// - `200` with a json [`AuthKey`] +/// resource. If the key was generated successfully. +/// - `500` with serialized error in debug format. If the key couldn't be +/// generated. +/// +/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#generate-a-new-authentication-key) +/// for more information about this endpoint. +/// +/// This endpoint has been deprecated. Use [`add_auth_key_handler`]. 
+pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { + let seconds_valid = seconds_valid_or_key; + match tracker.generate_auth_key(Some(Duration::from_secs(seconds_valid))).await { + Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), + Err(e) => failed_to_generate_key_response(e), + } +} + +/// A container for the `key` parameter extracted from the URL PATH. +/// +/// It does not perform any validation, it just stores the value. +/// +/// In the current API version, the `key` parameter can be either a valid key +/// like `xqD6NWH9TcKrOCwDmqcdH5hF5RrbL0A6` or the number of seconds the +/// key will be valid, for example two minutes `120`. +/// +/// For example, the `key` is used in the following requests: +/// +/// - `POST /api/v1/key/120`. It will generate a new key valid for two minutes. +/// - `DELETE /api/v1/key/xqD6NWH9TcKrOCwDmqcdH5hF5RrbL0A6`. It will delete the +/// key `xqD6NWH9TcKrOCwDmqcdH5hF5RrbL0A6`. +/// +/// > **NOTICE**: this may change in the future, in the [API v2](https://github.com/torrust/torrust-tracker/issues/144). +#[derive(Deserialize)] +pub struct KeyParam(String); + +/// It handles the request to delete an authentication key. +/// +/// It returns two types of responses: +/// +/// - `200` with an json [`ActionStatus::Ok`](crate::servers::apis::v1::responses::ActionStatus::Ok) +/// response. If the key was deleted successfully. +/// - `500` with serialized error in debug format. If the key couldn't be +/// deleted. +/// +/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#delete-an-authentication-key) +/// for more information about this endpoint. 
+/// - `500` with serialized error in debug format. If they couldn't be
+/// reloaded.
+//! > **NOTICE**: the `key` and `seconds_valid` fields are optional. If `key` is not provided the tracker
+//! > will generate a random one. If the `seconds_valid` field is not provided the key will be permanent. You can use the `null` value.
+//! If you try to delete a non-existent key, the response will be an error with
+//! a `500` status code.
+ +use serde::{Deserialize, Serialize}; +use torrust_tracker_clock::conv::convert_from_iso_8601_to_timestamp; + +use crate::core::auth::{self, Key}; + +/// A resource that represents an authentication key. +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct AuthKey { + /// The authentication key. + pub key: String, + /// The timestamp when the key will expire. + #[deprecated(since = "3.0.0", note = "please use `expiry_time` instead")] + pub valid_until: Option, // todo: remove when the torrust-index-backend starts using the `expiry_time` attribute. + /// The ISO 8601 timestamp when the key will expire. + pub expiry_time: Option, +} + +impl From for auth::PeerKey { + fn from(auth_key_resource: AuthKey) -> Self { + auth::PeerKey { + key: auth_key_resource.key.parse::().unwrap(), + valid_until: auth_key_resource + .expiry_time + .map(|expiry_time| convert_from_iso_8601_to_timestamp(&expiry_time)), + } + } +} + +#[allow(deprecated)] +impl From for AuthKey { + fn from(auth_key: auth::PeerKey) -> Self { + match (auth_key.valid_until, auth_key.expiry_time()) { + (Some(valid_until), Some(expiry_time)) => AuthKey { + key: auth_key.key.to_string(), + valid_until: Some(valid_until.as_secs()), + expiry_time: Some(expiry_time.to_string()), + }, + _ => AuthKey { + key: auth_key.key.to_string(), + valid_until: None, + expiry_time: None, + }, + } + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use torrust_tracker_clock::clock::stopped::Stopped as _; + use torrust_tracker_clock::clock::{self, Time}; + + use super::AuthKey; + use crate::core::auth::{self, Key}; + use crate::CurrentClock; + + struct TestTime { + pub timestamp: u64, + pub iso_8601_v1: String, + pub iso_8601_v2: String, + } + + fn one_hour_after_unix_epoch() -> TestTime { + let timestamp = 60_u64; + let iso_8601_v1 = "1970-01-01T00:01:00.000Z".to_string(); + let iso_8601_v2 = "1970-01-01 00:01:00 UTC".to_string(); + TestTime { + timestamp, + iso_8601_v1, + iso_8601_v2, + } + } + + 
#[test] + #[allow(deprecated)] + fn it_should_be_convertible_into_an_auth_key() { + clock::Stopped::local_set_to_unix_epoch(); + + let auth_key_resource = AuthKey { + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line + valid_until: Some(one_hour_after_unix_epoch().timestamp), + expiry_time: Some(one_hour_after_unix_epoch().iso_8601_v1), + }; + + assert_eq!( + auth::PeerKey::from(auth_key_resource), + auth::PeerKey { + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line + valid_until: Some(CurrentClock::now_add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap()) + } + ); + } + + #[test] + #[allow(deprecated)] + fn it_should_be_convertible_from_an_auth_key() { + clock::Stopped::local_set_to_unix_epoch(); + + let auth_key = auth::PeerKey { + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line + valid_until: Some(CurrentClock::now_add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap()), + }; + + assert_eq!( + AuthKey::from(auth_key), + AuthKey { + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line + valid_until: Some(one_hour_after_unix_epoch().timestamp), + expiry_time: Some(one_hour_after_unix_epoch().iso_8601_v2), + } + ); + } + + #[test] + #[allow(deprecated)] + fn it_should_be_convertible_into_json() { + assert_eq!( + serde_json::to_string(&AuthKey { + key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line + valid_until: Some(one_hour_after_unix_epoch().timestamp), + expiry_time: Some(one_hour_after_unix_epoch().iso_8601_v1), + }) + .unwrap(), + "{\"key\":\"IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM\",\"valid_until\":60,\"expiry_time\":\"1970-01-01T00:01:00.000Z\"}" // cspell:disable-line + ); + } +} diff --git a/src/servers/apis/v1/context/auth_key/responses.rs b/src/servers/apis/v1/context/auth_key/responses.rs new file mode 100644 index 000000000..4905d9adc --- /dev/null +++ 
+/// `500` error response when the provided key cannot be added.
+            // The POST /key/:seconds_valid has been deprecated and it will be removed in the future.
+//! It is used to check if the service is running. Especially for containers.
+pub mod handlers; +pub mod resources; diff --git a/src/servers/apis/v1/context/health_check/resources.rs b/src/servers/apis/v1/context/health_check/resources.rs new file mode 100644 index 000000000..9830e643c --- /dev/null +++ b/src/servers/apis/v1/context/health_check/resources.rs @@ -0,0 +1,14 @@ +//! API resources for the [`stats`](crate::servers::apis::v1::context::health_check) +//! API context. +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub enum Status { + Ok, + Error, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Report { + pub status: Status, +} diff --git a/src/servers/apis/v1/context/mod.rs b/src/servers/apis/v1/context/mod.rs new file mode 100644 index 000000000..be67cd96a --- /dev/null +++ b/src/servers/apis/v1/context/mod.rs @@ -0,0 +1,9 @@ +//! API is organized in resource groups called contexts. +//! +//! Each context is a module that contains the API endpoints related to a +//! specific resource group. +pub mod auth_key; +pub mod health_check; +pub mod stats; +pub mod torrent; +pub mod whitelist; diff --git a/src/servers/apis/v1/context/stats/handlers.rs b/src/servers/apis/v1/context/stats/handlers.rs new file mode 100644 index 000000000..c3be5dc7a --- /dev/null +++ b/src/servers/apis/v1/context/stats/handlers.rs @@ -0,0 +1,21 @@ +//! API handlers for the [`stats`](crate::servers::apis::v1::context::stats) +//! API context. +use std::sync::Arc; + +use axum::extract::State; +use axum::response::Json; + +use super::resources::Stats; +use super::responses::stats_response; +use crate::core::services::statistics::get_metrics; +use crate::core::Tracker; + +/// It handles the request to get the tracker statistics. +/// +/// It returns a `200` response with a json [`Stats`] +/// +/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::stats#get-tracker-statistics) +/// for more information about this endpoint. 
+pub async fn get_stats_handler(State(tracker): State>) -> Json { + stats_response(get_metrics(tracker.clone()).await) +} diff --git a/src/servers/apis/v1/context/stats/mod.rs b/src/servers/apis/v1/context/stats/mod.rs new file mode 100644 index 000000000..80f37f73f --- /dev/null +++ b/src/servers/apis/v1/context/stats/mod.rs @@ -0,0 +1,52 @@ +//! Tracker statistics API context. +//! +//! The tracker collects statistics about the number of torrents, seeders, +//! leechers, completed downloads, and the number of requests handled. +//! +//! # Endpoints +//! +//! - [Get tracker statistics](#get-tracker-statistics) +//! +//! # Get tracker statistics +//! +//! `GET /stats` +//! +//! Returns the tracker statistics. +//! +//! **Example request** +//! +//! ```bash +//! curl "http://127.0.0.1:1212/api/v1/stats?token=MyAccessToken" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "torrents": 0, +//! "seeders": 0, +//! "completed": 0, +//! "leechers": 0, +//! "tcp4_connections_handled": 0, +//! "tcp4_announces_handled": 0, +//! "tcp4_scrapes_handled": 0, +//! "tcp6_connections_handled": 0, +//! "tcp6_announces_handled": 0, +//! "tcp6_scrapes_handled": 0, +//! "udp4_connections_handled": 0, +//! "udp4_announces_handled": 0, +//! "udp4_scrapes_handled": 0, +//! "udp6_connections_handled": 0, +//! "udp6_announces_handled": 0, +//! "udp6_scrapes_handled": 0 +//! } +//! ``` +//! +//! **Resource** +//! +//! Refer to the API [`Stats`](crate::servers::apis::v1::context::stats::resources::Stats) +//! resource for more information about the response attributes. +pub mod handlers; +pub mod resources; +pub mod responses; +pub mod routes; diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs new file mode 100644 index 000000000..9e8ab6bab --- /dev/null +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -0,0 +1,125 @@ +//! API resources for the [`stats`](crate::servers::apis::v1::context::stats) +//! 
API context. +use serde::{Deserialize, Serialize}; + +use crate::core::services::statistics::TrackerMetrics; + +/// It contains all the statistics generated by the tracker. +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Stats { + // Torrent metrics + /// Total number of torrents. + pub torrents: u64, + /// Total number of seeders for all torrents. + pub seeders: u64, + /// Total number of peers that have ever completed downloading for all torrents. + pub completed: u64, + /// Total number of leechers for all torrents. + pub leechers: u64, + + // Protocol metrics + /// Total number of TCP (HTTP tracker) connections from IPv4 peers. + /// Since the HTTP tracker spec does not require a handshake, this metric + /// increases for every HTTP request. + pub tcp4_connections_handled: u64, + /// Total number of TCP (HTTP tracker) `announce` requests from IPv4 peers. + pub tcp4_announces_handled: u64, + /// Total number of TCP (HTTP tracker) `scrape` requests from IPv4 peers. + pub tcp4_scrapes_handled: u64, + /// Total number of TCP (HTTP tracker) connections from IPv6 peers. + pub tcp6_connections_handled: u64, + /// Total number of TCP (HTTP tracker) `announce` requests from IPv6 peers. + pub tcp6_announces_handled: u64, + /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. + pub tcp6_scrapes_handled: u64, + /// Total number of UDP (UDP tracker) connections from IPv4 peers. + pub udp4_connections_handled: u64, + /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. + pub udp4_announces_handled: u64, + /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. + pub udp4_scrapes_handled: u64, + /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. + pub udp6_connections_handled: u64, + /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. + pub udp6_announces_handled: u64, + /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. 
+ pub udp6_scrapes_handled: u64, +} + +impl From for Stats { + fn from(metrics: TrackerMetrics) -> Self { + Self { + torrents: metrics.torrents_metrics.torrents, + seeders: metrics.torrents_metrics.complete, + completed: metrics.torrents_metrics.downloaded, + leechers: metrics.torrents_metrics.incomplete, + tcp4_connections_handled: metrics.protocol_metrics.tcp4_connections_handled, + tcp4_announces_handled: metrics.protocol_metrics.tcp4_announces_handled, + tcp4_scrapes_handled: metrics.protocol_metrics.tcp4_scrapes_handled, + tcp6_connections_handled: metrics.protocol_metrics.tcp6_connections_handled, + tcp6_announces_handled: metrics.protocol_metrics.tcp6_announces_handled, + tcp6_scrapes_handled: metrics.protocol_metrics.tcp6_scrapes_handled, + udp4_connections_handled: metrics.protocol_metrics.udp4_connections_handled, + udp4_announces_handled: metrics.protocol_metrics.udp4_announces_handled, + udp4_scrapes_handled: metrics.protocol_metrics.udp4_scrapes_handled, + udp6_connections_handled: metrics.protocol_metrics.udp6_connections_handled, + udp6_announces_handled: metrics.protocol_metrics.udp6_announces_handled, + udp6_scrapes_handled: metrics.protocol_metrics.udp6_scrapes_handled, + } + } +} + +#[cfg(test)] +mod tests { + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + + use super::Stats; + use crate::core::services::statistics::TrackerMetrics; + use crate::core::statistics::Metrics; + + #[test] + fn stats_resource_should_be_converted_from_tracker_metrics() { + assert_eq!( + Stats::from(TrackerMetrics { + torrents_metrics: TorrentsMetrics { + complete: 1, + downloaded: 2, + incomplete: 3, + torrents: 4 + }, + protocol_metrics: Metrics { + tcp4_connections_handled: 5, + tcp4_announces_handled: 6, + tcp4_scrapes_handled: 7, + tcp6_connections_handled: 8, + tcp6_announces_handled: 9, + tcp6_scrapes_handled: 10, + udp4_connections_handled: 11, + udp4_announces_handled: 12, + udp4_scrapes_handled: 13, + udp6_connections_handled: 14, + 
udp6_announces_handled: 15, + udp6_scrapes_handled: 16 + } + }), + Stats { + torrents: 4, + seeders: 1, + completed: 2, + leechers: 3, + tcp4_connections_handled: 5, + tcp4_announces_handled: 6, + tcp4_scrapes_handled: 7, + tcp6_connections_handled: 8, + tcp6_announces_handled: 9, + tcp6_scrapes_handled: 10, + udp4_connections_handled: 11, + udp4_announces_handled: 12, + udp4_scrapes_handled: 13, + udp6_connections_handled: 14, + udp6_announces_handled: 15, + udp6_scrapes_handled: 16 + } + ); + } +} diff --git a/src/servers/apis/v1/context/stats/responses.rs b/src/servers/apis/v1/context/stats/responses.rs new file mode 100644 index 000000000..9d03ccedf --- /dev/null +++ b/src/servers/apis/v1/context/stats/responses.rs @@ -0,0 +1,11 @@ +//! API responses for the [`stats`](crate::servers::apis::v1::context::stats) +//! API context. +use axum::response::Json; + +use super::resources::Stats; +use crate::core::services::statistics::TrackerMetrics; + +/// `200` response that contains the [`Stats`] resource as json. +pub fn stats_response(tracker_metrics: TrackerMetrics) -> Json { + Json(Stats::from(tracker_metrics)) +} diff --git a/src/servers/apis/v1/context/stats/routes.rs b/src/servers/apis/v1/context/stats/routes.rs new file mode 100644 index 000000000..d8d552697 --- /dev/null +++ b/src/servers/apis/v1/context/stats/routes.rs @@ -0,0 +1,17 @@ +//! API routes for the [`stats`](crate::servers::apis::v1::context::stats) API context. +//! +//! - `GET /stats` +//! +//! Refer to the [API endpoint documentation](crate::servers::apis::v1::context::stats). +use std::sync::Arc; + +use axum::routing::get; +use axum::Router; + +use super::handlers::get_stats_handler; +use crate::core::Tracker; + +/// It adds the routes to the router for the [`stats`](crate::servers::apis::v1::context::stats) API context. 
+pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { + router.route(&format!("{prefix}/stats"), get(get_stats_handler).with_state(tracker)) +} diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs new file mode 100644 index 000000000..ebca504fd --- /dev/null +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -0,0 +1,135 @@ +//! API handlers for the [`torrent`](crate::servers::apis::v1::context::torrent) +//! API context. +use std::fmt; +use std::str::FromStr; +use std::sync::Arc; + +use axum::extract::{Path, State}; +use axum::response::{IntoResponse, Response}; +use axum_extra::extract::Query; +use serde::{de, Deserialize, Deserializer}; +use thiserror::Error; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; + +use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; +use crate::core::services::torrent::{get_torrent_info, get_torrents, get_torrents_page}; +use crate::core::Tracker; +use crate::servers::apis::v1::responses::invalid_info_hash_param_response; +use crate::servers::apis::InfoHashParam; + +/// It handles the request to get the torrent data. +/// +/// It returns: +/// +/// - `200` response with a json [`Torrent`](crate::servers::apis::v1::context::torrent::resources::torrent::Torrent). +/// - `500` with serialized error in debug format if the torrent is not known. +/// +/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::torrent#get-a-torrent) +/// for more information about this endpoint. 
+pub async fn get_torrent_handler(State(tracker): State>, Path(info_hash): Path) -> Response { + match InfoHash::from_str(&info_hash.0) { + Err(_) => invalid_info_hash_param_response(&info_hash.0), + Ok(info_hash) => match get_torrent_info(tracker.clone(), &info_hash).await { + Some(info) => torrent_info_response(info).into_response(), + None => torrent_not_known_response(), + }, + } +} + +/// A container for the URL query parameters. +/// +/// Pagination: `offset` and `limit`. +/// Array of infohashes: `info_hash`. +/// +/// You can either get all torrents with pagination or get a list of torrents +/// providing a list of infohashes. For example: +/// +/// First page of torrents: +/// +/// +/// +/// +/// Only two torrents: +/// +/// +/// +/// +/// NOTICE: Pagination is ignored if array of infohashes is provided. +#[derive(Deserialize, Debug)] +pub struct QueryParams { + /// The offset of the first page to return. Starts at 0. + #[serde(default, deserialize_with = "empty_string_as_none")] + pub offset: Option, + /// The maximum number of items to return per page. + #[serde(default, deserialize_with = "empty_string_as_none")] + pub limit: Option, + /// A list of infohashes to retrieve. + #[serde(default, rename = "info_hash")] + pub info_hashes: Vec, +} + +/// It handles the request to get a list of torrents. +/// +/// It returns a `200` response with a json array with [`crate::servers::apis::v1::context::torrent::resources::torrent::ListItem`] resources. +/// +/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::torrent#list-torrents) +/// for more information about this endpoint. 
+pub async fn get_torrents_handler(State(tracker): State>, pagination: Query) -> Response { + tracing::debug!("pagination: {:?}", pagination); + + if pagination.0.info_hashes.is_empty() { + torrent_list_response( + &get_torrents_page( + tracker.clone(), + Some(&Pagination::new_with_options(pagination.0.offset, pagination.0.limit)), + ) + .await, + ) + .into_response() + } else { + match parse_info_hashes(pagination.0.info_hashes) { + Ok(info_hashes) => torrent_list_response(&get_torrents(tracker.clone(), &info_hashes).await).into_response(), + Err(err) => match err { + QueryParamError::InvalidInfoHash { info_hash } => invalid_info_hash_param_response(&info_hash), + }, + } + } +} + +#[derive(Error, Debug)] +pub enum QueryParamError { + #[error("invalid infohash {info_hash}")] + InvalidInfoHash { info_hash: String }, +} + +fn parse_info_hashes(info_hashes_str: Vec) -> Result, QueryParamError> { + let mut info_hashes: Vec = Vec::new(); + + for info_hash_str in info_hashes_str { + match InfoHash::from_str(&info_hash_str) { + Ok(info_hash) => info_hashes.push(info_hash), + Err(_err) => { + return Err(QueryParamError::InvalidInfoHash { + info_hash: info_hash_str, + }) + } + } + } + + Ok(info_hashes) +} + +/// Serde deserialization decorator to map empty Strings to None, +fn empty_string_as_none<'de, D, T>(de: D) -> Result, D::Error> +where + D: Deserializer<'de>, + T: FromStr, + T::Err: fmt::Display, +{ + let opt = Option::::deserialize(de)?; + match opt.as_deref() { + None | Some("") => Ok(None), + Some(s) => FromStr::from_str(s).map_err(de::Error::custom).map(Some), + } +} diff --git a/src/servers/apis/v1/context/torrent/mod.rs b/src/servers/apis/v1/context/torrent/mod.rs new file mode 100644 index 000000000..1658e1748 --- /dev/null +++ b/src/servers/apis/v1/context/torrent/mod.rs @@ -0,0 +1,113 @@ +//! Torrents API context. +//! +//! This API context is responsible for handling all the requests related to +//! the torrents data stored by the tracker. +//! +//! 
# Endpoints +//! +//! - [Get a torrent](#get-a-torrent) +//! - [List torrents](#list-torrents) +//! +//! # Get a torrent +//! +//! `GET /torrent/:info_hash` +//! +//! Returns all the information about a torrent. +//! +//! **Path parameters** +//! +//! Name | Type | Description | Required | Example +//! ---|---|---|---|--- +//! `info_hash` | 40-char string | The Info Hash v1 | Yes | `5452869be36f9f3350ccee6b4544e7e76caaadab` +//! +//! **Example request** +//! +//! ```bash +//! curl "http://127.0.0.1:1212/api/v1/torrent/5452869be36f9f3350ccee6b4544e7e76caaadab?token=MyAccessToken" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "info_hash": "5452869be36f9f3350ccee6b4544e7e76caaadab", +//! "seeders": 1, +//! "completed": 0, +//! "leechers": 0, +//! "peers": [ +//! { +//! "peer_id": { +//! "id": "0x2d7142343431302d2a64465a3844484944704579", +//! "client": "qBittorrent" +//! }, +//! "peer_addr": "192.168.1.88:17548", +//! "updated": 1680082693001, +//! "updated_milliseconds_ago": 1680082693001, +//! "uploaded": 0, +//! "downloaded": 0, +//! "left": 0, +//! "event": "None" +//! } +//! ] +//! } +//! ``` +//! +//! **Not Found response** `200` +//! +//! This response is returned when the tracker does not have the torrent. +//! +//! ```json +//! "torrent not known" +//! ``` +//! +//! **Resource** +//! +//! Refer to the API [`Torrent`](crate::servers::apis::v1::context::torrent::resources::torrent::Torrent) +//! resource for more information about the response attributes. +//! +//! # List torrents +//! +//! `GET /torrents` +//! +//! Returns basic information (no peer list) for all torrents. +//! +//! **Query parameters** +//! +//! The endpoint supports pagination. +//! +//! Name | Type | Description | Required | Example +//! ---|---|---|---|--- +//! `offset` | positive integer | The page number, starting at 0 | No | `1` +//! `limit` | positive integer | Page size. The number of results per page | No | `10` +//! +//! **Example request** +//! +//! 
```bash +//! curl "http://127.0.0.1:1212/api/v1/torrents?token=MyAccessToken&offset=1&limit=1" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! [ +//! { +//! "info_hash": "5452869be36f9f3350ccee6b4544e7e76caaadab", +//! "seeders": 1, +//! "completed": 0, +//! "leechers": 0, +//! "peers": null +//! } +//! ] +//! ``` +//! +//! **Resource** +//! +//! Refer to the API [`ListItem`](crate::servers::apis::v1::context::torrent::resources::torrent::ListItem) +//! resource for more information about the attributes for a single item in the +//! response. +//! +//! > **NOTICE**: this endpoint does not include the `peers` list. +pub mod handlers; +pub mod resources; +pub mod responses; +pub mod routes; diff --git a/src/servers/apis/v1/context/torrent/resources/mod.rs b/src/servers/apis/v1/context/torrent/resources/mod.rs new file mode 100644 index 000000000..a6dbff726 --- /dev/null +++ b/src/servers/apis/v1/context/torrent/resources/mod.rs @@ -0,0 +1,4 @@ +//! API resources for the [`torrent`](crate::servers::apis::v1::context::torrent) +//! API context. +pub mod peer; +pub mod torrent; diff --git a/src/servers/apis/v1/context/torrent/resources/peer.rs b/src/servers/apis/v1/context/torrent/resources/peer.rs new file mode 100644 index 000000000..dd4a6cc26 --- /dev/null +++ b/src/servers/apis/v1/context/torrent/resources/peer.rs @@ -0,0 +1,77 @@ +//! `Peer` and Peer `Id` API resources. +use aquatic_udp_protocol::PeerId; +use derive_more::From; +use serde::{Deserialize, Serialize}; +use torrust_tracker_primitives::peer; + +/// `Peer` API resource. +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Peer { + /// The peer's ID. See [`Id`]. + pub peer_id: Id, + /// The peer's socket address. For example: `192.168.1.88:17548`. + pub peer_addr: String, + /// The peer's last update time in milliseconds. 
+ #[deprecated(since = "2.0.0", note = "please use `updated_milliseconds_ago` instead")] + pub updated: u128, + /// The peer's last update time in milliseconds. + pub updated_milliseconds_ago: u128, + /// The peer's uploaded bytes. + pub uploaded: i64, + /// The peer's downloaded bytes. + pub downloaded: i64, + /// The peer's left bytes (pending to download). + pub left: i64, + /// The peer's event: `started`, `stopped`, `completed`. + /// See [`AnnounceEvent`](aquatic_udp_protocol::AnnounceEvent). + pub event: String, +} + +/// Peer `Id` API resource. +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Id { + /// The peer's ID in hex format. For example: `0x2d7142343431302d2a64465a3844484944704579`. + pub id: Option, + /// The peer's client name. For example: `qBittorrent`. + pub client: Option, +} + +impl From for Id { + fn from(peer_id: PeerId) -> Self { + let peer_id = peer::Id::from(peer_id); + Id { + id: peer_id.to_hex_string(), + client: peer_id.get_client_name(), + } + } +} + +impl From for Peer { + fn from(value: peer::Peer) -> Self { + #[allow(deprecated)] + Peer { + peer_id: Id::from(value.peer_id), + peer_addr: value.peer_addr.to_string(), + updated: value.updated.as_millis(), + updated_milliseconds_ago: value.updated.as_millis(), + uploaded: value.uploaded.0.get(), + downloaded: value.downloaded.0.get(), + left: value.left.0.get(), + event: format!("{:?}", value.event), + } + } +} + +#[derive(From, PartialEq, Default)] +pub struct Vector(pub Vec); + +impl FromIterator for Vector { + fn from_iter>(iter: T) -> Self { + let mut peers = Vector::default(); + + for i in iter { + peers.0.push(i.into()); + } + peers + } +} diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs new file mode 100644 index 000000000..657382c0c --- /dev/null +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -0,0 +1,158 @@ +//! `Torrent` and `ListItem` API resources. +//! +//! 
- `Torrent` is the full torrent resource. +//! - `ListItem` is a list item resource on a torrent list. `ListItem` does +//! include a `peers` field but it is always `None` in the struct and `null` in +//! the JSON response. +use serde::{Deserialize, Serialize}; + +use crate::core::services::torrent::{BasicInfo, Info}; + +/// `Torrent` API resource. +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Torrent { + /// The torrent's info hash v1. + pub info_hash: String, + /// The torrent's seeders counter. Active peers with a full copy of the + /// torrent. + pub seeders: u64, + /// The torrent's completed counter. Peers that have ever completed the + /// download. + pub completed: u64, + /// The torrent's leechers counter. Active peers that are downloading the + /// torrent. + pub leechers: u64, + /// The torrent's peers. See [`Peer`](crate::servers::apis::v1::context::torrent::resources::peer::Peer). + #[serde(skip_serializing_if = "Option::is_none")] + pub peers: Option>, +} + +/// `ListItem` API resource. A list item on a torrent list. +/// `ListItem` does include a `peers` field but it is always `None` in the +/// struct and `null` in the JSON response. +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct ListItem { + /// The torrent's info hash v1. + pub info_hash: String, + /// The torrent's seeders counter. Active peers with a full copy of the + /// torrent. + pub seeders: u64, + /// The torrent's completed counter. Peers that have ever completed the + /// download. + pub completed: u64, + /// The torrent's leechers counter. Active peers that are downloading the + /// torrent. + pub leechers: u64, +} + +impl ListItem { + #[must_use] + pub fn new_vec(basic_info_vec: &[BasicInfo]) -> Vec { + basic_info_vec + .iter() + .map(|basic_info| ListItem::from((*basic_info).clone())) + .collect() + } +} + +/// Maps an array of the domain type [`BasicInfo`] +/// to the API resource type [`ListItem`]. 
+#[must_use] +pub fn to_resource(basic_info_vec: &[BasicInfo]) -> Vec { + basic_info_vec + .iter() + .map(|basic_info| ListItem::from((*basic_info).clone())) + .collect() +} + +impl From for Torrent { + fn from(info: Info) -> Self { + let peers: Option = info.peers.map(|peers| peers.into_iter().collect()); + + let peers: Option> = peers.map(|peers| peers.0); + + Self { + info_hash: info.info_hash.to_string(), + seeders: info.seeders, + completed: info.completed, + leechers: info.leechers, + peers, + } + } +} + +impl From for ListItem { + fn from(basic_info: BasicInfo) -> Self { + Self { + info_hash: basic_info.info_hash.to_string(), + seeders: basic_info.seeders, + completed: basic_info.completed, + leechers: basic_info.leechers, + } + } +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::str::FromStr; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + + use super::Torrent; + use crate::core::services::torrent::{BasicInfo, Info}; + use crate::servers::apis::v1::context::torrent::resources::peer::Peer; + use crate::servers::apis::v1::context::torrent::resources::torrent::ListItem; + + fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), + event: AnnounceEvent::Started, + } + } + + #[test] + fn torrent_resource_should_be_converted_from_torrent_info() { + assert_eq!( + Torrent::from(Info { + info_hash: InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + seeders: 1, + completed: 2, + leechers: 3, + peers: Some(vec![sample_peer()]), + }), + Torrent { + info_hash: 
"9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 2, + leechers: 3, + peers: Some(vec![Peer::from(sample_peer())]), + } + ); + } + + #[test] + fn torrent_resource_list_item_should_be_converted_from_the_basic_torrent_info() { + assert_eq!( + ListItem::from(BasicInfo { + info_hash: InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + seeders: 1, + completed: 2, + leechers: 3, + }), + ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 2, + leechers: 3, + } + ); + } +} diff --git a/src/servers/apis/v1/context/torrent/responses.rs b/src/servers/apis/v1/context/torrent/responses.rs new file mode 100644 index 000000000..5daceaf94 --- /dev/null +++ b/src/servers/apis/v1/context/torrent/responses.rs @@ -0,0 +1,27 @@ +//! API responses for the [`torrent`](crate::servers::apis::v1::context::torrent) +//! API context. +use axum::response::{IntoResponse, Json, Response}; +use serde_json::json; + +use super::resources::torrent::{ListItem, Torrent}; +use crate::core::services::torrent::{BasicInfo, Info}; + +/// `200` response that contains an array of +/// [`ListItem`] +/// resources as json. +pub fn torrent_list_response(basic_infos: &[BasicInfo]) -> Json> { + Json(ListItem::new_vec(basic_infos)) +} + +/// `200` response that contains a +/// [`Torrent`] +/// resources as json. +pub fn torrent_info_response(info: Info) -> Json { + Json(Torrent::from(info)) +} + +/// `500` error response in plain text returned when a torrent is not found. +#[must_use] +pub fn torrent_not_known_response() -> Response { + Json(json!("torrent not known")).into_response() +} diff --git a/src/servers/apis/v1/context/torrent/routes.rs b/src/servers/apis/v1/context/torrent/routes.rs new file mode 100644 index 000000000..6f8c28df5 --- /dev/null +++ b/src/servers/apis/v1/context/torrent/routes.rs @@ -0,0 +1,24 @@ +//! 
API routes for the [`torrent`](crate::servers::apis::v1::context::torrent) API context. +//! +//! - `GET /torrent/:info_hash` +//! - `GET /torrents` +//! +//! Refer to the [API endpoint documentation](crate::servers::apis::v1::context::torrent). +use std::sync::Arc; + +use axum::routing::get; +use axum::Router; + +use super::handlers::{get_torrent_handler, get_torrents_handler}; +use crate::core::Tracker; + +/// It adds the routes to the router for the [`torrent`](crate::servers::apis::v1::context::torrent) API context. +pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { + // Torrents + router + .route( + &format!("{prefix}/torrent/:info_hash"), + get(get_torrent_handler).with_state(tracker.clone()), + ) + .route(&format!("{prefix}/torrents"), get(get_torrents_handler).with_state(tracker)) +} diff --git a/src/servers/apis/v1/context/whitelist/handlers.rs b/src/servers/apis/v1/context/whitelist/handlers.rs new file mode 100644 index 000000000..32e434918 --- /dev/null +++ b/src/servers/apis/v1/context/whitelist/handlers.rs @@ -0,0 +1,77 @@ +//! API handlers for the [`whitelist`](crate::servers::apis::v1::context::whitelist) +//! API context. +use std::str::FromStr; +use std::sync::Arc; + +use axum::extract::{Path, State}; +use axum::response::Response; +use torrust_tracker_primitives::info_hash::InfoHash; + +use super::responses::{ + failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, +}; +use crate::core::Tracker; +use crate::servers::apis::v1::responses::{invalid_info_hash_param_response, ok_response}; +use crate::servers::apis::InfoHashParam; + +/// It handles the request to add a torrent to the whitelist. +/// +/// It returns: +/// +/// - `200` response with a [`ActionStatus::Ok`](crate::servers::apis::v1::responses::ActionStatus::Ok) in json. +/// - `500` with serialized error in debug format if the torrent couldn't be whitelisted. 
+/// +/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::whitelist#add-a-torrent-to-the-whitelist) +/// for more information about this endpoint. +pub async fn add_torrent_to_whitelist_handler( + State(tracker): State>, + Path(info_hash): Path, +) -> Response { + match InfoHash::from_str(&info_hash.0) { + Err(_) => invalid_info_hash_param_response(&info_hash.0), + Ok(info_hash) => match tracker.add_torrent_to_whitelist(&info_hash).await { + Ok(()) => ok_response(), + Err(e) => failed_to_whitelist_torrent_response(e), + }, + } +} + +/// It handles the request to remove a torrent to the whitelist. +/// +/// It returns: +/// +/// - `200` response with a [`ActionStatus::Ok`](crate::servers::apis::v1::responses::ActionStatus::Ok) in json. +/// - `500` with serialized error in debug format if the torrent couldn't be +/// removed from the whitelisted. +/// +/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::whitelist#remove-a-torrent-from-the-whitelist) +/// for more information about this endpoint. +pub async fn remove_torrent_from_whitelist_handler( + State(tracker): State>, + Path(info_hash): Path, +) -> Response { + match InfoHash::from_str(&info_hash.0) { + Err(_) => invalid_info_hash_param_response(&info_hash.0), + Ok(info_hash) => match tracker.remove_torrent_from_whitelist(&info_hash).await { + Ok(()) => ok_response(), + Err(e) => failed_to_remove_torrent_from_whitelist_response(e), + }, + } +} + +/// It handles the request to reload the torrent whitelist from the database. +/// +/// It returns: +/// +/// - `200` response with a [`ActionStatus::Ok`](crate::servers::apis::v1::responses::ActionStatus::Ok) in json. +/// - `500` with serialized error in debug format if the torrent whitelist +/// couldn't be reloaded from the database. +/// +/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::whitelist#reload-the-whitelist) +/// for more information about this endpoint. 
+pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { + match tracker.load_whitelist_from_database().await { + Ok(()) => ok_response(), + Err(e) => failed_to_reload_whitelist_response(e), + } +} diff --git a/src/servers/apis/v1/context/whitelist/mod.rs b/src/servers/apis/v1/context/whitelist/mod.rs new file mode 100644 index 000000000..79da43fdc --- /dev/null +++ b/src/servers/apis/v1/context/whitelist/mod.rs @@ -0,0 +1,98 @@ +//! Whitelist API context. +//! +//! This API context is responsible for handling all the requests related to +//! the torrent whitelist. +//! +//! A torrent whitelist is a list of Info Hashes that are allowed to be tracked +//! by the tracker. This is useful when you want to limit the torrents that are +//! tracked by the tracker. +//! +//! Common tracker requests like `announce` and `scrape` are limited to the +//! torrents in the whitelist. The whitelist can be updated using the API. +//! +//! > **NOTICE**: the whitelist is only used when the tracker is configured to +//! > in `listed` or `private_listed` modes. Refer to the +//! > [configuration crate documentation](https://docs.rs/torrust-tracker-configuration) +//! > to know how to enable the those modes. +//! +//! > **NOTICE**: if the tracker is not running in `listed` or `private_listed` +//! > modes the requests to the whitelist API will be ignored. +//! +//! # Endpoints +//! +//! - [Add a torrent to the whitelist](#add-a-torrent-to-the-whitelist) +//! - [Remove a torrent from the whitelist](#remove-a-torrent-from-the-whitelist) +//! - [Reload the whitelist](#reload-the-whitelist) +//! +//! # Add a torrent to the whitelist +//! +//! `POST /whitelist/:info_hash` +//! +//! It adds a torrent infohash to the whitelist. +//! +//! **Path parameters** +//! +//! Name | Type | Description | Required | Example +//! ---|---|---|---|--- +//! `info_hash` | 40-char string | The Info Hash v1 | Yes | `5452869be36f9f3350ccee6b4544e7e76caaadab` +//! +//! **Example request** +//! 
+//! ```bash +//! curl -X POST "http://127.0.0.1:1212/api/v1/whitelist/5452869be36f9f3350ccee6b4544e7e76caaadab?token=MyAccessToken" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "status": "ok" +//! } +//! ``` +//! +//! # Remove a torrent from the whitelist +//! +//! `DELETE /whitelist/:info_hash` +//! +//! It removes a torrent infohash to the whitelist. +//! +//! **Path parameters** +//! +//! Name | Type | Description | Required | Example +//! ---|---|---|---|--- +//! `info_hash` | 40-char string | The Info Hash v1 | Yes | `5452869be36f9f3350ccee6b4544e7e76caaadab` +//! +//! **Example request** +//! +//! ```bash +//! curl -X DELETE "http://127.0.0.1:1212/api/v1/whitelist/5452869be36f9f3350ccee6b4544e7e76caaadab?token=MyAccessToken" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "status": "ok" +//! } +//! ``` +//! +//! # Reload the whitelist +//! +//! It reloads the whitelist from the database. +//! +//! **Example request** +//! +//! ```bash +//! curl "http://127.0.0.1:1212/api/v1/whitelist/reload?token=MyAccessToken" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "status": "ok" +//! } +//! ``` +pub mod handlers; +pub mod responses; +pub mod routes; diff --git a/src/servers/apis/v1/context/whitelist/responses.rs b/src/servers/apis/v1/context/whitelist/responses.rs new file mode 100644 index 000000000..ce901c2f0 --- /dev/null +++ b/src/servers/apis/v1/context/whitelist/responses.rs @@ -0,0 +1,25 @@ +//! API responses for the [`whitelist`](crate::servers::apis::v1::context::whitelist) +//! API context. +use std::error::Error; + +use axum::response::Response; + +use crate::servers::apis::v1::responses::unhandled_rejection_response; + +/// `500` error response when a torrent cannot be removed from the whitelist. 
+#[must_use] +pub fn failed_to_remove_torrent_from_whitelist_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to remove torrent from whitelist: {e}")) +} + +/// `500` error response when a torrent cannot be added to the whitelist. +#[must_use] +pub fn failed_to_whitelist_torrent_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to whitelist torrent: {e}")) +} + +/// `500` error response when the whitelist cannot be reloaded from the database. +#[must_use] +pub fn failed_to_reload_whitelist_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to reload whitelist: {e}")) +} diff --git a/src/servers/apis/v1/context/whitelist/routes.rs b/src/servers/apis/v1/context/whitelist/routes.rs new file mode 100644 index 000000000..e4e85181f --- /dev/null +++ b/src/servers/apis/v1/context/whitelist/routes.rs @@ -0,0 +1,32 @@ +//! API routes for the [`whitelist`](crate::servers::apis::v1::context::whitelist) API context. +//! +//! - `POST /whitelist/:info_hash` +//! - `DELETE /whitelist/:info_hash` +//! - `GET /whitelist/reload` +//! +//! Refer to the [API endpoint documentation](crate::servers::apis::v1::context::torrent). +use std::sync::Arc; + +use axum::routing::{delete, get, post}; +use axum::Router; + +use super::handlers::{add_torrent_to_whitelist_handler, reload_whitelist_handler, remove_torrent_from_whitelist_handler}; +use crate::core::Tracker; + +/// It adds the routes to the router for the [`whitelist`](crate::servers::apis::v1::context::whitelist) API context. 
+pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { + let prefix = format!("{prefix}/whitelist"); + + router + // Whitelisted torrents + .route( + &format!("{prefix}/:info_hash"), + post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), + ) + .route( + &format!("{prefix}/:info_hash"), + delete(remove_torrent_from_whitelist_handler).with_state(tracker.clone()), + ) + // Whitelist commands + .route(&format!("{prefix}/reload"), get(reload_whitelist_handler).with_state(tracker)) +} diff --git a/src/servers/apis/v1/middlewares/auth.rs b/src/servers/apis/v1/middlewares/auth.rs new file mode 100644 index 000000000..58219c7ca --- /dev/null +++ b/src/servers/apis/v1/middlewares/auth.rs @@ -0,0 +1,95 @@ +//! Authentication middleware for the API. +//! +//! It uses a "token" GET param to authenticate the user. URLs must be of the +//! form: +//! +//! `http://:/api/v1/?token=`. +//! +//! > **NOTICE**: the token can be at any position in the URL, not just at the +//! > beginning or at the end. +//! +//! The token must be one of the `access_tokens` in the tracker +//! [HTTP API configuration](torrust_tracker_configuration::HttpApi). +//! +//! The configuration file `tracker.toml` contains a list of tokens: +//! +//! ```toml +//! [http_api.access_tokens] +//! admin = "MyAccessToken" +//! ``` +//! +//! All the tokes have the same permissions, so it is not possible to have +//! different permissions for different tokens. The label is only used to +//! identify the token. +use std::sync::Arc; + +use axum::extract::{self}; +use axum::http::Request; +use axum::middleware::Next; +use axum::response::{IntoResponse, Response}; +use serde::Deserialize; +use torrust_tracker_configuration::AccessTokens; + +use crate::servers::apis::v1::responses::unhandled_rejection_response; + +/// Container for the `token` extracted from the query params. 
+#[derive(Deserialize, Debug)] +pub struct QueryParams { + pub token: Option, +} + +#[derive(Clone, Debug)] +pub struct State { + pub access_tokens: Arc, +} + +/// Middleware for authentication using a "token" GET param. +/// The token must be one of the tokens in the tracker [HTTP API configuration](torrust_tracker_configuration::HttpApi). +pub async fn auth( + extract::State(state): extract::State, + extract::Query(params): extract::Query, + request: Request, + next: Next, +) -> Response { + let Some(token) = params.token else { + return AuthError::Unauthorized.into_response(); + }; + + if !authenticate(&token, &state.access_tokens) { + return AuthError::TokenNotValid.into_response(); + } + + next.run(request).await +} + +enum AuthError { + /// Missing token for authentication. + Unauthorized, + /// Token was provided but it is not valid. + TokenNotValid, +} + +impl IntoResponse for AuthError { + fn into_response(self) -> Response { + match self { + AuthError::Unauthorized => unauthorized_response(), + AuthError::TokenNotValid => token_not_valid_response(), + } + } +} + +fn authenticate(token: &str, tokens: &AccessTokens) -> bool { + tokens.values().any(|t| t == token) +} + +/// `500` error response returned when the token is missing. +#[must_use] +pub fn unauthorized_response() -> Response { + unhandled_rejection_response("unauthorized".to_string()) +} + +/// `500` error response when the provided token is not valid. +#[must_use] +pub fn token_not_valid_response() -> Response { + unhandled_rejection_response("token not valid".to_string()) +} diff --git a/src/servers/apis/v1/middlewares/mod.rs b/src/servers/apis/v1/middlewares/mod.rs new file mode 100644 index 000000000..141e3038a --- /dev/null +++ b/src/servers/apis/v1/middlewares/mod.rs @@ -0,0 +1,2 @@ +//! API middlewares. See [Axum middlewares](axum::middleware). 
+pub mod auth; diff --git a/src/servers/apis/v1/mod.rs b/src/servers/apis/v1/mod.rs new file mode 100644 index 000000000..372ae0ff9 --- /dev/null +++ b/src/servers/apis/v1/mod.rs @@ -0,0 +1,22 @@ +//! The API version `v1`. +//! +//! The API is organized in the following contexts: +//! +//! Context | Description | Version +//! ---|---|--- +//! `Stats` | Tracker statistics | [`v1`](crate::servers::apis::v1::context::stats) +//! `Torrents` | Torrents | [`v1`](crate::servers::apis::v1::context::torrent) +//! `Whitelist` | Torrents whitelist | [`v1`](crate::servers::apis::v1::context::whitelist) +//! `Authentication keys` | Authentication keys | [`v1`](crate::servers::apis::v1::context::auth_key) +//! +//! > **NOTICE**: +//! - The authentication keys are only used by the HTTP tracker. +//! - The whitelist is only used when the tracker is running in `listed` or +//! `private_listed` mode. +//! +//! Refer to the [authentication middleware](crate::servers::apis::v1::middlewares::auth) +//! for more information about the authentication process. +pub mod context; +pub mod middlewares; +pub mod responses; +pub mod routes; diff --git a/src/servers/apis/v1/responses.rs b/src/servers/apis/v1/responses.rs new file mode 100644 index 000000000..d2c52ac40 --- /dev/null +++ b/src/servers/apis/v1/responses.rs @@ -0,0 +1,84 @@ +//! Common responses for the API v1 shared by all the contexts. +use axum::http::{header, StatusCode}; +use axum::response::{IntoResponse, Response}; +use serde::Serialize; + +/* code-review: + When Axum cannot parse a path or query param it shows a message like this: + + For the "seconds_valid_or_key" path param: + + "Invalid URL: Cannot parse "-1" to a `u64`" + + That message is not an informative message, specially if you have more than one param. + We should show a message similar to the one we use when we parse the value in the handler. 
+ For example: + + "Invalid URL: invalid infohash param: string \"INVALID VALUE\", expected a 40 character long string" + + We can customize the error message by using a custom type with custom serde deserialization. + The same we are using for the "InfoHashVisitor". + + Input data from HTTP requests should use struts with primitive types (first level of validation). + We can put the second level of validation in the application and domain services. +*/ + +/// Response status used when requests have only two possible results +/// `Ok` or `Error` and no data is returned. +#[derive(Serialize, Debug)] +#[serde(tag = "status", rename_all = "snake_case")] +pub enum ActionStatus<'a> { + Ok, + Err { reason: std::borrow::Cow<'a, str> }, +} + +// OK response + +/// # Panics +/// +/// Will panic if it can't convert the `ActionStatus` to json +#[must_use] +pub fn ok_response() -> Response { + ( + StatusCode::OK, + [(header::CONTENT_TYPE, "application/json")], + serde_json::to_string(&ActionStatus::Ok).unwrap(), + ) + .into_response() +} + +// Error responses + +#[must_use] +pub fn invalid_info_hash_param_response(info_hash: &str) -> Response { + bad_request_response(&format!( + "Invalid URL: invalid infohash param: string \"{info_hash}\", expected a 40 character long string" + )) +} + +#[must_use] +pub fn invalid_auth_key_param_response(invalid_key: &str) -> Response { + bad_request_response(&format!("Invalid auth key id param \"{invalid_key}\"")) +} + +#[must_use] +pub fn bad_request_response(body: &str) -> Response { + ( + StatusCode::BAD_REQUEST, + [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], + body.to_owned(), + ) + .into_response() +} + +/// This error response is to keep backward compatibility with the old API. +/// It should be a plain text or json. 
+#[must_use] +pub fn unhandled_rejection_response(reason: String) -> Response { + ( + StatusCode::INTERNAL_SERVER_ERROR, + [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], + format!("Unhandled rejection: {:?}", ActionStatus::Err { reason: reason.into() }), + ) + .into_response() +} diff --git a/src/servers/apis/v1/routes.rs b/src/servers/apis/v1/routes.rs new file mode 100644 index 000000000..3786b3532 --- /dev/null +++ b/src/servers/apis/v1/routes.rs @@ -0,0 +1,18 @@ +//! Route initialization for the v1 API. +use std::sync::Arc; + +use axum::Router; + +use super::context::{auth_key, stats, torrent, whitelist}; +use crate::core::Tracker; + +/// Add the routes for the v1 API. +pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { + let v1_prefix = format!("{prefix}/v1"); + + let router = auth_key::routes::add(&v1_prefix, router, tracker.clone()); + let router = stats::routes::add(&v1_prefix, router, tracker.clone()); + let router = whitelist::routes::add(&v1_prefix, router, tracker.clone()); + + torrent::routes::add(&v1_prefix, router, tracker) +} diff --git a/src/servers/custom_axum_server.rs b/src/servers/custom_axum_server.rs new file mode 100644 index 000000000..5705ef24e --- /dev/null +++ b/src/servers/custom_axum_server.rs @@ -0,0 +1,275 @@ +//! Wrapper for Axum server to add timeouts. +//! +//! Copyright (c) Eray Karatay ([@programatik29](https://github.com/programatik29)). +//! +//! See: . +//! +//! If a client opens a HTTP connection and it does not send any requests, the +//! connection is closed after a timeout. You can test it with: +//! +//! ```text +//! telnet 127.0.0.1 1212 +//! Trying 127.0.0.1... +//! Connected to 127.0.0.1. +//! Escape character is '^]'. +//! Connection closed by foreign host. +//! ``` +//! +//! If you want to know more about Axum and timeouts see . 
+use std::future::Ready; +use std::io::ErrorKind; +use std::net::TcpListener; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::time::Duration; + +use axum_server::accept::Accept; +use axum_server::tls_rustls::{RustlsAcceptor, RustlsConfig}; +use axum_server::Server; +use futures_util::{ready, Future}; +use http_body::{Body, Frame}; +use hyper::Response; +use hyper_util::rt::TokioTimer; +use pin_project_lite::pin_project; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +use tokio::sync::mpsc::{self, UnboundedReceiver, UnboundedSender}; +use tokio::time::{Instant, Sleep}; +use tower::Service; + +const HTTP1_HEADER_READ_TIMEOUT: Duration = Duration::from_secs(5); +const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(5); +const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(5); + +#[must_use] +pub fn from_tcp_with_timeouts(socket: TcpListener) -> Server { + add_timeouts(axum_server::from_tcp(socket)) +} + +#[must_use] +pub fn from_tcp_rustls_with_timeouts(socket: TcpListener, tls: RustlsConfig) -> Server { + add_timeouts(axum_server::from_tcp_rustls(socket, tls)) +} + +fn add_timeouts(mut server: Server) -> Server { + server.http_builder().http1().timer(TokioTimer::new()); + server.http_builder().http2().timer(TokioTimer::new()); + + server.http_builder().http1().header_read_timeout(HTTP1_HEADER_READ_TIMEOUT); + server + .http_builder() + .http2() + .keep_alive_timeout(HTTP2_KEEP_ALIVE_TIMEOUT) + .keep_alive_interval(HTTP2_KEEP_ALIVE_INTERVAL); + + server +} + +#[derive(Clone)] +pub struct TimeoutAcceptor; + +impl Accept for TimeoutAcceptor { + type Stream = TimeoutStream; + type Service = TimeoutService; + type Future = Ready>; + + fn accept(&self, stream: I, service: S) -> Self::Future { + let (tx, rx) = mpsc::unbounded_channel(); + + let stream = TimeoutStream::new(stream, HTTP1_HEADER_READ_TIMEOUT, rx); + let service = TimeoutService::new(service, tx); + + std::future::ready(Ok((stream, service))) + } +} + +#[derive(Clone)] +pub 
struct TimeoutService { + inner: S, + sender: UnboundedSender, +} + +impl TimeoutService { + fn new(inner: S, sender: UnboundedSender) -> Self { + Self { inner, sender } + } +} + +impl Service for TimeoutService +where + S: Service>, +{ + type Response = Response>; + type Error = S::Error; + type Future = TimeoutServiceFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + // send timer wait signal + let _ = self.sender.send(TimerSignal::Wait); + + TimeoutServiceFuture::new(self.inner.call(req), self.sender.clone()) + } +} + +pin_project! { + pub struct TimeoutServiceFuture { + #[pin] + inner: F, + sender: Option>, + } +} + +impl TimeoutServiceFuture { + fn new(inner: F, sender: UnboundedSender) -> Self { + Self { + inner, + sender: Some(sender), + } + } +} + +impl Future for TimeoutServiceFuture +where + F: Future, E>>, +{ + type Output = Result>, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + this.inner.poll(cx).map(|result| { + result.map(|response| { + response.map(|body| TimeoutBody::new(body, this.sender.take().expect("future polled after ready"))) + }) + }) + } +} + +enum TimerSignal { + Wait, + Reset, +} + +pin_project! 
{ + pub struct TimeoutBody { + #[pin] + inner: B, + sender: UnboundedSender, + } +} + +impl TimeoutBody { + fn new(inner: B, sender: UnboundedSender) -> Self { + Self { inner, sender } + } +} + +impl Body for TimeoutBody { + type Data = B::Data; + type Error = B::Error; + + fn poll_frame(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll, Self::Error>>> { + let this = self.project(); + let option = ready!(this.inner.poll_frame(cx)); + + if option.is_none() { + let _ = this.sender.send(TimerSignal::Reset); + } + + Poll::Ready(option) + } + + fn is_end_stream(&self) -> bool { + let is_end_stream = self.inner.is_end_stream(); + + if is_end_stream { + let _ = self.sender.send(TimerSignal::Reset); + } + + is_end_stream + } + + fn size_hint(&self) -> http_body::SizeHint { + self.inner.size_hint() + } +} + +pub struct TimeoutStream { + inner: IO, + // hyper requires unpin + sleep: Pin>, + duration: Duration, + waiting: bool, + receiver: UnboundedReceiver, + finished: bool, +} + +impl TimeoutStream { + fn new(inner: IO, duration: Duration, receiver: UnboundedReceiver) -> Self { + Self { + inner, + sleep: Box::pin(tokio::time::sleep(duration)), + duration, + waiting: false, + receiver, + finished: false, + } + } +} + +impl AsyncRead for TimeoutStream { + fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll> { + if !self.finished { + match Pin::new(&mut self.receiver).poll_recv(cx) { + // reset the timer + Poll::Ready(Some(TimerSignal::Reset)) => { + self.waiting = false; + + let deadline = Instant::now() + self.duration; + self.sleep.as_mut().reset(deadline); + } + // enter waiting mode (for response body last chunk) + Poll::Ready(Some(TimerSignal::Wait)) => self.waiting = true, + Poll::Ready(None) => self.finished = true, + Poll::Pending => (), + } + } + + if !self.waiting { + // return error if timer is elapsed + if let Poll::Ready(()) = self.sleep.as_mut().poll(cx) { + return Poll::Ready(Err(std::io::Error::new(ErrorKind::TimedOut, 
"request header read timed out"))); + } + } + + Pin::new(&mut self.inner).poll_read(cx, buf) + } +} + +impl AsyncWrite for TimeoutStream { + fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { + Pin::new(&mut self.inner).poll_write(cx, buf) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_flush(cx) + } + + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_shutdown(cx) + } + + fn poll_write_vectored( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[std::io::IoSlice<'_>], + ) -> Poll> { + Pin::new(&mut self.inner).poll_write_vectored(cx, bufs) + } + + fn is_write_vectored(&self) -> bool { + self.inner.is_write_vectored() + } +} diff --git a/src/servers/health_check_api/handlers.rs b/src/servers/health_check_api/handlers.rs new file mode 100644 index 000000000..fe65e996b --- /dev/null +++ b/src/servers/health_check_api/handlers.rs @@ -0,0 +1,52 @@ +use std::collections::VecDeque; + +use axum::extract::State; +use axum::Json; +use tracing::{instrument, Level}; + +use super::resources::{CheckReport, Report}; +use super::responses; +use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistry}; + +/// Endpoint for container health check. +/// +/// Creates a vector [`CheckReport`] from the input set of [`CheckJob`], and then builds a report from the results. +/// +#[instrument(skip(register), ret(level = Level::DEBUG))] +pub(crate) async fn health_check_handler(State(register): State) -> Json { + #[allow(unused_assignments)] + let mut checks: VecDeque = VecDeque::new(); + + { + let mutex = register.lock(); + + checks = mutex.await.values().map(ServiceRegistration::spawn_check).collect(); + } + + // if we do not have any checks, lets return a `none` result. 
+ if checks.is_empty() { + return responses::none(); + } + + let jobs = checks.drain(..).map(|c| { + tokio::spawn(async move { + CheckReport { + binding: c.binding, + info: c.info.clone(), + result: c.job.await.expect("it should be able to join into the checking function"), + } + }) + }); + + let results: Vec = futures::future::join_all(jobs) + .await + .drain(..) + .map(|r| r.expect("it should be able to connect to the job")) + .collect(); + + if results.iter().any(CheckReport::fail) { + responses::error("health check failed".to_string(), results) + } else { + responses::ok(results) + } +} diff --git a/src/servers/health_check_api/mod.rs b/src/servers/health_check_api/mod.rs new file mode 100644 index 000000000..24c5232c8 --- /dev/null +++ b/src/servers/health_check_api/mod.rs @@ -0,0 +1,6 @@ +pub mod handlers; +pub mod resources; +pub mod responses; +pub mod server; + +pub const HEALTH_CHECK_API_LOG_TARGET: &str = "HEALTH CHECK API"; diff --git a/src/servers/health_check_api/resources.rs b/src/servers/health_check_api/resources.rs new file mode 100644 index 000000000..3302fb966 --- /dev/null +++ b/src/servers/health_check_api/resources.rs @@ -0,0 +1,64 @@ +use std::net::SocketAddr; + +use serde::{Deserialize, Serialize}; + +#[derive(Copy, Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] +pub enum Status { + Ok, + Error, + None, +} + +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct CheckReport { + pub binding: SocketAddr, + pub info: String, + pub result: Result, +} + +impl CheckReport { + #[must_use] + pub fn pass(&self) -> bool { + self.result.is_ok() + } + #[must_use] + pub fn fail(&self) -> bool { + self.result.is_err() + } +} + +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Report { + pub status: Status, + pub message: String, + pub details: Vec, +} + +impl Report { + #[must_use] + pub fn none() -> Report { + Self { + status: Status::None, + message: String::new(), + details: Vec::default(), + } + } 
+ + #[must_use] + pub fn ok(details: Vec) -> Report { + Self { + status: Status::Ok, + message: String::new(), + details, + } + } + + #[must_use] + pub fn error(message: String, details: Vec) -> Report { + Self { + status: Status::Error, + message, + details, + } + } +} diff --git a/src/servers/health_check_api/responses.rs b/src/servers/health_check_api/responses.rs new file mode 100644 index 000000000..3796d8be4 --- /dev/null +++ b/src/servers/health_check_api/responses.rs @@ -0,0 +1,15 @@ +use axum::Json; + +use super::resources::{CheckReport, Report}; + +pub fn ok(details: Vec) -> Json { + Json(Report::ok(details)) +} + +pub fn error(message: String, details: Vec) -> Json { + Json(Report::error(message, details)) +} + +pub fn none() -> Json { + Json(Report::none()) +} diff --git a/src/servers/health_check_api/server.rs b/src/servers/health_check_api/server.rs new file mode 100644 index 000000000..df4b1cf69 --- /dev/null +++ b/src/servers/health_check_api/server.rs @@ -0,0 +1,101 @@ +//! Logic to run the Health Check HTTP API server. +//! +//! This API is intended to be used by the container infrastructure to check if +//! the whole application is healthy. 
+use std::net::SocketAddr; +use std::time::Duration; + +use axum::http::HeaderName; +use axum::response::Response; +use axum::routing::get; +use axum::{Json, Router}; +use axum_server::Handle; +use futures::Future; +use hyper::Request; +use serde_json::json; +use tokio::sync::oneshot::{Receiver, Sender}; +use tower_http::compression::CompressionLayer; +use tower_http::propagate_header::PropagateHeaderLayer; +use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; +use tower_http::trace::{DefaultMakeSpan, TraceLayer}; +use tracing::{instrument, Level, Span}; + +use crate::bootstrap::jobs::Started; +use crate::servers::health_check_api::handlers::health_check_handler; +use crate::servers::health_check_api::HEALTH_CHECK_API_LOG_TARGET; +use crate::servers::registar::ServiceRegistry; +use crate::servers::signals::{graceful_shutdown, Halted}; + +/// Starts Health Check API server. +/// +/// # Panics +/// +/// Will panic if binding to the socket address fails. +#[instrument(skip(bind_to, tx, rx_halt, register))] +pub fn start( + bind_to: SocketAddr, + tx: Sender, + rx_halt: Receiver, + register: ServiceRegistry, +) -> impl Future> { + let router = Router::new() + .route("/", get(|| async { Json(json!({})) })) + .route("/health_check", get(health_check_handler)) + .with_state(register) + .layer(CompressionLayer::new()) + .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) + .layer(PropagateHeaderLayer::new(HeaderName::from_static("x-request-id"))) + .layer( + TraceLayer::new_for_http() + .make_span_with(DefaultMakeSpan::new().level(Level::INFO)) + .on_request(|request: &Request, _span: &Span| { + let method = request.method().to_string(); + let uri = request.uri().to_string(); + let request_id = request + .headers() + .get("x-request-id") + .map(|v| v.to_str().unwrap_or_default()) + .unwrap_or_default(); + + tracing::span!( + target: HEALTH_CHECK_API_LOG_TARGET, + tracing::Level::INFO, "request", method = %method, uri = %uri, request_id = %request_id); + }) 
+ .on_response(|response: &Response, latency: Duration, _span: &Span| { + let status_code = response.status(); + let request_id = response + .headers() + .get("x-request-id") + .map(|v| v.to_str().unwrap_or_default()) + .unwrap_or_default(); + let latency_ms = latency.as_millis(); + + tracing::span!( + target: HEALTH_CHECK_API_LOG_TARGET, + tracing::Level::INFO, "response", latency = %latency_ms, status = %status_code, request_id = %request_id); + }), + ) + .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)); + + let socket = std::net::TcpListener::bind(bind_to).expect("Could not bind tcp_listener to address."); + let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); + + let handle = Handle::new(); + + tracing::debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Starting service with graceful shutdown in a spawned task ..."); + + tokio::task::spawn(graceful_shutdown( + handle.clone(), + rx_halt, + format!("Shutting down http server on socket address: {address}"), + )); + + let running = axum_server::from_tcp(socket) + .handle(handle) + .serve(router.into_make_service_with_connect_info::()); + + tx.send(Started { address }) + .expect("the Health Check API server should not be dropped"); + + running +} diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs new file mode 100644 index 000000000..4ef5ca7ea --- /dev/null +++ b/src/servers/http/mod.rs @@ -0,0 +1,319 @@ +//! HTTP Tracker. +//! +//! This module contains the HTTP tracker implementation. +//! +//! The HTTP tracker is a simple HTTP server that responds to two `GET` requests: +//! +//! - `Announce`: used to announce the presence of a peer to the tracker. +//! - `Scrape`: used to get information about a torrent. +//! +//! Refer to the [`bit_torrent`](crate::shared::bit_torrent) module for more +//! information about the `BitTorrent` protocol. +//! +//! ## Table of Contents +//! +//! - [Requests](#requests) +//! - [Announce](#announce) +//! - [Scrape](#scrape) +//! 
- [Versioning](#versioning) +//! - [Links](#links) +//! +//! ## Requests +//! +//! ### Announce +//! +//! `Announce` requests are used to announce the presence of a peer to the +//! tracker. The tracker responds with a list of peers that are also downloading +//! the same torrent. A "swarm" is a group of peers that are downloading the +//! same torrent. +//! +//! `Announce` responses are encoded in [bencoded](https://en.wikipedia.org/wiki/Bencode) +//! format. +//! +//! There are two types of `Announce` responses: `compact` and `non-compact`. In +//! a compact response, the peers are encoded in a single string. In a +//! non-compact response, the peers are encoded in a list of dictionaries. The +//! compact response is more efficient than the non-compact response and it does +//! not contain the peer's IDs. +//! +//! **Query parameters** +//! +//! > **NOTICE**: you can click on the parameter name to see a full description +//! > after extracting and parsing the parameter from the URL query component. +//! +//! Parameter | Type | Description | Required | Default | Example +//! ---|---|---|---|---|--- +//! [`info_hash`](crate::servers::http::v1::requests::announce::Announce::info_hash) | percent encoded of 20-byte array | The `Info Hash` of the torrent. | Yes | No | `%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00` +//! `peer_addr` | string |The IP address of the peer. | No | No | `2.137.87.41` +//! [`downloaded`](crate::servers::http::v1::requests::announce::Announce::downloaded) | positive integer |The number of bytes downloaded by the peer. | No | `0` | `0` +//! [`uploaded`](crate::servers::http::v1::requests::announce::Announce::uploaded) | positive integer | The number of bytes uploaded by the peer. | No | `0` | `0` +//! [`peer_id`](crate::servers::http::v1::requests::announce::Announce::peer_id) | percent encoded of 20-byte array | The ID of the peer. | Yes | No | `-qB00000000000000001` +//! 
[`port`](crate::servers::http::v1::requests::announce::Announce::port) | positive integer | The port used by the peer. | Yes | No | `17548` +//! [`left`](crate::servers::http::v1::requests::announce::Announce::left) | positive integer | The number of bytes pending to download. | No | `0` | `0` +//! [`event`](crate::servers::http::v1::requests::announce::Announce::event) | positive integer | The event that triggered the `Announce` request: `started`, `completed`, `stopped` | No | `None` | `completed` +//! [`compact`](crate::servers::http::v1::requests::announce::Announce::compact) | `0` or `1` | Whether the tracker should return a compact peer list. | No | `None` | `0` +//! `numwant` | positive integer | **Not implemented**. The maximum number of peers you want in the reply. | No | `50` | `50` +//! +//! Refer to the [`Announce`](crate::servers::http::v1::requests::announce::Announce) +//! request for more information about the parameters. +//! +//! > **NOTICE**: the [BEP 03](https://www.bittorrent.org/beps/bep_0003.html) +//! > defines only the `ip` and `event` parameters as optional. However, the +//! > tracker assigns default values to the optional parameters if they are not +//! > provided. +//! +//! > **NOTICE**: the `peer_addr` parameter is not part of the original +//! > specification. But the peer IP was added in the +//! > [UDP Tracker protocol](https://www.bittorrent.org/beps/bep_0015.html). It is +//! > used to provide the peer's IP address to the tracker, but it is ignored by +//! > the tracker. The tracker uses the IP address of the peer that sent the +//! > request or the right-most-ip in the `X-Forwarded-For` header if the tracker +//! > is behind a reverse proxy. +//! +//! > **NOTICE**: the maximum number of peers that the tracker can return is +//! > `74`. Defined with a hardcoded const [`TORRENT_PEERS_LIMIT`](torrust_tracker_configuration::TORRENT_PEERS_LIMIT). +//! > Refer to [issue 262](https://github.com/torrust/torrust-tracker/issues/262) +//! 
> for more information about this limitation. +//! +//! > **NOTICE**: the `info_hash` parameter is NOT a `URL` encoded string param. +//! > It is percent encode of the raw `info_hash` bytes (40 bytes). URL `GET` params +//! > can contain any bytes, not only well-formed UTF-8. The `info_hash` is a +//! > 20-byte SHA1. Check the [`percent_encoding`] +//! > module to know more about the encoding. +//! +//! > **NOTICE**: the `peer_id` parameter is NOT a `URL` encoded string param. +//! > It is percent encode of the raw peer ID bytes (20 bytes). URL `GET` params +//! > can contain any bytes, not only well-formed UTF-8. The `info_hash` is a +//! > 20-byte SHA1. Check the [`percent_encoding`] +//! > module to know more about the encoding. +//! +//! > **NOTICE**: by default, the tracker returns the non-compact peer list when +//! > no `compact` parameter is provided or is empty. The +//! > [BEP 23](https://www.bittorrent.org/beps/bep_0023.html) suggests to do the +//! > opposite. The tracker should return the compact peer list by default and +//! > return the non-compact peer list if the `compact` parameter is `0`. +//! +//! **Sample announce URL** +//! +//! A sample `GET` `announce` request: +//! +//! +//! +//! **Sample non-compact response** +//! +//! In [bencoded](https://en.wikipedia.org/wiki/Bencode) format: +//! +//! ```text +//! d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peersld2:ip15:105.105.105.1057:peer id20:-qB000000000000000014:porti28784eed2:ip39:6969:6969:6969:6969:6969:6969:6969:69697:peer id20:-qB000000000000000024:porti28784eeee +//! ``` +//! +//! And represented as a json: +//! +//! ```json +//! { +//! "complete": 333, +//! "incomplete": 444, +//! "interval": 111, +//! "min interval": 222, +//! "peers": [ +//! { +//! "ip": "105.105.105.105", +//! "peer id": "-qB00000000000000001", +//! "port": 28784 +//! }, +//! { +//! "ip": "6969:6969:6969:6969:6969:6969:6969:6969", +//! "peer id": "-qB00000000000000002", +//! 
"port": 28784 +//! } +//! ] +//! } +//! ``` +//! +//! If you save the response as a file and you open it with a program that can +//! handle binary data you would see: +//! +//! ```text +//! 00000000: 6438 3a63 6f6d 706c 6574 6569 3333 3365 d8:completei333e +//! 00000010: 3130 3a69 6e63 6f6d 706c 6574 6569 3434 10:incompletei44 +//! 00000020: 3465 383a 696e 7465 7276 616c 6931 3131 4e8:intervali111 +//! 00000030: 6531 323a 6d69 6e20 696e 7465 7276 616c e12:min interval +//! 00000040: 6932 3232 6535 3a70 6565 7273 6c64 323a i222e5:peersld2: +//! 00000050: 6970 3135 3a31 3035 2e31 3035 2e31 3035 ip15:105.105.105 +//! 00000060: 2e31 3035 373a 7065 6572 2069 6432 303a .1057:peer id20: +//! 00000070: 2d71 4230 3030 3030 3030 3030 3030 3030 -qB0000000000000 +//! 00000080: 3030 3031 343a 706f 7274 6932 3837 3834 00014:porti28784 +//! 00000090: 6565 6432 3a69 7033 393a 3639 3639 3a36 eed2:ip39:6969:6 +//! 000000a0: 3936 393a 3639 3639 3a36 3936 393a 3639 969:6969:6969:69 +//! 000000b0: 3639 3a36 3936 393a 3639 3639 3a36 3936 69:6969:6969:696 +//! 000000c0: 3937 3a70 6565 7220 6964 3230 3a2d 7142 97:peer id20:-qB +//! 000000d0: 3030 3030 3030 3030 3030 3030 3030 3030 0000000000000000 +//! 000000e0: 3234 3a70 6f72 7469 3238 3738 3465 6565 24:porti28784eee +//! 000000f0: 65 e +//! ``` +//! +//! Refer to the [`Normal`](crate::servers::http::v1::responses::announce::Normal), i.e. `Non-Compact` +//! response for more information about the response. +//! +//! **Sample compact response** +//! +//! In [bencoded](https://en.wikipedia.org/wiki/Bencode) format: +//! +//! ```text +//! d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peers6:iiiipp6:peers618:iiiiiiiiiiiiiiiippe +//! ``` +//! +//! And represented as a json: +//! +//! ```json +//! { +//! "complete": 333, +//! "incomplete": 444, +//! "interval": 111, +//! "min interval": 222, +//! "peers": "iiiipp", +//! "peers6": "iiiiiiiiiiiiiiiipp" +//! } +//! ``` +//! +//! 
If you save the response as a file and you open it with a program that can +//! handle binary data you would see: +//! +//! ```text +//! 0000000: 6438 3a63 6f6d 706c 6574 6569 3333 3365 d8:completei333e +//! 0000010: 3130 3a69 6e63 6f6d 706c 6574 6569 3434 10:incompletei44 +//! 0000020: 3465 383a 696e 7465 7276 616c 6931 3131 4e8:intervali111 +//! 0000030: 6531 323a 6d69 6e20 696e 7465 7276 616c e12:min interval +//! 0000040: 6932 3232 6535 3a70 6565 7273 363a 6969 i222e5:peers6:ii +//! 0000050: 6969 7070 363a 7065 6572 7336 3138 3a69 iipp6:peers618:i +//! 0000060: 6969 6969 6969 6969 6969 6969 6969 6970 iiiiiiiiiiiiiiip +//! 0000070: 7065 pe +//! ``` +//! +//! Refer to the [`Compact`](crate::servers::http::v1::responses::announce::Compact) +//! response for more information about the response. +//! +//! **Protocol** +//! +//! Original specification in [BEP 03. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html). +//! +//! If you want to know more about the `announce` request: +//! +//! - [BEP 03. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +//! - [BEP 23. Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) +//! - [Vuze announce docs](https://wiki.vuze.com/w/Announce) +//! - [wiki.theory.org - Announce](https://wiki.theory.org/BitTorrent_Tracker_Protocol#Basic_Tracker_Announce_Request) +//! +//! ### Scrape +//! +//! The `scrape` request allows a peer to get [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) +//! for multiple torrents at the same time. +//! +//! The response contains the [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) +//! for that torrent: +//! +//! - [complete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::complete) +//! - [downloaded](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::downloaded) +//! 
- [incomplete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::incomplete) +//! +//! **Query parameters** +//! +//! Parameter | Type | Description | Required | Default | Example +//! ---|---|---|---|---|--- +//! [`info_hash`](crate::servers::http::v1::requests::scrape::Scrape::info_hashes) | percent encoded of 20-byte array | The `Info Hash` of the torrent. | Yes | No | `%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00` +//! +//! > **NOTICE**: you can scrape multiple torrents at the same time by passing +//! > multiple `info_hash` parameters. +//! +//! Refer to the [`Scrape`](crate::servers::http::v1::requests::scrape::Scrape) +//! request for more information about the parameters. +//! +//! **Sample scrape URL** +//! +//! A sample `scrape` request for only one torrent: +//! +//! +//! +//! In order to scrape multiple torrents at the same time you can pass multiple +//! `info_hash` parameters: `info_hash=%81%00%0...00%00%00&info_hash=%82%00%0...00%00%00` +//! +//! > **NOTICE**: the maximum number of torrents you can scrape at the same time +//! > is `74`. Defined with a hardcoded const [`MAX_SCRAPE_TORRENTS`](crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS). +//! +//! **Sample response** +//! +//! The `scrape` response is a [bencoded](https://en.wikipedia.org/wiki/Bencode) +//! byte array like the following: +//! +//! ```text +//! d5:filesd20:iiiiiiiiiiiiiiiiiiiid8:completei1e10:downloadedi2e10:incompletei3eeee +//! ``` +//! +//! And represented as a json: +//! +//! ```json +//! { +//! "files": { +//! "iiiiiiiiiiiiiiiiiiii": { +//! "complete": 1, +//! "downloaded": 2, +//! "incomplete": 3 +//! } +//! } +//! } +//! ``` +//! +//! Where the `files` key contains a dictionary of dictionaries. The first +//! dictionary key is the `info_hash` of the torrent (`iiiiiiiiiiiiiiiiiiii` in +//! the example). The second level dictionary contains the +//! [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) for that torrent. 
+//! +//! If you save the response as a file and you open it with a program that +//! can handle binary data you would see: +//! +//! ```text +//! 00000000: 6435 3a66 696c 6573 6432 303a 6969 6969 d5:filesd20:iiii +//! 00000010: 6969 6969 6969 6969 6969 6969 6969 6969 iiiiiiiiiiiiiiii +//! 00000020: 6438 3a63 6f6d 706c 6574 6569 3165 3130 d8:completei1e10 +//! 00000030: 3a64 6f77 6e6c 6f61 6465 6469 3265 3130 :downloadedi2e10 +//! 00000040: 3a69 6e63 6f6d 706c 6574 6569 3365 6565 :incompletei3eee +//! 00000050: 65 e +//! ``` +//! +//! **Protocol** +//! +//! If you want to know more about the `scrape` request: +//! +//! - [BEP 48. Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) +//! - [Vuze scrape docs](https://wiki.vuze.com/w/Scrape) +//! +//! ## Versioning +//! +//! Right not there is only version `v1`. The HTTP tracker implements BEPS: +//! +//! - [BEP 03. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +//! - [BEP 07. IPv6 Tracker Extension](https://www.bittorrent.org/beps/bep_0007.html) +//! - [BEP 23. Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) +//! - [BEP 48. Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) +//! +//! In the future there could be a `v2` that implements new BEPS with breaking +//! changes. +//! +//! ## Links +//! +//! - [Bencode](https://en.wikipedia.org/wiki/Bencode). +//! - [Bencode to Json Online converter](https://chocobo1.github.io/bencode_online). +use serde::{Deserialize, Serialize}; + +pub mod percent_encoding; +pub mod server; +pub mod v1; + +pub const HTTP_TRACKER_LOG_TARGET: &str = "HTTP TRACKER"; + +/// The version of the HTTP tracker. +#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] +pub enum Version { + /// The `v1` version of the HTTP tracker. 
+ V1, +} diff --git a/src/servers/http/percent_encoding.rs b/src/servers/http/percent_encoding.rs new file mode 100644 index 000000000..c3243d597 --- /dev/null +++ b/src/servers/http/percent_encoding.rs @@ -0,0 +1,128 @@ +//! This module contains functions for percent decoding infohashes and peer IDs. +//! +//! Percent encoding is an encoding format used to encode arbitrary data in a +//! format that is safe to use in URLs. It is used by the HTTP tracker protocol +//! to encode infohashes and peer ids in the URLs of requests. +//! +//! `BitTorrent` infohashes and peer ids are percent encoded like any other +//! arbitrary URL parameter. But they are encoded from binary data (byte arrays) +//! which may not be valid UTF-8. That makes hard to use the `percent_encoding` +//! crate to decode them because all of them expect a well-formed UTF-8 string. +//! However, percent encoding is not limited to UTF-8 strings. +//! +//! More information about "Percent Encoding" can be found here: +//! +//! - +//! - +//! - +use aquatic_udp_protocol::PeerId; +use torrust_tracker_primitives::info_hash::{self, InfoHash}; +use torrust_tracker_primitives::peer; + +/// Percent decodes a percent encoded infohash. Internally an +/// [`InfoHash`] is a 20-byte array. +/// +/// For example, given the infohash `3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0`, +/// it's percent encoded representation is `%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0`. 
+/// +/// ```rust +/// use std::str::FromStr; +/// use torrust_tracker::servers::http::percent_encoding::percent_decode_info_hash; +/// use torrust_tracker_primitives::info_hash::InfoHash; +/// use torrust_tracker_primitives::peer; +/// +/// let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; +/// +/// let info_hash = percent_decode_info_hash(encoded_infohash).unwrap(); +/// +/// assert_eq!( +/// info_hash, +/// InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap() +/// ); +/// ``` +/// +/// # Errors +/// +/// Will return `Err` if the decoded bytes do not represent a valid +/// [`InfoHash`]. +pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result { + let bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); + InfoHash::try_from(bytes) +} + +/// Percent decodes a percent encoded peer id. Internally a peer [`Id`](PeerId) +/// is a 20-byte array. +/// +/// For example, given the peer id `*b"-qB00000000000000000"`, +/// it's percent encoded representation is `%2DqB00000000000000000`. +/// +/// ```rust +/// use std::str::FromStr; +/// +/// use aquatic_udp_protocol::PeerId; +/// use torrust_tracker::servers::http::percent_encoding::percent_decode_peer_id; +/// use torrust_tracker_primitives::info_hash::InfoHash; +/// +/// let encoded_peer_id = "%2DqB00000000000000000"; +/// +/// let peer_id = percent_decode_peer_id(encoded_peer_id).unwrap(); +/// +/// assert_eq!(peer_id, PeerId(*b"-qB00000000000000000")); +/// ``` +/// +/// # Errors +/// +/// Will return `Err` if if the decoded bytes do not represent a valid [`PeerId`]. +pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result { + let bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); + Ok(*peer::Id::try_from(bytes)?) 
+} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use aquatic_udp_protocol::PeerId; + use torrust_tracker_primitives::info_hash::InfoHash; + + use crate::servers::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; + + #[test] + fn it_should_decode_a_percent_encoded_info_hash() { + let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; + + let info_hash = percent_decode_info_hash(encoded_infohash).unwrap(); + + assert_eq!( + info_hash, + InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap() + ); + } + + #[test] + fn it_should_fail_decoding_an_invalid_percent_encoded_info_hash() { + let invalid_encoded_infohash = "invalid percent-encoded infohash"; + + let info_hash = percent_decode_info_hash(invalid_encoded_infohash); + + assert!(info_hash.is_err()); + } + + #[test] + fn it_should_decode_a_percent_encoded_peer_id() { + let encoded_peer_id = "%2DqB00000000000000000"; + + let peer_id = percent_decode_peer_id(encoded_peer_id).unwrap(); + + assert_eq!(peer_id, PeerId(*b"-qB00000000000000000")); + } + + #[test] + fn it_should_fail_decoding_an_invalid_percent_encoded_peer_id() { + let invalid_encoded_peer_id = "invalid percent-encoded peer id"; + + let peer_id = percent_decode_peer_id(invalid_encoded_peer_id); + + assert!(peer_id.is_err()); + } +} diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs new file mode 100644 index 000000000..560d91681 --- /dev/null +++ b/src/servers/http/server.rs @@ -0,0 +1,263 @@ +//! Module to handle the HTTP server instances. 
+use std::net::SocketAddr; +use std::sync::Arc; + +use axum_server::tls_rustls::RustlsConfig; +use axum_server::Handle; +use derive_more::Constructor; +use futures::future::BoxFuture; +use tokio::sync::oneshot::{Receiver, Sender}; +use tracing::instrument; + +use super::v1::routes::router; +use crate::bootstrap::jobs::Started; +use crate::core::Tracker; +use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; +use crate::servers::http::HTTP_TRACKER_LOG_TARGET; +use crate::servers::logging::STARTED_ON; +use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; +use crate::servers::signals::{graceful_shutdown, Halted}; + +/// Error that can occur when starting or stopping the HTTP server. +/// +/// Some errors triggered while starting the server are: +/// +/// - The spawned server cannot send its `SocketAddr` back to the main thread. +/// - The launcher cannot receive the `SocketAddr` from the spawned server. +/// +/// Some errors triggered while stopping the server are: +/// +/// - The channel to send the shutdown signal to the server is closed. +/// - The task to shutdown the server on the spawned server failed to execute to +/// completion. 
+#[derive(Debug)] +pub enum Error { + Error(String), +} + +#[derive(Constructor, Debug)] +pub struct Launcher { + pub bind_to: SocketAddr, + pub tls: Option, +} + +impl Launcher { + #[instrument(skip(self, tracker, tx_start, rx_halt))] + fn start(&self, tracker: Arc, tx_start: Sender, rx_halt: Receiver) -> BoxFuture<'static, ()> { + let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); + let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); + + let handle = Handle::new(); + + tokio::task::spawn(graceful_shutdown( + handle.clone(), + rx_halt, + format!("Shutting down HTTP server on socket address: {address}"), + )); + + let tls = self.tls.clone(); + let protocol = if tls.is_some() { "https" } else { "http" }; + + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting on: {protocol}://{}", address); + + let app = router(tracker, address); + + let running = Box::pin(async { + match tls { + Some(tls) => custom_axum_server::from_tcp_rustls_with_timeouts(socket, tls) + .handle(handle) + // The TimeoutAcceptor is commented because TSL does not work with it. + // See: https://github.com/torrust/torrust-index/issues/204#issuecomment-2115529214 + //.acceptor(TimeoutAcceptor) + .serve(app.into_make_service_with_connect_info::()) + .await + .expect("Axum server crashed."), + None => custom_axum_server::from_tcp_with_timeouts(socket) + .handle(handle) + .acceptor(TimeoutAcceptor) + .serve(app.into_make_service_with_connect_info::()) + .await + .expect("Axum server crashed."), + } + }); + + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "{STARTED_ON}: {protocol}://{}", address); + + tx_start + .send(Started { address }) + .expect("the HTTP(s) Tracker service should not be dropped"); + + running + } +} + +/// A HTTP server instance controller with no HTTP instance running. 
+#[allow(clippy::module_name_repetitions)] +pub type StoppedHttpServer = HttpServer; + +/// A HTTP server instance controller with a running HTTP instance. +#[allow(clippy::module_name_repetitions)] +pub type RunningHttpServer = HttpServer; + +/// A HTTP server instance controller. +/// +/// It's responsible for: +/// +/// - Keeping the initial configuration of the server. +/// - Starting and stopping the server. +/// - Keeping the state of the server: `running` or `stopped`. +/// +/// It's an state machine. Configurations cannot be changed. This struct +/// represents concrete configuration and state. It allows to start and stop the +/// server but always keeping the same configuration. +/// +/// > **NOTICE**: if the configurations changes after running the server it will +/// > reset to the initial value after stopping the server. This struct is not +/// > intended to persist configurations between runs. +#[allow(clippy::module_name_repetitions)] +pub struct HttpServer { + /// The state of the server: `running` or `stopped`. + pub state: S, +} + +/// A stopped HTTP server state. +pub struct Stopped { + launcher: Launcher, +} + +/// A running HTTP server state. +pub struct Running { + /// The address where the server is bound. + pub binding: SocketAddr, + pub halt_task: tokio::sync::oneshot::Sender, + pub task: tokio::task::JoinHandle, +} + +impl HttpServer { + /// It creates a new `HttpServer` controller in `stopped` state. + #[must_use] + pub fn new(launcher: Launcher) -> Self { + Self { + state: Stopped { launcher }, + } + } + + /// It starts the server and returns a `HttpServer` controller in `running` + /// state. + /// + /// # Errors + /// + /// It would return an error if no `SocketAddr` is returned after launching the server. + /// + /// # Panics + /// + /// It would panic spawned HTTP server launcher cannot send the bound `SocketAddr` + /// back to the main thread. 
+ pub async fn start(self, tracker: Arc, form: ServiceRegistrationForm) -> Result, Error> { + let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); + let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); + + let launcher = self.state.launcher; + + let task = tokio::spawn(async move { + let server = launcher.start(tracker, tx_start, rx_halt); + + server.await; + + launcher + }); + + let binding = rx_start.await.expect("it should be able to start the service").address; + + form.send(ServiceRegistration::new(binding, check_fn)) + .expect("it should be able to send service registration"); + + Ok(HttpServer { + state: Running { + binding, + halt_task: tx_halt, + task, + }, + }) + } +} + +impl HttpServer { + /// It stops the server and returns a `HttpServer` controller in `stopped` + /// state. + /// + /// # Errors + /// + /// It would return an error if the channel for the task killer signal was closed. + pub async fn stop(self) -> Result, Error> { + self.state + .halt_task + .send(Halted::Normal) + .map_err(|_| Error::Error("Task killer channel was closed.".to_string()))?; + + let launcher = self.state.task.await.map_err(|e| Error::Error(e.to_string()))?; + + Ok(HttpServer { + state: Stopped { launcher }, + }) + } +} + +/// Checks the Health by connecting to the HTTP tracker endpoint. +/// +/// # Errors +/// +/// This function will return an error if unable to connect. +/// Or if the request returns an error. 
+#[must_use] +pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { + let url = format!("http://{binding}/health_check"); // DevSkim: ignore DS137138 + + let info = format!("checking http tracker health check at: {url}"); + + let job = tokio::spawn(async move { + match reqwest::get(url).await { + Ok(response) => Ok(response.status().to_string()), + Err(err) => Err(err.to_string()), + } + }); + + ServiceHealthCheckJob::new(*binding, info, job) +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use torrust_tracker_test_helpers::configuration::ephemeral_public; + + use crate::bootstrap::app::initialize_with_configuration; + use crate::bootstrap::jobs::make_rust_tls; + use crate::servers::http::server::{HttpServer, Launcher}; + use crate::servers::registar::Registar; + + #[tokio::test] + async fn it_should_be_able_to_start_and_stop() { + let cfg = Arc::new(ephemeral_public()); + let tracker = initialize_with_configuration(&cfg); + let http_trackers = cfg.http_trackers.clone().expect("missing HTTP trackers configuration"); + let config = &http_trackers[0]; + + let bind_to = config.bind_address; + + let tls = make_rust_tls(&config.tsl_config) + .await + .map(|tls| tls.expect("tls config failed")); + + let register = &Registar::default(); + + let stopped = HttpServer::new(Launcher::new(bind_to, tls)); + let started = stopped + .start(tracker, register.give_form()) + .await + .expect("it should start the server"); + let stopped = started.stop().await.expect("it should stop the server"); + + assert_eq!(stopped.state.launcher.bind_to, bind_to); + } +} diff --git a/src/servers/http/v1/extractors/announce_request.rs b/src/servers/http/v1/extractors/announce_request.rs new file mode 100644 index 000000000..324e91bf2 --- /dev/null +++ b/src/servers/http/v1/extractors/announce_request.rs @@ -0,0 +1,158 @@ +//! Axum [`extractor`](axum::extract) for the [`Announce`] +//! request. +//! +//! It parses the query parameters returning an [`Announce`] +//! request. +//! 
+//! Refer to [`Announce`](crate::servers::http::v1::requests::announce) for more +//! information about the returned structure. +//! +//! It returns a bencoded [`Error`](crate::servers::http::v1::responses::error) +//! response (`500`) if the query parameters are missing or invalid. +//! +//! **Sample announce request** +//! +//! +//! +//! **Sample error response** +//! +//! Missing query params for `announce` request: +//! +//! ```text +//! d14:failure reason149:Cannot parse query params for announce request: missing query params for announce request in src/servers/http/v1/extractors/announce_request.rs:54:23e +//! ``` +//! +//! Invalid query param (`info_hash`): +//! +//! ```text +//! d14:failure reason240:Cannot parse query params for announce request: invalid param value invalid for info_hash in not enough bytes for infohash: got 7 bytes, expected 20 src/shared/bit_torrent/info_hash.rs:240:27, src/servers/http/v1/requests/announce.rs:182:42e +//! ``` +use std::panic::Location; + +use axum::extract::FromRequestParts; +use axum::http::request::Parts; +use axum::response::{IntoResponse, Response}; +use futures::future::BoxFuture; +use futures::FutureExt; + +use crate::servers::http::v1::query::Query; +use crate::servers::http::v1::requests::announce::{Announce, ParseAnnounceQueryError}; +use crate::servers::http::v1::responses; + +/// Extractor for the [`Announce`] +/// request. 
+pub struct ExtractRequest(pub Announce); + +impl FromRequestParts for ExtractRequest +where + S: Send + Sync, +{ + type Rejection = Response; + + #[must_use] + fn from_request_parts<'life0, 'life1, 'async_trait>( + parts: &'life0 mut Parts, + _state: &'life1 S, + ) -> BoxFuture<'async_trait, Result> + where + 'life0: 'async_trait, + 'life1: 'async_trait, + Self: 'async_trait, + { + async { + match extract_announce_from(parts.uri.query()) { + Ok(announce_request) => Ok(ExtractRequest(announce_request)), + Err(error) => Err(error.into_response()), + } + } + .boxed() + } +} + +fn extract_announce_from(maybe_raw_query: Option<&str>) -> Result { + if maybe_raw_query.is_none() { + return Err(responses::error::Error::from(ParseAnnounceQueryError::MissingParams { + location: Location::caller(), + })); + } + + let query = maybe_raw_query.unwrap().parse::(); + + if let Err(error) = query { + return Err(responses::error::Error::from(error)); + } + + let announce_request = Announce::try_from(query.unwrap()); + + if let Err(error) = announce_request { + return Err(responses::error::Error::from(error)); + } + + Ok(announce_request.unwrap()) +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use aquatic_udp_protocol::{NumberOfBytes, PeerId}; + use torrust_tracker_primitives::info_hash::InfoHash; + + use super::extract_announce_from; + use crate::servers::http::v1::requests::announce::{Announce, Compact, Event}; + use crate::servers::http::v1::responses::error::Error; + + fn assert_error_response(error: &Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. 
Error: {error:?}" + ); + } + + #[test] + fn it_should_extract_the_announce_request_from_the_url_query_params() { + let raw_query = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0&numwant=50"; + + let announce = extract_announce_from(Some(raw_query)).unwrap(); + + assert_eq!( + announce, + Announce { + info_hash: InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(), + peer_id: PeerId(*b"-qB00000000000000001"), + port: 17548, + downloaded: Some(NumberOfBytes::new(0)), + uploaded: Some(NumberOfBytes::new(0)), + left: Some(NumberOfBytes::new(0)), + event: Some(Event::Completed), + compact: Some(Compact::NotAccepted), + numwant: Some(50), + } + ); + } + + #[test] + fn it_should_reject_a_request_without_query_params() { + let response = extract_announce_from(None).unwrap_err(); + + assert_error_response( + &response, + "Cannot parse query params for announce request: missing query params for announce request", + ); + } + + #[test] + fn it_should_reject_a_request_with_a_query_that_cannot_be_parsed() { + let invalid_query = "param1=value1=value2"; + let response = extract_announce_from(Some(invalid_query)).unwrap_err(); + + assert_error_response(&response, "Cannot parse query params"); + } + + #[test] + fn it_should_reject_a_request_with_a_query_that_cannot_be_parsed_into_an_announce_request() { + let response = extract_announce_from(Some("param1=value1")).unwrap_err(); + + assert_error_response(&response, "Cannot parse query params for announce request"); + } +} diff --git a/src/servers/http/v1/extractors/authentication_key.rs b/src/servers/http/v1/extractors/authentication_key.rs new file mode 100644 index 000000000..e86241edf --- /dev/null +++ b/src/servers/http/v1/extractors/authentication_key.rs @@ -0,0 +1,161 @@ +//! Axum [`extractor`](axum::extract) to extract the authentication [`Key`] +//! from the URL path. +//! 
+//! It's only used when the tracker is running in private mode. +//! +//! Given the following URL route with a path param: `/announce/:key`, +//! it extracts the `key` param from the URL path. +//! +//! It's a wrapper for Axum `Path` extractor in order to return custom +//! authentication errors. +//! +//! It returns a bencoded [`Error`](crate::servers::http::v1::responses::error) +//! response (`500`) if the `key` parameter are missing or invalid. +//! +//! **Sample authentication error responses** +//! +//! When the key param is **missing**: +//! +//! ```text +//! d14:failure reason131:Authentication error: Missing authentication key param for private tracker. Error in src/servers/http/v1/handlers/announce.rs:79:31e +//! ``` +//! +//! When the key param has an **invalid format**: +//! +//! ```text +//! d14:failure reason134:Authentication error: Invalid format for authentication key param. Error in src/servers/http/v1/extractors/authentication_key.rs:73:23e +//! ``` +//! +//! When the key is **not found** in the database: +//! +//! ```text +//! d14:failure reason101:Authentication error: Failed to read key: YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ, src/tracker/mod.rs:848:27e +//! ``` +//! +//! When the key is found in the database but it's **expired**: +//! +//! ```text +//! d14:failure reason64:Authentication error: Key has expired, src/tracker/auth.rs:88:23e +//! ``` +//! +//! > **NOTICE**: the returned HTTP status code is always `200` for authentication errors. +//! > Neither [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +//! > nor [The Private Torrents](https://www.bittorrent.org/beps/bep_0027.html) +//! > specifications specify any HTTP status code for authentication errors. 
+use std::panic::Location; + +use axum::extract::rejection::PathRejection; +use axum::extract::{FromRequestParts, Path}; +use axum::http::request::Parts; +use axum::response::{IntoResponse, Response}; +use futures::future::BoxFuture; +use futures::FutureExt; +use serde::Deserialize; + +use crate::core::auth::Key; +use crate::servers::http::v1::handlers::common::auth; +use crate::servers::http::v1::responses; + +/// Extractor for the [`Key`] struct. +pub struct Extract(pub Key); + +#[derive(Deserialize)] +pub struct KeyParam(String); + +impl KeyParam { + #[must_use] + pub fn value(&self) -> String { + self.0.clone() + } +} + +impl FromRequestParts for Extract +where + S: Send + Sync, +{ + type Rejection = Response; + + #[must_use] + fn from_request_parts<'life0, 'life1, 'async_trait>( + parts: &'life0 mut Parts, + state: &'life1 S, + ) -> BoxFuture<'async_trait, Result> + where + 'life0: 'async_trait, + 'life1: 'async_trait, + Self: 'async_trait, + { + async { + // Extract `key` from URL path with Axum `Path` extractor + let maybe_path_with_key = Path::::from_request_parts(parts, state).await; + + match extract_key(maybe_path_with_key) { + Ok(key) => Ok(Extract(key)), + Err(error) => Err(error.into_response()), + } + } + .boxed() + } +} + +fn extract_key(path_extractor_result: Result, PathRejection>) -> Result { + match path_extractor_result { + Ok(key_param) => match parse_key(&key_param.0.value()) { + Ok(key) => Ok(key), + Err(error) => Err(error), + }, + Err(path_rejection) => Err(custom_error(&path_rejection)), + } +} + +fn parse_key(key: &str) -> Result { + let key = key.parse::(); + + match key { + Ok(key) => Ok(key), + Err(_parse_key_error) => Err(responses::error::Error::from(auth::Error::InvalidKeyFormat { + location: Location::caller(), + })), + } +} + +fn custom_error(rejection: &PathRejection) -> responses::error::Error { + match rejection { + axum::extract::rejection::PathRejection::FailedToDeserializePathParams(_) => { + 
responses::error::Error::from(auth::Error::InvalidKeyFormat { + location: Location::caller(), + }) + } + axum::extract::rejection::PathRejection::MissingPathParams(_) => { + responses::error::Error::from(auth::Error::MissingAuthKey { + location: Location::caller(), + }) + } + _ => responses::error::Error::from(auth::Error::CannotExtractKeyParam { + location: Location::caller(), + }), + } +} + +#[cfg(test)] +mod tests { + + use super::parse_key; + use crate::servers::http::v1::responses::error::Error; + + fn assert_error_response(error: &Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. Error: {error:?}" + ); + } + + #[test] + fn it_should_return_an_authentication_error_if_the_key_cannot_be_parsed() { + let invalid_key = "invalid_key"; + + let response = parse_key(invalid_key).unwrap_err(); + + assert_error_response(&response, "Authentication error: Invalid format for authentication key param"); + } +} diff --git a/src/servers/http/v1/extractors/client_ip_sources.rs b/src/servers/http/v1/extractors/client_ip_sources.rs new file mode 100644 index 000000000..5b235fbe0 --- /dev/null +++ b/src/servers/http/v1/extractors/client_ip_sources.rs @@ -0,0 +1,87 @@ +//! Axum [`extractor`](axum::extract) to get the relevant information to resolve the remote +//! client IP. +//! +//! It's a wrapper for two third-party Axum extractors. +//! +//! The first one is `RightmostXForwardedFor` from the `axum-client-ip` crate. +//! This extractor is used to get the right-most IP address from the +//! `X-Forwarded-For` header. +//! +//! The second one is `ConnectInfo` from the `axum` crate. This extractor is +//! used to get the IP address of the client from the connection info. +//! +//! The `ClientIpSources` struct is a wrapper for the two extractors. +//! +//! The tracker can be configured to run behind a reverse proxy. In this case, +//! 
the tracker will use the `X-Forwarded-For` header to get the client IP +//! address. +//! +//! See [`torrust_tracker_configuration::Configuration::core.on_reverse_proxy`]. +//! +//! The tracker can also be configured to run without a reverse proxy. In this +//! case, the tracker will use the IP address from the connection info. +//! +//! Given the following scenario: +//! +//! ```text +//! client <-> http proxy 1 <-> http proxy 2 <-> server +//! ip: 126.0.0.1 ip: 126.0.0.2 ip: 126.0.0.3 ip: 126.0.0.4 +//! X-Forwarded-For: 126.0.0.1 X-Forwarded-For: 126.0.0.1,126.0.0.2 +//! ``` +//! +//! This extractor returns these values: +//! +//! ```text +//! `right_most_x_forwarded_for` = 126.0.0.2 +//! `connection_info_ip` = 126.0.0.3 +//! ``` +use std::net::SocketAddr; + +use axum::extract::{ConnectInfo, FromRequestParts}; +use axum::http::request::Parts; +use axum::response::Response; +use axum_client_ip::RightmostXForwardedFor; +use futures::future::BoxFuture; +use futures::FutureExt; + +use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; + +/// Extractor for the [`ClientIpSources`] +/// struct. 
+pub struct Extract(pub ClientIpSources); + +impl FromRequestParts for Extract +where + S: Send + Sync, +{ + type Rejection = Response; + + #[must_use] + fn from_request_parts<'life0, 'life1, 'async_trait>( + parts: &'life0 mut Parts, + state: &'life1 S, + ) -> BoxFuture<'async_trait, Result> + where + 'life0: 'async_trait, + 'life1: 'async_trait, + Self: 'async_trait, + { + async { + let right_most_x_forwarded_for = match RightmostXForwardedFor::from_request_parts(parts, state).await { + Ok(right_most_x_forwarded_for) => Some(right_most_x_forwarded_for.0), + Err(_) => None, + }; + + let connection_info_ip = match ConnectInfo::::from_request_parts(parts, state).await { + Ok(connection_info_socket_addr) => Some(connection_info_socket_addr.0.ip()), + Err(_) => None, + }; + + Ok(Extract(ClientIpSources { + right_most_x_forwarded_for, + connection_info_ip, + })) + } + .boxed() + } +} diff --git a/src/servers/http/v1/extractors/mod.rs b/src/servers/http/v1/extractors/mod.rs new file mode 100644 index 000000000..beab3f2b8 --- /dev/null +++ b/src/servers/http/v1/extractors/mod.rs @@ -0,0 +1,8 @@ +//! Axum [`extractors`](axum::extract) for the HTTP server. +//! +//! This module contains the extractors used by the HTTP server to parse the +//! incoming requests. +pub mod announce_request; +pub mod authentication_key; +pub mod client_ip_sources; +pub mod scrape_request; diff --git a/src/servers/http/v1/extractors/scrape_request.rs b/src/servers/http/v1/extractors/scrape_request.rs new file mode 100644 index 000000000..07fa4ccb9 --- /dev/null +++ b/src/servers/http/v1/extractors/scrape_request.rs @@ -0,0 +1,179 @@ +//! Axum [`extractor`](axum::extract) for the [`Scrape`] +//! request. +//! +//! It parses the query parameters returning an [`Scrape`] +//! request. +//! +//! Refer to [`Scrape`](crate::servers::http::v1::requests::scrape) for more +//! information about the returned structure. +//! +//! 
It returns a bencoded [`Error`](crate::servers::http::v1::responses::error) +//! response (`500`) if the query parameters are missing or invalid. +//! +//! **Sample scrape request** +//! +//! +//! +//! **Sample error response** +//! +//! Missing query params for scrape request: +//! +//! ```text +//! d14:failure reason143:Cannot parse query params for scrape request: missing query params for scrape request in src/servers/http/v1/extractors/scrape_request.rs:52:23e +//! ``` +//! +//! Invalid query params for scrape request: +//! +//! ```text +//! d14:failure reason235:Cannot parse query params for scrape request: invalid param value invalid for info_hash in not enough bytes for infohash: got 7 bytes, expected 20 src/shared/bit_torrent/info_hash.rs:240:27, src/servers/http/v1/requests/scrape.rs:66:46e +//! ``` +use std::panic::Location; + +use axum::extract::FromRequestParts; +use axum::http::request::Parts; +use axum::response::{IntoResponse, Response}; +use futures::future::BoxFuture; +use futures::FutureExt; + +use crate::servers::http::v1::query::Query; +use crate::servers::http::v1::requests::scrape::{ParseScrapeQueryError, Scrape}; +use crate::servers::http::v1::responses; + +/// Extractor for the [`Scrape`] +/// request. 
+pub struct ExtractRequest(pub Scrape); + +impl FromRequestParts for ExtractRequest +where + S: Send + Sync, +{ + type Rejection = Response; + + #[must_use] + fn from_request_parts<'life0, 'life1, 'async_trait>( + parts: &'life0 mut Parts, + _state: &'life1 S, + ) -> BoxFuture<'async_trait, Result> + where + 'life0: 'async_trait, + 'life1: 'async_trait, + Self: 'async_trait, + { + async { + match extract_scrape_from(parts.uri.query()) { + Ok(scrape_request) => Ok(ExtractRequest(scrape_request)), + Err(error) => Err(error.into_response()), + } + } + .boxed() + } +} + +fn extract_scrape_from(maybe_raw_query: Option<&str>) -> Result { + if maybe_raw_query.is_none() { + return Err(responses::error::Error::from(ParseScrapeQueryError::MissingParams { + location: Location::caller(), + })); + } + + let query = maybe_raw_query.unwrap().parse::(); + + if let Err(error) = query { + return Err(responses::error::Error::from(error)); + } + + let scrape_request = Scrape::try_from(query.unwrap()); + + if let Err(error) = scrape_request { + return Err(responses::error::Error::from(error)); + } + + Ok(scrape_request.unwrap()) +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use torrust_tracker_primitives::info_hash::InfoHash; + + use super::extract_scrape_from; + use crate::servers::http::v1::requests::scrape::Scrape; + use crate::servers::http::v1::responses::error::Error; + + struct TestInfoHash { + pub bencoded: String, + pub value: InfoHash, + } + + fn test_info_hash() -> TestInfoHash { + TestInfoHash { + bencoded: "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0".to_owned(), + value: InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(), + } + } + + fn assert_error_response(error: &Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. 
Error: {error:?}" + ); + } + + #[test] + fn it_should_extract_the_scrape_request_from_the_url_query_params() { + let info_hash = test_info_hash(); + + let raw_query = format!("info_hash={}", info_hash.bencoded); + + let scrape = extract_scrape_from(Some(&raw_query)).unwrap(); + + assert_eq!( + scrape, + Scrape { + info_hashes: vec![info_hash.value], + } + ); + } + + #[test] + fn it_should_extract_the_scrape_request_from_the_url_query_params_with_more_than_one_info_hash() { + let info_hash = test_info_hash(); + + let raw_query = format!("info_hash={}&info_hash={}", info_hash.bencoded, info_hash.bencoded); + + let scrape = extract_scrape_from(Some(&raw_query)).unwrap(); + + assert_eq!( + scrape, + Scrape { + info_hashes: vec![info_hash.value, info_hash.value], + } + ); + } + + #[test] + fn it_should_reject_a_request_without_query_params() { + let response = extract_scrape_from(None).unwrap_err(); + + assert_error_response( + &response, + "Cannot parse query params for scrape request: missing query params for scrape request", + ); + } + + #[test] + fn it_should_reject_a_request_with_a_query_that_cannot_be_parsed() { + let invalid_query = "param1=value1=value2"; + let response = extract_scrape_from(Some(invalid_query)).unwrap_err(); + + assert_error_response(&response, "Cannot parse query params"); + } + + #[test] + fn it_should_reject_a_request_with_a_query_that_cannot_be_parsed_into_a_scrape_request() { + let response = extract_scrape_from(Some("param1=value1")).unwrap_err(); + + assert_error_response(&response, "Cannot parse query params for scrape request"); + } +} diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs new file mode 100644 index 000000000..1c7796fca --- /dev/null +++ b/src/servers/http/v1/handlers/announce.rs @@ -0,0 +1,357 @@ +//! Axum [`handlers`](axum#handlers) for the `announce` requests. +//! +//! Refer to [HTTP server](crate::servers::http) for more information about the +//! `announce` request. +//! 
+//! The handlers perform the authentication and authorization of the request, +//! and resolve the client IP address. +use std::net::{IpAddr, SocketAddr}; +use std::panic::Location; +use std::sync::Arc; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use axum::extract::State; +use axum::response::{IntoResponse, Response}; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_primitives::peer; + +use crate::core::auth::Key; +use crate::core::{AnnounceData, PeersWanted, Tracker}; +use crate::servers::http::v1::extractors::announce_request::ExtractRequest; +use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; +use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; +use crate::servers::http::v1::handlers::common::auth; +use crate::servers::http::v1::requests::announce::{Announce, Compact, Event}; +use crate::servers::http::v1::responses::{self}; +use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; +use crate::servers::http::v1::services::{self, peer_ip_resolver}; +use crate::CurrentClock; + +/// It handles the `announce` request when the HTTP tracker does not require +/// authentication (no PATH `key` parameter required). +#[allow(clippy::unused_async)] +pub async fn handle_without_key( + State(tracker): State>, + ExtractRequest(announce_request): ExtractRequest, + ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, +) -> Response { + tracing::debug!("http announce request: {:#?}", announce_request); + + handle(&tracker, &announce_request, &client_ip_sources, None).await +} + +/// It handles the `announce` request when the HTTP tracker requires +/// authentication (PATH `key` parameter required). 
+#[allow(clippy::unused_async)] +pub async fn handle_with_key( + State(tracker): State>, + ExtractRequest(announce_request): ExtractRequest, + ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, + ExtractKey(key): ExtractKey, +) -> Response { + tracing::debug!("http announce request: {:#?}", announce_request); + + handle(&tracker, &announce_request, &client_ip_sources, Some(key)).await +} + +/// It handles the `announce` request. +/// +/// Internal implementation that handles both the `authenticated` and +/// `unauthenticated` modes. +async fn handle( + tracker: &Arc, + announce_request: &Announce, + client_ip_sources: &ClientIpSources, + maybe_key: Option, +) -> Response { + let announce_data = match handle_announce(tracker, announce_request, client_ip_sources, maybe_key).await { + Ok(announce_data) => announce_data, + Err(error) => return error.into_response(), + }; + build_response(announce_request, announce_data) +} + +/* code-review: authentication, authorization and peer IP resolution could be moved + from the handler (Axum) layer into the app layer `services::announce::invoke`. + That would make the handler even simpler and the code more reusable and decoupled from Axum. + See https://github.com/torrust/torrust-tracker/discussions/240. 
+*/ + +async fn handle_announce( + tracker: &Arc, + announce_request: &Announce, + client_ip_sources: &ClientIpSources, + maybe_key: Option, +) -> Result { + // Authentication + if tracker.requires_authentication() { + match maybe_key { + Some(key) => match tracker.authenticate(&key).await { + Ok(()) => (), + Err(error) => return Err(responses::error::Error::from(error)), + }, + None => { + return Err(responses::error::Error::from(auth::Error::MissingAuthKey { + location: Location::caller(), + })) + } + } + } + + // Authorization + match tracker.authorize(&announce_request.info_hash).await { + Ok(()) => (), + Err(error) => return Err(responses::error::Error::from(error)), + } + + let peer_ip = match peer_ip_resolver::invoke(tracker.is_behind_reverse_proxy(), client_ip_sources) { + Ok(peer_ip) => peer_ip, + Err(error) => return Err(responses::error::Error::from(error)), + }; + + let mut peer = peer_from_request(announce_request, &peer_ip); + let peers_wanted = match announce_request.numwant { + Some(numwant) => PeersWanted::only(numwant), + None => PeersWanted::All, + }; + + let announce_data = services::announce::invoke(tracker.clone(), announce_request.info_hash, &mut peer, &peers_wanted).await; + + Ok(announce_data) +} + +fn build_response(announce_request: &Announce, announce_data: AnnounceData) -> Response { + if announce_request.compact.as_ref().is_some_and(|f| *f == Compact::Accepted) { + let response: responses::Announce = announce_data.into(); + response.into_response() + } else { + let response: responses::Announce = announce_data.into(); + response.into_response() + } +} + +/// It builds a `Peer` from the announce request. +/// +/// It ignores the peer address in the announce request params. 
+#[must_use] +fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> peer::Peer { + peer::Peer { + peer_id: announce_request.peer_id, + peer_addr: SocketAddr::new(*peer_ip, announce_request.port), + updated: CurrentClock::now(), + uploaded: announce_request.uploaded.unwrap_or(NumberOfBytes::new(0)), + downloaded: announce_request.downloaded.unwrap_or(NumberOfBytes::new(0)), + left: announce_request.left.unwrap_or(NumberOfBytes::new(0)), + event: map_to_torrust_event(&announce_request.event), + } +} + +#[must_use] +pub fn map_to_aquatic_event(event: &Option) -> aquatic_udp_protocol::AnnounceEvent { + match event { + Some(event) => match &event { + Event::Started => aquatic_udp_protocol::AnnounceEvent::Started, + Event::Stopped => aquatic_udp_protocol::AnnounceEvent::Stopped, + Event::Completed => aquatic_udp_protocol::AnnounceEvent::Completed, + }, + None => aquatic_udp_protocol::AnnounceEvent::None, + } +} + +#[must_use] +pub fn map_to_torrust_event(event: &Option) -> AnnounceEvent { + match event { + Some(event) => match &event { + Event::Started => AnnounceEvent::Started, + Event::Stopped => AnnounceEvent::Stopped, + Event::Completed => AnnounceEvent::Completed, + }, + None => AnnounceEvent::None, + } +} + +#[cfg(test)] +mod tests { + + use aquatic_udp_protocol::PeerId; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; + + use crate::core::services::tracker_factory; + use crate::core::Tracker; + use crate::servers::http::v1::requests::announce::Announce; + use crate::servers::http::v1::responses; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; + + fn private_tracker() -> Tracker { + tracker_factory(&configuration::ephemeral_private()) + } + + fn whitelisted_tracker() -> Tracker { + tracker_factory(&configuration::ephemeral_listed()) + } + + fn tracker_on_reverse_proxy() -> Tracker { + tracker_factory(&configuration::ephemeral_with_reverse_proxy()) + } + + fn 
tracker_not_on_reverse_proxy() -> Tracker { + tracker_factory(&configuration::ephemeral_without_reverse_proxy()) + } + + fn sample_announce_request() -> Announce { + Announce { + info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + peer_id: PeerId(*b"-qB00000000000000001"), + port: 17548, + downloaded: None, + uploaded: None, + left: None, + event: None, + compact: None, + numwant: None, + } + } + + fn sample_client_ip_sources() -> ClientIpSources { + ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + } + } + + fn assert_error_response(error: &responses::error::Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. Error: {error:?}" + ); + } + + mod with_tracker_in_private_mode { + + use std::str::FromStr; + use std::sync::Arc; + + use super::{private_tracker, sample_announce_request, sample_client_ip_sources}; + use crate::core::auth; + use crate::servers::http::v1::handlers::announce::handle_announce; + use crate::servers::http::v1::handlers::announce::tests::assert_error_response; + + #[tokio::test] + async fn it_should_fail_when_the_authentication_key_is_missing() { + let tracker = Arc::new(private_tracker()); + + let maybe_key = None; + + let response = handle_announce(&tracker, &sample_announce_request(), &sample_client_ip_sources(), maybe_key) + .await + .unwrap_err(); + + assert_error_response( + &response, + "Authentication error: Missing authentication key param for private tracker", + ); + } + + #[tokio::test] + async fn it_should_fail_when_the_authentication_key_is_invalid() { + let tracker = Arc::new(private_tracker()); + + let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let maybe_key = Some(unregistered_key); + + let response = handle_announce(&tracker, &sample_announce_request(), &sample_client_ip_sources(), maybe_key) + .await + .unwrap_err(); + + 
assert_error_response(&response, "Authentication error: Failed to read key"); + } + } + + mod with_tracker_in_listed_mode { + + use std::sync::Arc; + + use super::{sample_announce_request, sample_client_ip_sources, whitelisted_tracker}; + use crate::servers::http::v1::handlers::announce::handle_announce; + use crate::servers::http::v1::handlers::announce::tests::assert_error_response; + + #[tokio::test] + async fn it_should_fail_when_the_announced_torrent_is_not_whitelisted() { + let tracker = Arc::new(whitelisted_tracker()); + + let announce_request = sample_announce_request(); + + let response = handle_announce(&tracker, &announce_request, &sample_client_ip_sources(), None) + .await + .unwrap_err(); + + assert_error_response( + &response, + &format!( + "Tracker error: The torrent: {}, is not whitelisted", + announce_request.info_hash + ), + ); + } + } + + mod with_tracker_on_reverse_proxy { + + use std::sync::Arc; + + use super::{sample_announce_request, tracker_on_reverse_proxy}; + use crate::servers::http::v1::handlers::announce::handle_announce; + use crate::servers::http::v1::handlers::announce::tests::assert_error_response; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; + + #[tokio::test] + async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { + let tracker = Arc::new(tracker_on_reverse_proxy()); + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }; + + let response = handle_announce(&tracker, &sample_announce_request(), &client_ip_sources, None) + .await + .unwrap_err(); + + assert_error_response( + &response, + "Error resolving peer IP: missing or invalid the right most X-Forwarded-For IP", + ); + } + } + + mod with_tracker_not_on_reverse_proxy { + + use std::sync::Arc; + + use super::{sample_announce_request, tracker_not_on_reverse_proxy}; + use crate::servers::http::v1::handlers::announce::handle_announce; + use 
crate::servers::http::v1::handlers::announce::tests::assert_error_response; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; + + #[tokio::test] + async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { + let tracker = Arc::new(tracker_not_on_reverse_proxy()); + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }; + + let response = handle_announce(&tracker, &sample_announce_request(), &client_ip_sources, None) + .await + .unwrap_err(); + + assert_error_response( + &response, + "Error resolving peer IP: cannot get the client IP from the connection info", + ); + } + } +} diff --git a/src/servers/http/v1/handlers/common/auth.rs b/src/servers/http/v1/handlers/common/auth.rs new file mode 100644 index 000000000..f9a7796a4 --- /dev/null +++ b/src/servers/http/v1/handlers/common/auth.rs @@ -0,0 +1,40 @@ +//! HTTP server authentication error and conversion to +//! [`responses::error::Error`] +//! response. +use std::panic::Location; + +use thiserror::Error; + +use crate::core::auth; +use crate::servers::http::v1::responses; + +/// Authentication error. +/// +/// When the tracker is private, the authentication key is required in the URL +/// path. These are the possible errors that can occur when extracting the key +/// from the URL path. +#[derive(Debug, Error)] +pub enum Error { + #[error("Missing authentication key param for private tracker. Error in {location}")] + MissingAuthKey { location: &'static Location<'static> }, + #[error("Invalid format for authentication key param. Error in {location}")] + InvalidKeyFormat { location: &'static Location<'static> }, + #[error("Cannot extract authentication key param from URL path. 
Error in {location}")] + CannotExtractKeyParam { location: &'static Location<'static> }, +} + +impl From for responses::error::Error { + fn from(err: Error) -> Self { + responses::error::Error { + failure_reason: format!("Authentication error: {err}"), + } + } +} + +impl From for responses::error::Error { + fn from(err: auth::Error) -> Self { + responses::error::Error { + failure_reason: format!("Authentication error: {err}"), + } + } +} diff --git a/src/servers/http/v1/handlers/common/mod.rs b/src/servers/http/v1/handlers/common/mod.rs new file mode 100644 index 000000000..30eaf37b7 --- /dev/null +++ b/src/servers/http/v1/handlers/common/mod.rs @@ -0,0 +1,3 @@ +//! Common logic for HTTP handlers. +pub mod auth; +pub mod peer_ip; diff --git a/src/servers/http/v1/handlers/common/peer_ip.rs b/src/servers/http/v1/handlers/common/peer_ip.rs new file mode 100644 index 000000000..5602bd26c --- /dev/null +++ b/src/servers/http/v1/handlers/common/peer_ip.rs @@ -0,0 +1,40 @@ +//! Logic to convert peer IP resolution errors into responses. +//! +//! The HTTP tracker may fail to resolve the peer IP address. This module +//! contains the logic to convert those +//! [`PeerIpResolutionError`] +//! errors into responses. +use crate::servers::http::v1::responses; +use crate::servers::http::v1::services::peer_ip_resolver::PeerIpResolutionError; + +impl From for responses::error::Error { + fn from(err: PeerIpResolutionError) -> Self { + responses::error::Error { + failure_reason: format!("Error resolving peer IP: {err}"), + } + } +} + +#[cfg(test)] +mod tests { + use std::panic::Location; + + use crate::servers::http::v1::responses; + use crate::servers::http::v1::services::peer_ip_resolver::PeerIpResolutionError; + + fn assert_error_response(error: &responses::error::Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. 
Error: {error:?}" + ); + } + + #[test] + fn it_should_map_a_peer_ip_resolution_error_into_an_error_response() { + let response = responses::error::Error::from(PeerIpResolutionError::MissingRightMostXForwardedForIp { + location: Location::caller(), + }); + + assert_error_response(&response, "Error resolving peer IP"); + } +} diff --git a/src/servers/http/v1/handlers/health_check.rs b/src/servers/http/v1/handlers/health_check.rs new file mode 100644 index 000000000..b15af6255 --- /dev/null +++ b/src/servers/http/v1/handlers/health_check.rs @@ -0,0 +1,18 @@ +use axum::Json; +use serde::{Deserialize, Serialize}; + +#[allow(clippy::unused_async)] +pub async fn handler() -> Json { + Json(Report { status: Status::Ok }) +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub enum Status { + Ok, + Error, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Report { + pub status: Status, +} diff --git a/src/servers/http/v1/handlers/mod.rs b/src/servers/http/v1/handlers/mod.rs new file mode 100644 index 000000000..7b3a1e7c3 --- /dev/null +++ b/src/servers/http/v1/handlers/mod.rs @@ -0,0 +1,19 @@ +//! Axum [`handlers`](axum#handlers) for the HTTP server. +//! +//! Refer to the generic [HTTP server documentation](crate::servers::http) for +//! more information about the HTTP tracker. +use super::responses; +use crate::core::error::Error; + +pub mod announce; +pub mod common; +pub mod health_check; +pub mod scrape; + +impl From for responses::error::Error { + fn from(err: Error) -> Self { + responses::error::Error { + failure_reason: format!("Tracker error: {err}"), + } + } +} diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs new file mode 100644 index 000000000..ca4c85207 --- /dev/null +++ b/src/servers/http/v1/handlers/scrape.rs @@ -0,0 +1,279 @@ +//! Axum [`handlers`](axum#handlers) for the `announce` requests. +//! +//! Refer to [HTTP server](crate::servers::http) for more information about the +//! 
`scrape` request. +//! +//! The handlers perform the authentication and authorization of the request, +//! and resolve the client IP address. +use std::sync::Arc; + +use axum::extract::State; +use axum::response::{IntoResponse, Response}; + +use crate::core::auth::Key; +use crate::core::{ScrapeData, Tracker}; +use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; +use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; +use crate::servers::http::v1::extractors::scrape_request::ExtractRequest; +use crate::servers::http::v1::requests::scrape::Scrape; +use crate::servers::http::v1::services::peer_ip_resolver::{self, ClientIpSources}; +use crate::servers::http::v1::{responses, services}; + +/// It handles the `scrape` request when the HTTP tracker is configured +/// to run in `public` mode. +#[allow(clippy::unused_async)] +pub async fn handle_without_key( + State(tracker): State>, + ExtractRequest(scrape_request): ExtractRequest, + ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, +) -> Response { + tracing::debug!("http scrape request: {:#?}", &scrape_request); + + handle(&tracker, &scrape_request, &client_ip_sources, None).await +} + +/// It handles the `scrape` request when the HTTP tracker is configured +/// to run in `private` or `private_listed` mode. +/// +/// In this case, the authentication `key` parameter is required. 
+#[allow(clippy::unused_async)] +pub async fn handle_with_key( + State(tracker): State>, + ExtractRequest(scrape_request): ExtractRequest, + ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, + ExtractKey(key): ExtractKey, +) -> Response { + tracing::debug!("http scrape request: {:#?}", &scrape_request); + + handle(&tracker, &scrape_request, &client_ip_sources, Some(key)).await +} + +async fn handle( + tracker: &Arc, + scrape_request: &Scrape, + client_ip_sources: &ClientIpSources, + maybe_key: Option, +) -> Response { + let scrape_data = match handle_scrape(tracker, scrape_request, client_ip_sources, maybe_key).await { + Ok(scrape_data) => scrape_data, + Err(error) => return error.into_response(), + }; + build_response(scrape_data) +} + +/* code-review: authentication, authorization and peer IP resolution could be moved + from the handler (Axum) layer into the app layer `services::announce::invoke`. + That would make the handler even simpler and the code more reusable and decoupled from Axum. + See https://github.com/torrust/torrust-tracker/discussions/240. +*/ + +async fn handle_scrape( + tracker: &Arc, + scrape_request: &Scrape, + client_ip_sources: &ClientIpSources, + maybe_key: Option, +) -> Result { + // Authentication + let return_real_scrape_data = if tracker.requires_authentication() { + match maybe_key { + Some(key) => match tracker.authenticate(&key).await { + Ok(()) => true, + Err(_error) => false, + }, + None => false, + } + } else { + true + }; + + // Authorization for scrape requests is handled at the `Tracker` level + // for each torrent. 
+ + let peer_ip = match peer_ip_resolver::invoke(tracker.is_behind_reverse_proxy(), client_ip_sources) { + Ok(peer_ip) => peer_ip, + Err(error) => return Err(responses::error::Error::from(error)), + }; + + if return_real_scrape_data { + Ok(services::scrape::invoke(tracker, &scrape_request.info_hashes, &peer_ip).await) + } else { + Ok(services::scrape::fake(tracker, &scrape_request.info_hashes, &peer_ip).await) + } +} + +fn build_response(scrape_data: ScrapeData) -> Response { + responses::scrape::Bencoded::from(scrape_data).into_response() +} + +#[cfg(test)] +mod tests { + use std::net::IpAddr; + use std::str::FromStr; + + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; + + use crate::core::services::tracker_factory; + use crate::core::Tracker; + use crate::servers::http::v1::requests::scrape::Scrape; + use crate::servers::http::v1::responses; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; + + fn private_tracker() -> Tracker { + tracker_factory(&configuration::ephemeral_private()) + } + + fn whitelisted_tracker() -> Tracker { + tracker_factory(&configuration::ephemeral_listed()) + } + + fn tracker_on_reverse_proxy() -> Tracker { + tracker_factory(&configuration::ephemeral_with_reverse_proxy()) + } + + fn tracker_not_on_reverse_proxy() -> Tracker { + tracker_factory(&configuration::ephemeral_without_reverse_proxy()) + } + + fn sample_scrape_request() -> Scrape { + Scrape { + info_hashes: vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()], + } + } + + fn sample_client_ip_sources() -> ClientIpSources { + ClientIpSources { + right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), + connection_info_ip: Some(IpAddr::from_str("203.0.113.196").unwrap()), + } + } + + fn assert_error_response(error: &responses::error::Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: 
'{error_message}'. Error: {error:?}" + ); + } + + mod with_tracker_in_private_mode { + use std::str::FromStr; + use std::sync::Arc; + + use super::{private_tracker, sample_client_ip_sources, sample_scrape_request}; + use crate::core::{auth, ScrapeData}; + use crate::servers::http::v1::handlers::scrape::handle_scrape; + + #[tokio::test] + async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_missing() { + let tracker = Arc::new(private_tracker()); + + let scrape_request = sample_scrape_request(); + let maybe_key = None; + + let scrape_data = handle_scrape(&tracker, &scrape_request, &sample_client_ip_sources(), maybe_key) + .await + .unwrap(); + + let expected_scrape_data = ScrapeData::zeroed(&scrape_request.info_hashes); + + assert_eq!(scrape_data, expected_scrape_data); + } + + #[tokio::test] + async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_invalid() { + let tracker = Arc::new(private_tracker()); + + let scrape_request = sample_scrape_request(); + let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let maybe_key = Some(unregistered_key); + + let scrape_data = handle_scrape(&tracker, &scrape_request, &sample_client_ip_sources(), maybe_key) + .await + .unwrap(); + + let expected_scrape_data = ScrapeData::zeroed(&scrape_request.info_hashes); + + assert_eq!(scrape_data, expected_scrape_data); + } + } + + mod with_tracker_in_listed_mode { + + use std::sync::Arc; + + use super::{sample_client_ip_sources, sample_scrape_request, whitelisted_tracker}; + use crate::core::ScrapeData; + use crate::servers::http::v1::handlers::scrape::handle_scrape; + + #[tokio::test] + async fn it_should_return_zeroed_swarm_metadata_when_the_torrent_is_not_whitelisted() { + let tracker = Arc::new(whitelisted_tracker()); + + let scrape_request = sample_scrape_request(); + + let scrape_data = handle_scrape(&tracker, &scrape_request, &sample_client_ip_sources(), None) + .await + .unwrap(); + + let 
expected_scrape_data = ScrapeData::zeroed(&scrape_request.info_hashes); + + assert_eq!(scrape_data, expected_scrape_data); + } + } + + mod with_tracker_on_reverse_proxy { + use std::sync::Arc; + + use super::{sample_scrape_request, tracker_on_reverse_proxy}; + use crate::servers::http::v1::handlers::scrape::handle_scrape; + use crate::servers::http::v1::handlers::scrape::tests::assert_error_response; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; + + #[tokio::test] + async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { + let tracker = Arc::new(tracker_on_reverse_proxy()); + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }; + + let response = handle_scrape(&tracker, &sample_scrape_request(), &client_ip_sources, None) + .await + .unwrap_err(); + + assert_error_response( + &response, + "Error resolving peer IP: missing or invalid the right most X-Forwarded-For IP", + ); + } + } + + mod with_tracker_not_on_reverse_proxy { + use std::sync::Arc; + + use super::{sample_scrape_request, tracker_not_on_reverse_proxy}; + use crate::servers::http::v1::handlers::scrape::handle_scrape; + use crate::servers::http::v1::handlers::scrape::tests::assert_error_response; + use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; + + #[tokio::test] + async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { + let tracker = Arc::new(tracker_not_on_reverse_proxy()); + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }; + + let response = handle_scrape(&tracker, &sample_scrape_request(), &client_ip_sources, None) + .await + .unwrap_err(); + + assert_error_response( + &response, + "Error resolving peer IP: cannot get the client IP from the connection info", + ); + } + } +} diff --git a/src/servers/http/v1/mod.rs b/src/servers/http/v1/mod.rs new file mode 
100644 index 000000000..9d2745692 --- /dev/null +++ b/src/servers/http/v1/mod.rs @@ -0,0 +1,11 @@ +//! HTTP server implementation for the `v1` API. +//! +//! Refer to the generic [HTTP server documentation](crate::servers::http) for +//! more information about the endpoints and their usage. +pub mod extractors; +pub mod handlers; +pub mod query; +pub mod requests; +pub mod responses; +pub mod routes; +pub mod services; diff --git a/src/servers/http/v1/query.rs b/src/servers/http/v1/query.rs new file mode 100644 index 000000000..3a078daae --- /dev/null +++ b/src/servers/http/v1/query.rs @@ -0,0 +1,355 @@ +//! The `Query` struct used to parse and store the URL query parameters. +//! +/// ```text +/// URI = scheme ":" ["//" authority] path ["?" query] ["#" fragment] +/// ``` +use std::panic::Location; +use std::str::FromStr; + +use multimap::MultiMap; +use thiserror::Error; + +type ParamName = String; +type ParamValue = String; + +/// It represents a URL query component. +/// +/// ```text +/// URI = scheme ":" ["//" authority] path ["?" query] ["#" fragment] +/// ``` +#[derive(Debug)] +pub struct Query { + /* code-review: + - Consider using a third-party crate. + - Conversion from/to string is not deterministic. Params can be in a different order in the query string. + */ + params: MultiMap, +} + +impl Query { + /// It return `Some(value)` for a URL query param if the param with the + /// input `name` exists. 
For example: + /// + /// ```rust + /// use torrust_tracker::servers::http::v1::query::Query; + /// + /// let raw_query = "param1=value1¶m2=value2"; + /// + /// let query = raw_query.parse::().unwrap(); + /// + /// assert_eq!(query.get_param("param1").unwrap(), "value1"); + /// assert_eq!(query.get_param("param2").unwrap(), "value2"); + /// ``` + /// + /// It returns only the first param value even if it has multiple values: + /// + /// ```rust + /// use torrust_tracker::servers::http::v1::query::Query; + /// + /// let raw_query = "param1=value1¶m1=value2"; + /// + /// let query = raw_query.parse::().unwrap(); + /// + /// assert_eq!(query.get_param("param1").unwrap(), "value1"); + /// ``` + #[must_use] + pub fn get_param(&self, name: &str) -> Option { + self.params.get(name).map(|pair| pair.value.clone()) + } + + /// Returns all the param values as a vector. + /// + /// ```rust + /// use torrust_tracker::servers::http::v1::query::Query; + /// + /// let query = "param1=value1¶m1=value2".parse::().unwrap(); + /// + /// assert_eq!( + /// query.get_param_vec("param1"), + /// Some(vec!["value1".to_string(), "value2".to_string()]) + /// ); + /// ``` + /// + /// Returns all the param values as a vector even if it has only one value. + /// + /// ```rust + /// use torrust_tracker::servers::http::v1::query::Query; + /// + /// let query = "param1=value1".parse::().unwrap(); + /// + /// assert_eq!( + /// query.get_param_vec("param1"), Some(vec!["value1".to_string()]) + /// ); + /// ``` + #[must_use] + pub fn get_param_vec(&self, name: &str) -> Option> { + self.params.get_vec(name).map(|pairs| { + let mut param_values = vec![]; + for pair in pairs { + param_values.push(pair.value.to_string()); + } + param_values + }) + } +} + +/// This error can be returned when parsing a [`Query`] +/// from a string. +#[derive(Error, Debug)] +pub enum ParseQueryError { + /// Invalid URL query param. For example: `"name=value=value"`. It contains + /// an unescaped `=` character. 
+ #[error("invalid param {raw_param} in {location}")] + InvalidParam { + location: &'static Location<'static>, + raw_param: String, + }, +} + +impl FromStr for Query { + type Err = ParseQueryError; + + fn from_str(raw_query: &str) -> Result { + let mut params: MultiMap = MultiMap::new(); + + let raw_params = raw_query.trim().trim_start_matches('?').split('&').collect::>(); + + for raw_param in raw_params { + let pair: NameValuePair = raw_param.parse()?; + let param_name = pair.name.clone(); + params.insert(param_name, pair); + } + + Ok(Self { params }) + } +} + +impl From> for Query { + fn from(raw_params: Vec<(&str, &str)>) -> Self { + let mut params: MultiMap = MultiMap::new(); + + for raw_param in raw_params { + params.insert(raw_param.0.to_owned(), NameValuePair::new(raw_param.0, raw_param.1)); + } + + Self { params } + } +} + +impl std::fmt::Display for Query { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let query = self + .params + .iter_all() + .map(|param| format!("{}", FieldValuePairSet::from_vec(param.1))) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +#[derive(Debug, PartialEq, Clone)] +struct NameValuePair { + name: ParamName, + value: ParamValue, +} + +impl NameValuePair { + pub fn new(name: &str, value: &str) -> Self { + Self { + name: name.to_owned(), + value: value.to_owned(), + } + } +} + +impl FromStr for NameValuePair { + type Err = ParseQueryError; + + fn from_str(raw_param: &str) -> Result { + let pair = raw_param.split('=').collect::>(); + + if pair.len() != 2 { + return Err(ParseQueryError::InvalidParam { + location: Location::caller(), + raw_param: raw_param.to_owned(), + }); + } + + Ok(Self { + name: pair[0].to_owned(), + value: pair[1].to_owned(), + }) + } +} + +impl std::fmt::Display for NameValuePair { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}={}", self.name, self.value) + } +} + +#[derive(Debug, PartialEq)] +struct FieldValuePairSet { + pairs: 
Vec, +} + +impl FieldValuePairSet { + fn from_vec(pair_vec: &Vec) -> Self { + let mut pairs: Vec = vec![]; + + for pair in pair_vec { + pairs.push(pair.clone()); + } + + Self { pairs } + } +} + +impl std::fmt::Display for FieldValuePairSet { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let query = self + .pairs + .iter() + .map(|pair| format!("{pair}")) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +#[cfg(test)] +mod tests { + + mod url_query { + use crate::servers::http::v1::query::Query; + + #[test] + fn should_parse_the_query_params_from_an_url_query_string() { + let raw_query = + "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001&port=17548"; + + let query = raw_query.parse::().unwrap(); + + assert_eq!( + query.get_param("info_hash").unwrap(), + "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" + ); + assert_eq!(query.get_param("peer_id").unwrap(), "-qB00000000000000001"); + assert_eq!(query.get_param("port").unwrap(), "17548"); + } + + #[test] + fn should_be_instantiated_from_a_string_pair_vector() { + let query = Query::from(vec![("param1", "value1"), ("param2", "value2")]); + + assert_eq!(query.get_param("param1"), Some("value1".to_string())); + assert_eq!(query.get_param("param2"), Some("value2".to_string())); + } + + #[test] + fn should_fail_parsing_an_invalid_query_string() { + let invalid_raw_query = "name=value=value"; + + let query = invalid_raw_query.parse::(); + + assert!(query.is_err()); + } + + #[test] + fn should_ignore_the_preceding_question_mark_if_it_exists() { + let raw_query = "?name=value"; + + let query = raw_query.parse::().unwrap(); + + assert_eq!(query.get_param("name"), Some("value".to_string())); + } + + #[test] + fn should_trim_whitespaces() { + let raw_query = " name=value "; + + let query = raw_query.parse::().unwrap(); + + assert_eq!(query.get_param("name"), Some("value".to_string())); + } + + mod 
should_allow_more_than_one_value_for_the_same_param { + use crate::servers::http::v1::query::Query; + + #[test] + fn instantiated_from_a_vector() { + let query1 = Query::from(vec![("param1", "value1"), ("param1", "value2")]); + assert_eq!( + query1.get_param_vec("param1"), + Some(vec!["value1".to_string(), "value2".to_string()]) + ); + } + + #[test] + fn parsed_from_an_string() { + let query2 = "param1=value1¶m1=value2".parse::().unwrap(); + assert_eq!( + query2.get_param_vec("param1"), + Some(vec!["value1".to_string(), "value2".to_string()]) + ); + } + } + + mod should_be_displayed { + use crate::servers::http::v1::query::Query; + + #[test] + fn with_one_param() { + assert_eq!("param1=value1".parse::().unwrap().to_string(), "param1=value1"); + } + + #[test] + fn with_multiple_params() { + let query = "param1=value1¶m2=value2".parse::().unwrap().to_string(); + assert!(query == "param1=value1¶m2=value2" || query == "param2=value2¶m1=value1"); + } + + #[test] + fn with_multiple_values_for_the_same_param() { + let query = "param1=value1¶m1=value2".parse::().unwrap().to_string(); + assert!(query == "param1=value1¶m1=value2" || query == "param1=value2¶m1=value1"); + } + } + + mod param_name_value_pair { + use crate::servers::http::v1::query::NameValuePair; + + #[test] + fn should_parse_a_single_query_param() { + let raw_param = "name=value"; + + let param = raw_param.parse::().unwrap(); + + assert_eq!( + param, + NameValuePair { + name: "name".to_string(), + value: "value".to_string(), + } + ); + } + + #[test] + fn should_fail_parsing_an_invalid_query_param() { + let invalid_raw_param = "name=value=value"; + + let query = invalid_raw_param.parse::(); + + assert!(query.is_err()); + } + + #[test] + fn should_be_displayed() { + assert_eq!("name=value".parse::().unwrap().to_string(), "name=value"); + } + } + } +} diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs new file mode 100644 index 000000000..029bdbc01 --- /dev/null +++ 
b/src/servers/http/v1/requests/announce.rs @@ -0,0 +1,591 @@ +//! `Announce` request for the HTTP tracker. +//! +//! Data structures and logic for parsing the `announce` request. +use std::fmt; +use std::panic::Location; +use std::str::FromStr; + +use aquatic_udp_protocol::{NumberOfBytes, PeerId}; +use thiserror::Error; +use torrust_tracker_located_error::{Located, LocatedError}; +use torrust_tracker_primitives::info_hash::{self, InfoHash}; +use torrust_tracker_primitives::peer; + +use crate::servers::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; +use crate::servers::http::v1::query::{ParseQueryError, Query}; +use crate::servers::http::v1::responses; + +// Query param names +const INFO_HASH: &str = "info_hash"; +const PEER_ID: &str = "peer_id"; +const PORT: &str = "port"; +const DOWNLOADED: &str = "downloaded"; +const UPLOADED: &str = "uploaded"; +const LEFT: &str = "left"; +const EVENT: &str = "event"; +const COMPACT: &str = "compact"; +const NUMWANT: &str = "numwant"; + +/// The `Announce` request. Fields use the domain types after parsing the +/// query params of the request. +/// +/// ```rust +/// use aquatic_udp_protocol::{NumberOfBytes, PeerId}; +/// use torrust_tracker::servers::http::v1::requests::announce::{Announce, Compact, Event}; +/// use torrust_tracker_primitives::info_hash::InfoHash; +/// +/// let request = Announce { +/// // Mandatory params +/// info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), +/// peer_id: PeerId(*b"-qB00000000000000001"), +/// port: 17548, +/// // Optional params +/// downloaded: Some(NumberOfBytes::new(1)), +/// uploaded: Some(NumberOfBytes::new(1)), +/// left: Some(NumberOfBytes::new(1)), +/// event: Some(Event::Started), +/// compact: Some(Compact::NotAccepted), +/// numwant: Some(50) +/// }; +/// ``` +/// +/// > **NOTICE**: The [BEP 03. 
The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +/// > specifies that only the peer `IP` and `event`are optional. However, the +/// > tracker defines default values for some of the mandatory params. +/// +/// > **NOTICE**: The struct does not contain the `IP` of the peer. It's not +/// > mandatory and it's not used by the tracker. The `IP` is obtained from the +/// > request itself. +#[derive(Debug, PartialEq)] +pub struct Announce { + // Mandatory params + /// The `InfoHash` of the torrent. + pub info_hash: InfoHash, + + /// The `PeerId` of the peer. + pub peer_id: PeerId, + + /// The port of the peer. + pub port: u16, + + // Optional params + /// The number of bytes downloaded by the peer. + pub downloaded: Option, + + /// The number of bytes uploaded by the peer. + pub uploaded: Option, + + /// The number of bytes left to download by the peer. + pub left: Option, + + /// The event that the peer is reporting. It can be `Started`, `Stopped` or + /// `Completed`. + pub event: Option, + + /// Whether the response should be in compact mode or not. + pub compact: Option, + + /// Number of peers that the client would receive from the tracker. The + /// value is permitted to be zero. + pub numwant: Option, +} + +/// Errors that can occur when parsing the `Announce` request. +/// +/// The `info_hash` and `peer_id` query params are special because they contain +/// binary data. The `info_hash` is a 20-byte SHA1 hash and the `peer_id` is a +/// 20-byte array. +#[derive(Error, Debug)] +pub enum ParseAnnounceQueryError { + /// A mandatory param is missing. + #[error("missing query params for announce request in {location}")] + MissingParams { location: &'static Location<'static> }, + #[error("missing param {param_name} in {location}")] + MissingParam { + location: &'static Location<'static>, + param_name: String, + }, + /// The param cannot be parsed into the domain type. 
+ #[error("invalid param value {param_value} for {param_name} in {location}")] + InvalidParam { + param_name: String, + param_value: String, + location: &'static Location<'static>, + }, + /// The param value is out of range. + #[error("param value overflow {param_value} for {param_name} in {location}")] + NumberOfBytesOverflow { + param_name: String, + param_value: String, + location: &'static Location<'static>, + }, + /// The `info_hash` is invalid. + #[error("invalid param value {param_value} for {param_name} in {source}")] + InvalidInfoHashParam { + param_name: String, + param_value: String, + source: LocatedError<'static, info_hash::ConversionError>, + }, + /// The `peer_id` is invalid. + #[error("invalid param value {param_value} for {param_name} in {source}")] + InvalidPeerIdParam { + param_name: String, + param_value: String, + source: LocatedError<'static, peer::IdConversionError>, + }, +} + +/// The event that the peer is reporting: `started`, `completed` or `stopped`. +/// +/// If the event is not present or empty that means that the peer is just +/// updating its status. It's one of the announcements done at regular intervals. +/// +/// Refer to [BEP 03. The `BitTorrent Protocol` Specification](https://www.bittorrent.org/beps/bep_0003.html) +/// for more information. +#[derive(PartialEq, Debug)] +pub enum Event { + /// Event sent when a download first begins. + Started, + /// Event sent when the downloader cease downloading. + Stopped, + /// Event sent when the download is complete. 
+ /// No `completed` is sent if the file was complete when started + Completed, +} + +impl FromStr for Event { + type Err = ParseAnnounceQueryError; + + fn from_str(raw_param: &str) -> Result { + match raw_param { + "started" => Ok(Self::Started), + "stopped" => Ok(Self::Stopped), + "completed" => Ok(Self::Completed), + _ => Err(ParseAnnounceQueryError::InvalidParam { + param_name: EVENT.to_owned(), + param_value: raw_param.to_owned(), + location: Location::caller(), + }), + } + } +} + +impl fmt::Display for Event { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Event::Started => write!(f, "started"), + Event::Stopped => write!(f, "stopped"), + Event::Completed => write!(f, "completed"), + } + } +} + +/// Whether the `announce` response should be in compact mode or not. +/// +/// Depending on the value of this param, the tracker will return a different +/// response: +/// +/// - [`Normal`](crate::servers::http::v1::responses::announce::Normal), i.e. a `non-compact` response. +/// - [`Compact`](crate::servers::http::v1::responses::announce::Compact) response. +/// +/// Refer to [BEP 23. Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) +#[derive(PartialEq, Debug)] +pub enum Compact { + /// The client advises the tracker that the client prefers compact format. + Accepted = 1, + /// The client advises the tracker that is prefers the original format + /// described in [BEP 03. 
The BitTorrent Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) + NotAccepted = 0, +} + +impl fmt::Display for Compact { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Compact::Accepted => write!(f, "1"), + Compact::NotAccepted => write!(f, "0"), + } + } +} + +impl FromStr for Compact { + type Err = ParseAnnounceQueryError; + + fn from_str(raw_param: &str) -> Result { + match raw_param { + "1" => Ok(Self::Accepted), + "0" => Ok(Self::NotAccepted), + _ => Err(ParseAnnounceQueryError::InvalidParam { + param_name: COMPACT.to_owned(), + param_value: raw_param.to_owned(), + location: Location::caller(), + }), + } + } +} + +impl From for responses::error::Error { + fn from(err: ParseQueryError) -> Self { + responses::error::Error { + failure_reason: format!("Cannot parse query params: {err}"), + } + } +} + +impl From for responses::error::Error { + fn from(err: ParseAnnounceQueryError) -> Self { + responses::error::Error { + failure_reason: format!("Cannot parse query params for announce request: {err}"), + } + } +} + +impl TryFrom for Announce { + type Error = ParseAnnounceQueryError; + + fn try_from(query: Query) -> Result { + Ok(Self { + info_hash: extract_info_hash(&query)?, + peer_id: extract_peer_id(&query)?, + port: extract_port(&query)?, + downloaded: extract_downloaded(&query)?, + uploaded: extract_uploaded(&query)?, + left: extract_left(&query)?, + event: extract_event(&query)?, + compact: extract_compact(&query)?, + numwant: extract_numwant(&query)?, + }) + } +} + +// Mandatory params + +fn extract_info_hash(query: &Query) -> Result { + match query.get_param(INFO_HASH) { + Some(raw_param) => { + Ok( + percent_decode_info_hash(&raw_param).map_err(|err| ParseAnnounceQueryError::InvalidInfoHashParam { + param_name: INFO_HASH.to_owned(), + param_value: raw_param.clone(), + source: Located(err).into(), + })?, + ) + } + None => Err(ParseAnnounceQueryError::MissingParam { + location: Location::caller(), + 
param_name: INFO_HASH.to_owned(), + }), + } +} + +fn extract_peer_id(query: &Query) -> Result { + match query.get_param(PEER_ID) { + Some(raw_param) => Ok( + percent_decode_peer_id(&raw_param).map_err(|err| ParseAnnounceQueryError::InvalidPeerIdParam { + param_name: PEER_ID.to_owned(), + param_value: raw_param.clone(), + source: Located(err).into(), + })?, + ), + None => Err(ParseAnnounceQueryError::MissingParam { + location: Location::caller(), + param_name: PEER_ID.to_owned(), + }), + } +} + +fn extract_port(query: &Query) -> Result { + match query.get_param(PORT) { + Some(raw_param) => Ok(u16::from_str(&raw_param).map_err(|_e| ParseAnnounceQueryError::InvalidParam { + param_name: PORT.to_owned(), + param_value: raw_param.clone(), + location: Location::caller(), + })?), + None => Err(ParseAnnounceQueryError::MissingParam { + location: Location::caller(), + param_name: PORT.to_owned(), + }), + } +} + +// Optional params + +fn extract_downloaded(query: &Query) -> Result, ParseAnnounceQueryError> { + extract_number_of_bytes_from_param(DOWNLOADED, query) +} + +fn extract_uploaded(query: &Query) -> Result, ParseAnnounceQueryError> { + extract_number_of_bytes_from_param(UPLOADED, query) +} + +fn extract_left(query: &Query) -> Result, ParseAnnounceQueryError> { + extract_number_of_bytes_from_param(LEFT, query) +} + +fn extract_number_of_bytes_from_param(param_name: &str, query: &Query) -> Result, ParseAnnounceQueryError> { + match query.get_param(param_name) { + Some(raw_param) => { + let number_of_bytes = u64::from_str(&raw_param).map_err(|_e| ParseAnnounceQueryError::InvalidParam { + param_name: param_name.to_owned(), + param_value: raw_param.clone(), + location: Location::caller(), + })?; + + let number_of_bytes = + i64::try_from(number_of_bytes).map_err(|_e| ParseAnnounceQueryError::NumberOfBytesOverflow { + param_name: param_name.to_owned(), + param_value: raw_param.clone(), + location: Location::caller(), + })?; + + let number_of_bytes = 
NumberOfBytes::new(number_of_bytes); + + Ok(Some(number_of_bytes)) + } + None => Ok(None), + } +} + +fn extract_event(query: &Query) -> Result, ParseAnnounceQueryError> { + match query.get_param(EVENT) { + Some(raw_param) => Ok(Some(Event::from_str(&raw_param)?)), + None => Ok(None), + } +} + +fn extract_compact(query: &Query) -> Result, ParseAnnounceQueryError> { + match query.get_param(COMPACT) { + Some(raw_param) => Ok(Some(Compact::from_str(&raw_param)?)), + None => Ok(None), + } +} + +fn extract_numwant(query: &Query) -> Result, ParseAnnounceQueryError> { + match query.get_param(NUMWANT) { + Some(raw_param) => match u32::from_str(&raw_param) { + Ok(numwant) => Ok(Some(numwant)), + Err(_) => Err(ParseAnnounceQueryError::InvalidParam { + param_name: NUMWANT.to_owned(), + param_value: raw_param.clone(), + location: Location::caller(), + }), + }, + None => Ok(None), + } +} + +#[cfg(test)] +mod tests { + + mod announce_request { + + use aquatic_udp_protocol::{NumberOfBytes, PeerId}; + use torrust_tracker_primitives::info_hash::InfoHash; + + use crate::servers::http::v1::query::Query; + use crate::servers::http::v1::requests::announce::{ + Announce, Compact, Event, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, NUMWANT, PEER_ID, PORT, UPLOADED, + }; + + #[test] + fn should_be_instantiated_from_the_url_query_with_only_the_mandatory_params() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + ]) + .to_string(); + + let query = raw_query.parse::().unwrap(); + + let announce_request = Announce::try_from(query).unwrap(); + + assert_eq!( + announce_request, + Announce { + info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + peer_id: PeerId(*b"-qB00000000000000001"), + port: 17548, + downloaded: None, + uploaded: None, + left: None, + event: None, + compact: None, + numwant: None, + } + ); + } + + #[test] + fn 
should_be_instantiated_from_the_url_query_params() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (DOWNLOADED, "1"), + (UPLOADED, "2"), + (LEFT, "3"), + (EVENT, "started"), + (COMPACT, "0"), + (NUMWANT, "50"), + ]) + .to_string(); + + let query = raw_query.parse::().unwrap(); + + let announce_request = Announce::try_from(query).unwrap(); + + assert_eq!( + announce_request, + Announce { + info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + peer_id: PeerId(*b"-qB00000000000000001"), + port: 17548, + downloaded: Some(NumberOfBytes::new(1)), + uploaded: Some(NumberOfBytes::new(2)), + left: Some(NumberOfBytes::new(3)), + event: Some(Event::Started), + compact: Some(Compact::NotAccepted), + numwant: Some(50), + } + ); + } + + mod when_it_is_instantiated_from_the_url_query_params { + + use crate::servers::http::v1::query::Query; + use crate::servers::http::v1::requests::announce::{ + Announce, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, NUMWANT, PEER_ID, PORT, UPLOADED, + }; + + #[test] + fn it_should_fail_if_the_query_does_not_include_all_the_mandatory_params() { + let raw_query_without_info_hash = "peer_id=-qB00000000000000001&port=17548"; + + assert!(Announce::try_from(raw_query_without_info_hash.parse::().unwrap()).is_err()); + + let raw_query_without_peer_id = "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&port=17548"; + + assert!(Announce::try_from(raw_query_without_peer_id.parse::().unwrap()).is_err()); + + let raw_query_without_port = + "info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0&peer_id=-qB00000000000000001"; + + assert!(Announce::try_from(raw_query_without_port.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_info_hash_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "INVALID_INFO_HASH_VALUE"), + (PEER_ID, "-qB00000000000000001"), + 
(PORT, "17548"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_peer_id_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "INVALID_PEER_ID_VALUE"), + (PORT, "17548"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_port_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "INVALID_PORT_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_downloaded_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (DOWNLOADED, "INVALID_DOWNLOADED_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_uploaded_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (UPLOADED, "INVALID_UPLOADED_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_left_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (LEFT, "INVALID_LEFT_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_event_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, 
"%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (EVENT, "INVALID_EVENT_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_compact_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (COMPACT, "INVALID_COMPACT_VALUE"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_numwant_param_is_invalid() { + let raw_query = Query::from(vec![ + (INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"), + (PEER_ID, "-qB00000000000000001"), + (PORT, "17548"), + (NUMWANT, "-1"), + ]) + .to_string(); + + assert!(Announce::try_from(raw_query.parse::().unwrap()).is_err()); + } + } + } +} diff --git a/src/servers/http/v1/requests/mod.rs b/src/servers/http/v1/requests/mod.rs new file mode 100644 index 000000000..ee34ca72a --- /dev/null +++ b/src/servers/http/v1/requests/mod.rs @@ -0,0 +1,6 @@ +//! HTTP requests for the HTTP tracker. +//! +//! Refer to the generic [HTTP server documentation](crate::servers::http) for +//! more information about the HTTP tracker. +pub mod announce; +pub mod scrape; diff --git a/src/servers/http/v1/requests/scrape.rs b/src/servers/http/v1/requests/scrape.rs new file mode 100644 index 000000000..c61d3be1f --- /dev/null +++ b/src/servers/http/v1/requests/scrape.rs @@ -0,0 +1,128 @@ +//! `Scrape` request for the HTTP tracker. +//! +//! Data structures and logic for parsing the `scrape` request. 
+use std::panic::Location; + +use thiserror::Error; +use torrust_tracker_located_error::{Located, LocatedError}; +use torrust_tracker_primitives::info_hash::{self, InfoHash}; + +use crate::servers::http::percent_encoding::percent_decode_info_hash; +use crate::servers::http::v1::query::Query; +use crate::servers::http::v1::responses; + +// Query param names +const INFO_HASH: &str = "info_hash"; + +#[derive(Debug, PartialEq)] +pub struct Scrape { + pub info_hashes: Vec, +} + +#[derive(Error, Debug)] +pub enum ParseScrapeQueryError { + #[error("missing query params for scrape request in {location}")] + MissingParams { location: &'static Location<'static> }, + #[error("missing param {param_name} in {location}")] + MissingParam { + location: &'static Location<'static>, + param_name: String, + }, + #[error("invalid param value {param_value} for {param_name} in {source}")] + InvalidInfoHashParam { + param_name: String, + param_value: String, + source: LocatedError<'static, info_hash::ConversionError>, + }, +} + +impl From for responses::error::Error { + fn from(err: ParseScrapeQueryError) -> Self { + responses::error::Error { + failure_reason: format!("Cannot parse query params for scrape request: {err}"), + } + } +} + +impl TryFrom for Scrape { + type Error = ParseScrapeQueryError; + + fn try_from(query: Query) -> Result { + Ok(Self { + info_hashes: extract_info_hashes(&query)?, + }) + } +} + +fn extract_info_hashes(query: &Query) -> Result, ParseScrapeQueryError> { + match query.get_param_vec(INFO_HASH) { + Some(raw_params) => { + let mut info_hashes = vec![]; + + for raw_param in raw_params { + let info_hash = + percent_decode_info_hash(&raw_param).map_err(|err| ParseScrapeQueryError::InvalidInfoHashParam { + param_name: INFO_HASH.to_owned(), + param_value: raw_param.clone(), + source: Located(err).into(), + })?; + + info_hashes.push(info_hash); + } + + Ok(info_hashes) + } + None => Err(ParseScrapeQueryError::MissingParam { + location: Location::caller(), + param_name: 
INFO_HASH.to_owned(), + }), + } +} + +#[cfg(test)] +mod tests { + + mod scrape_request { + + use torrust_tracker_primitives::info_hash::InfoHash; + + use crate::servers::http::v1::query::Query; + use crate::servers::http::v1::requests::scrape::{Scrape, INFO_HASH}; + + #[test] + fn should_be_instantiated_from_the_url_query_with_only_one_infohash() { + let raw_query = Query::from(vec![(INFO_HASH, "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0")]).to_string(); + + let query = raw_query.parse::().unwrap(); + + let scrape_request = Scrape::try_from(query).unwrap(); + + assert_eq!( + scrape_request, + Scrape { + info_hashes: vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()], + } + ); + } + + mod when_it_is_instantiated_from_the_url_query_params { + + use crate::servers::http::v1::query::Query; + use crate::servers::http::v1::requests::scrape::{Scrape, INFO_HASH}; + + #[test] + fn it_should_fail_if_the_query_does_not_include_the_info_hash_param() { + let raw_query_without_info_hash = "another_param=NOT_RELEVANT"; + + assert!(Scrape::try_from(raw_query_without_info_hash.parse::().unwrap()).is_err()); + } + + #[test] + fn it_should_fail_if_the_info_hash_param_is_invalid() { + let raw_query = Query::from(vec![(INFO_HASH, "INVALID_INFO_HASH_VALUE")]).to_string(); + + assert!(Scrape::try_from(raw_query.parse::().unwrap()).is_err()); + } + } + } +} diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs new file mode 100644 index 000000000..f223a4bb0 --- /dev/null +++ b/src/servers/http/v1/responses/announce.rs @@ -0,0 +1,373 @@ +//! `Announce` response for the HTTP tracker [`announce`](crate::servers::http::v1::requests::announce::Announce) request. +//! +//! Data structures and logic to build the `announce` response. 
+use std::io::Write; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + +use axum::http::StatusCode; +use derive_more::{AsRef, Constructor, From}; +use torrust_tracker_contrib_bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut}; +use torrust_tracker_primitives::peer; + +use super::Response; +use crate::core::AnnounceData; +use crate::servers::http::v1::responses; + +/// An [`Announce`] response, that can be anything that is convertible from [`AnnounceData`]. +/// +/// The [`Announce`] can built from any data that implements: [`From`] and [`Into>`]. +/// +/// The two standard forms of an announce response are: [`Normal`] and [`Compact`]. +/// +/// +/// _"To reduce the size of tracker responses and to reduce memory and +/// computational requirements in trackers, trackers may return peers as a +/// packed string rather than as a bencoded list."_ +/// +/// Refer to the official BEPs for more information: +/// +/// - [BEP 03: The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +/// - [BEP 23: Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) +/// - [BEP 07: IPv6 Tracker Extension](https://www.bittorrent.org/beps/bep_0007.html) + +#[derive(Debug, AsRef, PartialEq, Constructor)] +pub struct Announce +where + E: From + Into>, +{ + data: E, +} + +/// Build any [`Announce`] from an [`AnnounceData`]. +impl + Into>> From for Announce { + fn from(data: AnnounceData) -> Self { + Self::new(data.into()) + } +} + +/// Convert any Announce [`Announce`] into a [`axum::response::Response`] +impl + Into>> axum::response::IntoResponse for Announce +where + Announce: Response, +{ + fn into_response(self) -> axum::response::Response { + axum::response::IntoResponse::into_response(self.body().map(|bytes| (StatusCode::OK, bytes))) + } +} + +/// Implement the [`Response`] for the [`Announce`]. 
+/// +impl + Into>> Response for Announce { + fn body(self) -> Result, responses::error::Error> { + Ok(self.data.into()) + } +} + +/// Format of the [`Normal`] (Non-Compact) Encoding +pub struct Normal { + complete: i64, + incomplete: i64, + interval: i64, + min_interval: i64, + peers: Vec, +} + +impl From for Normal { + fn from(data: AnnounceData) -> Self { + Self { + complete: data.stats.complete.into(), + incomplete: data.stats.incomplete.into(), + interval: data.policy.interval.into(), + min_interval: data.policy.interval_min.into(), + peers: data.peers.iter().map(AsRef::as_ref).copied().collect(), + } + } +} + +#[allow(clippy::from_over_into)] +impl Into> for Normal { + fn into(self) -> Vec { + let mut peers_list = ben_list!(); + let peers_list_mut = peers_list.list_mut().unwrap(); + for peer in &self.peers { + peers_list_mut.push(peer.into()); + } + + (ben_map! { + "complete" => ben_int!(self.complete), + "incomplete" => ben_int!(self.incomplete), + "interval" => ben_int!(self.interval), + "min interval" => ben_int!(self.min_interval), + "peers" => peers_list.clone() + }) + .encode() + } +} + +/// Format of the [`Compact`] Encoding +pub struct Compact { + complete: i64, + incomplete: i64, + interval: i64, + min_interval: i64, + peers: Vec, + peers6: Vec, +} + +impl From for Compact { + fn from(data: AnnounceData) -> Self { + let compact_peers: Vec = data.peers.iter().map(AsRef::as_ref).copied().collect(); + + let (peers, peers6): (Vec>, Vec>) = + compact_peers.into_iter().collect(); + + let peers_encoded: CompactPeersEncoded = peers.into_iter().collect(); + let peers_encoded_6: CompactPeersEncoded = peers6.into_iter().collect(); + + Self { + complete: data.stats.complete.into(), + incomplete: data.stats.incomplete.into(), + interval: data.policy.interval.into(), + min_interval: data.policy.interval_min.into(), + peers: peers_encoded.0, + peers6: peers_encoded_6.0, + } + } +} + +#[allow(clippy::from_over_into)] +impl Into> for Compact { + fn into(self) -> Vec 
{ + (ben_map! { + "complete" => ben_int!(self.complete), + "incomplete" => ben_int!(self.incomplete), + "interval" => ben_int!(self.interval), + "min interval" => ben_int!(self.min_interval), + "peers" => ben_bytes!(self.peers), + "peers6" => ben_bytes!(self.peers6) + }) + .encode() + } +} + +/// A [`NormalPeer`], for the [`Normal`] form. +/// +/// ```rust +/// use std::net::{IpAddr, Ipv4Addr}; +/// use torrust_tracker::servers::http::v1::responses::announce::{Normal, NormalPeer}; +/// +/// let peer = NormalPeer { +/// peer_id: *b"-qB00000000000000001", +/// ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 +/// port: 0x7070, // 28784 +/// }; +/// +/// ``` +#[derive(Debug, PartialEq)] +pub struct NormalPeer { + /// The peer's ID. + pub peer_id: [u8; 20], + /// The peer's IP address. + pub ip: IpAddr, + /// The peer's port number. + pub port: u16, +} + +impl peer::Encoding for NormalPeer {} + +impl From for NormalPeer { + fn from(peer: peer::Peer) -> Self { + NormalPeer { + peer_id: peer.peer_id.0, + ip: peer.peer_addr.ip(), + port: peer.peer_addr.port(), + } + } +} + +impl From<&NormalPeer> for BencodeMut<'_> { + fn from(value: &NormalPeer) -> Self { + ben_map! { + "peer id" => ben_bytes!(value.peer_id.clone().to_vec()), + "ip" => ben_bytes!(value.ip.to_string()), + "port" => ben_int!(i64::from(value.port)) + } + } +} + +/// A [`CompactPeer`], for the [`Compact`] form. +/// +/// _"To reduce the size of tracker responses and to reduce memory and +/// computational requirements in trackers, trackers may return peers as a +/// packed string rather than as a bencoded list."_ +/// +/// A part from reducing the size of the response, this format does not contain +/// the peer's ID. 
+/// +/// ```rust +/// use std::net::{IpAddr, Ipv4Addr}; +/// use torrust_tracker::servers::http::v1::responses::announce::{Compact, CompactPeer, CompactPeerData}; +/// +/// let peer = CompactPeer::V4(CompactPeerData { +/// ip: Ipv4Addr::new(0x69, 0x69, 0x69, 0x69), // 105.105.105.105 +/// port: 0x7070, // 28784 +/// }); +/// +/// ``` +/// +/// Refer to [BEP 23: Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) +/// for more information. +#[derive(Clone, Debug, PartialEq)] +pub enum CompactPeer { + /// The peer's IP address. + V4(CompactPeerData), + /// The peer's port number. + V6(CompactPeerData), +} + +impl peer::Encoding for CompactPeer {} + +impl From for CompactPeer { + fn from(peer: peer::Peer) -> Self { + match (peer.peer_addr.ip(), peer.peer_addr.port()) { + (IpAddr::V4(ip), port) => Self::V4(CompactPeerData { ip, port }), + (IpAddr::V6(ip), port) => Self::V6(CompactPeerData { ip, port }), + } + } +} + +/// The [`CompactPeerData`], that made with either a [`Ipv4Addr`], or [`Ipv6Addr`] along with a `port`. +/// +#[derive(Clone, Debug, PartialEq)] +pub struct CompactPeerData { + /// The peer's IP address. + pub ip: V, + /// The peer's port number. 
+ pub port: u16, +} + +impl FromIterator for (Vec>, Vec>) { + fn from_iter>(iter: T) -> Self { + let mut peers_v4: Vec> = vec![]; + let mut peers_v6: Vec> = vec![]; + + for peer in iter { + match peer { + CompactPeer::V4(peer) => peers_v4.push(peer), + CompactPeer::V6(peer6) => peers_v6.push(peer6), + } + } + + (peers_v4, peers_v6) + } +} + +#[derive(From, PartialEq)] +struct CompactPeersEncoded(Vec); + +impl FromIterator> for CompactPeersEncoded { + fn from_iter>>(iter: T) -> Self { + let mut bytes: Vec = vec![]; + + for peer in iter { + bytes + .write_all(&u32::from(peer.ip).to_be_bytes()) + .expect("it should write peer ip"); + bytes.write_all(&peer.port.to_be_bytes()).expect("it should write peer port"); + } + + bytes.into() + } +} + +impl FromIterator> for CompactPeersEncoded { + fn from_iter>>(iter: T) -> Self { + let mut bytes: Vec = Vec::new(); + + for peer in iter { + bytes + .write_all(&u128::from(peer.ip).to_be_bytes()) + .expect("it should write peer ip"); + bytes.write_all(&peer.port.to_be_bytes()).expect("it should write peer port"); + } + bytes.into() + } +} + +#[cfg(test)] +mod tests { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::PeerId; + use torrust_tracker_configuration::AnnouncePolicy; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + + use crate::core::AnnounceData; + use crate::servers::http::v1::responses::announce::{Announce, Compact, Normal, Response}; + + // Some ascii values used in tests: + // + // +-----------------+ + // | Dec | Hex | Chr | + // +-----------------+ + // | 105 | 69 | i | + // | 112 | 70 | p | + // +-----------------+ + // + // IP addresses and port numbers used in tests are chosen so that their bencoded representation + // is also a valid string which makes asserts more readable. 
+ + fn setup_announce_data() -> AnnounceData { + let policy = AnnouncePolicy::new(111, 222); + + let peer_ipv4 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 0x7070)) + .build(); + + let peer_ipv6 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + 0x7070, + )) + .build(); + + let peers = vec![Arc::new(peer_ipv4), Arc::new(peer_ipv6)]; + let stats = SwarmMetadata::new(333, 333, 444); + + AnnounceData::new(peers, stats, policy) + } + + #[test] + fn non_compact_announce_response_can_be_bencoded() { + let response: Announce = setup_announce_data().into(); + let bytes = response.body().expect("it should encode the response"); + + // cspell:disable-next-line + let expected_bytes = b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peersld2:ip15:105.105.105.1057:peer id20:-qB000000000000000014:porti28784eed2:ip39:6969:6969:6969:6969:6969:6969:6969:69697:peer id20:-qB000000000000000024:porti28784eeee"; + + assert_eq!( + String::from_utf8(bytes).unwrap(), + String::from_utf8(expected_bytes.to_vec()).unwrap() + ); + } + + #[test] + fn compact_announce_response_can_be_bencoded() { + let response: Announce = setup_announce_data().into(); + let bytes = response.body().expect("it should encode the response"); + + let expected_bytes = + // cspell:disable-next-line + b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peers6:iiiipp6:peers618:iiiiiiiiiiiiiiiippe"; + + assert_eq!( + String::from_utf8(bytes).unwrap(), + String::from_utf8(expected_bytes.to_vec()).unwrap() + ); + } +} diff --git a/src/servers/http/v1/responses/error.rs b/src/servers/http/v1/responses/error.rs new file mode 100644 index 000000000..c406c797a --- /dev/null +++ 
b/src/servers/http/v1/responses/error.rs @@ -0,0 +1,69 @@ +//! `Error` response for the [`HTTP tracker`](crate::servers::http). +//! +//! Data structures and logic to build the error responses. +//! +//! From the [BEP 03. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html): +//! +//! _"Tracker responses are bencoded dictionaries. If a tracker response has a +//! key failure reason, then that maps to a human readable string which explains +//! why the query failed, and no other keys are required."_ +//! +//! > **NOTICE**: error responses are bencoded and always have a `200 OK` status +//! > code. The official `BitTorrent` specification does not specify the status +//! > code. +use axum::http::StatusCode; +use axum::response::{IntoResponse, Response}; +use serde::Serialize; + +/// `Error` response for the [`HTTP tracker`](crate::servers::http). +#[derive(Serialize, Debug, PartialEq)] +pub struct Error { + /// Human readable string which explains why the request failed. + #[serde(rename = "failure reason")] + pub failure_reason: String, +} + +impl Error { + /// Returns the bencoded representation of the `Error` struct. + /// + /// ```rust + /// use torrust_tracker::servers::http::v1::responses::error::Error; + /// + /// let err = Error { + /// failure_reason: "error message".to_owned(), + /// }; + /// + /// // cspell:disable-next-line + /// assert_eq!(err.write(), "d14:failure reason13:error messagee"); + /// ``` + /// + /// # Panics + /// + /// It would panic if the `Error` struct contained an inappropriate field + /// type. 
+ #[must_use] + pub fn write(&self) -> String { + serde_bencode::to_string(&self).unwrap() + } +} + +impl IntoResponse for Error { + fn into_response(self) -> Response { + (StatusCode::OK, self.write()).into_response() + } +} + +#[cfg(test)] +mod tests { + + use super::Error; + + #[test] + fn http_tracker_errors_can_be_bencoded() { + let err = Error { + failure_reason: "error message".to_owned(), + }; + + assert_eq!(err.write(), "d14:failure reason13:error messagee"); // cspell:disable-line + } +} diff --git a/src/servers/http/v1/responses/mod.rs b/src/servers/http/v1/responses/mod.rs new file mode 100644 index 000000000..e22879c6d --- /dev/null +++ b/src/servers/http/v1/responses/mod.rs @@ -0,0 +1,19 @@ +//! HTTP responses for the HTTP tracker. +//! +//! Refer to the generic [HTTP server documentation](crate::servers::http) for +//! more information about the HTTP tracker. +pub mod announce; +pub mod error; +pub mod scrape; + +pub use announce::{Announce, Compact, Normal}; + +/// Trait that defines the Announce Response Format +pub trait Response: axum::response::IntoResponse { + /// Returns the Body of the Announce Response + /// + /// # Errors + /// + /// If unable to generate the response, it will return an error. + fn body(self) -> Result, error::Error>; +} diff --git a/src/servers/http/v1/responses/scrape.rs b/src/servers/http/v1/responses/scrape.rs new file mode 100644 index 000000000..9690d4392 --- /dev/null +++ b/src/servers/http/v1/responses/scrape.rs @@ -0,0 +1,144 @@ +//! `Scrape` response for the HTTP tracker [`scrape`](crate::servers::http::v1::requests::scrape::Scrape) request. +//! +//! Data structures and logic to build the `scrape` response. +use std::borrow::Cow; + +use axum::http::StatusCode; +use axum::response::{IntoResponse, Response}; +use torrust_tracker_contrib_bencode::{ben_int, ben_map, BMutAccess}; + +use crate::core::ScrapeData; + +/// The `Scrape` response for the HTTP tracker. 
+/// +/// ```rust +/// use torrust_tracker::servers::http::v1::responses::scrape::Bencoded; +/// use torrust_tracker_primitives::info_hash::InfoHash; +/// use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +/// use torrust_tracker::core::ScrapeData; +/// +/// let info_hash = InfoHash::from_bytes(&[0x69; 20]); +/// let mut scrape_data = ScrapeData::empty(); +/// scrape_data.add_file( +/// &info_hash, +/// SwarmMetadata { +/// complete: 1, +/// downloaded: 2, +/// incomplete: 3, +/// }, +/// ); +/// +/// let response = Bencoded::from(scrape_data); +/// +/// let bytes = response.body(); +/// +/// // cspell:disable-next-line +/// let expected_bytes = b"d5:filesd20:iiiiiiiiiiiiiiiiiiiid8:completei1e10:downloadedi2e10:incompletei3eeee"; +/// +/// assert_eq!( +/// String::from_utf8(bytes).unwrap(), +/// String::from_utf8(expected_bytes.to_vec()).unwrap() +/// ); +/// ``` +#[derive(Debug, PartialEq, Default)] +pub struct Bencoded { + /// The scrape data to be bencoded. + scrape_data: ScrapeData, +} + +impl Bencoded { + /// Returns the bencoded representation of the `Scrape` struct. + /// + /// # Panics + /// + /// Will return an error if it can't access the bencode as a mutable `BDictAccess`. + #[must_use] + pub fn body(&self) -> Vec { + let mut scrape_list = ben_map!(); + + let scrape_list_mut = scrape_list.dict_mut().unwrap(); + + for (info_hash, value) in &self.scrape_data.files { + scrape_list_mut.insert( + Cow::from(info_hash.bytes().to_vec()), + ben_map! { + "complete" => ben_int!(i64::from(value.complete)), + "downloaded" => ben_int!(i64::from(value.downloaded)), + "incomplete" => ben_int!(i64::from(value.incomplete)) + }, + ); + } + + (ben_map! 
{ + "files" => scrape_list + }) + .encode() + } +} + +impl From for Bencoded { + fn from(scrape_data: ScrapeData) -> Self { + Self { scrape_data } + } +} + +impl IntoResponse for Bencoded { + fn into_response(self) -> Response { + (StatusCode::OK, self.body()).into_response() + } +} + +#[cfg(test)] +mod tests { + + mod scrape_response { + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + + use crate::core::ScrapeData; + use crate::servers::http::v1::responses::scrape::Bencoded; + + fn sample_scrape_data() -> ScrapeData { + let info_hash = InfoHash::from_bytes(&[0x69; 20]); + let mut scrape_data = ScrapeData::empty(); + scrape_data.add_file( + &info_hash, + SwarmMetadata { + complete: 1, + downloaded: 2, + incomplete: 3, + }, + ); + scrape_data + } + + #[test] + fn should_be_converted_from_scrape_data() { + let response = Bencoded::from(sample_scrape_data()); + + assert_eq!( + response, + Bencoded { + scrape_data: sample_scrape_data() + } + ); + } + + #[test] + fn should_be_bencoded() { + let response = Bencoded { + scrape_data: sample_scrape_data(), + }; + + let bytes = response.body(); + + // cspell:disable-next-line + let expected_bytes = b"d5:filesd20:iiiiiiiiiiiiiiiiiiiid8:completei1e10:downloadedi2e10:incompletei3eeee"; + + assert_eq!( + String::from_utf8(bytes).unwrap(), + String::from_utf8(expected_bytes.to_vec()).unwrap() + ); + } + } +} diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs new file mode 100644 index 000000000..16e39b61b --- /dev/null +++ b/src/servers/http/v1/routes.rs @@ -0,0 +1,85 @@ +//! HTTP server routes for version `v1`. 
+use std::net::SocketAddr; +use std::sync::Arc; +use std::time::Duration; + +use axum::error_handling::HandleErrorLayer; +use axum::http::HeaderName; +use axum::response::Response; +use axum::routing::get; +use axum::{BoxError, Router}; +use axum_client_ip::SecureClientIpSource; +use hyper::{Request, StatusCode}; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; +use tower::timeout::TimeoutLayer; +use tower::ServiceBuilder; +use tower_http::compression::CompressionLayer; +use tower_http::propagate_header::PropagateHeaderLayer; +use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; +use tower_http::trace::{DefaultMakeSpan, TraceLayer}; +use tracing::{instrument, Level, Span}; + +use super::handlers::{announce, health_check, scrape}; +use crate::core::Tracker; +use crate::servers::http::HTTP_TRACKER_LOG_TARGET; + +/// It adds the routes to the router. +/// +/// > **NOTICE**: it's added a layer to get the client IP from the connection +/// > info. The tracker could use the connection info to get the client IP. 
+#[allow(clippy::needless_pass_by_value)] +#[instrument(skip(tracker, server_socket_addr))] +pub fn router(tracker: Arc, server_socket_addr: SocketAddr) -> Router { + Router::new() + // Health check + .route("/health_check", get(health_check::handler)) + // Announce request + .route("/announce", get(announce::handle_without_key).with_state(tracker.clone())) + .route("/announce/:key", get(announce::handle_with_key).with_state(tracker.clone())) + // Scrape request + .route("/scrape", get(scrape::handle_without_key).with_state(tracker.clone())) + .route("/scrape/:key", get(scrape::handle_with_key).with_state(tracker)) + // Add extension to get the client IP from the connection info + .layer(SecureClientIpSource::ConnectInfo.into_extension()) + .layer(CompressionLayer::new()) + .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) + .layer(PropagateHeaderLayer::new(HeaderName::from_static("x-request-id"))) + .layer( + TraceLayer::new_for_http() + .make_span_with(DefaultMakeSpan::new().level(Level::INFO)) + .on_request(move |request: &Request, _span: &Span| { + let method = request.method().to_string(); + let uri = request.uri().to_string(); + let request_id = request + .headers() + .get("x-request-id") + .map(|v| v.to_str().unwrap_or_default()) + .unwrap_or_default(); + + tracing::span!( + target: HTTP_TRACKER_LOG_TARGET, + tracing::Level::INFO, "request", server_socket_addr= %server_socket_addr, method = %method, uri = %uri, request_id = %request_id); + }) + .on_response(move |response: &Response, latency: Duration, _span: &Span| { + let status_code = response.status(); + let request_id = response + .headers() + .get("x-request-id") + .map(|v| v.to_str().unwrap_or_default()) + .unwrap_or_default(); + let latency_ms = latency.as_millis(); + + tracing::span!( + target: HTTP_TRACKER_LOG_TARGET, + tracing::Level::INFO, "response", server_socket_addr= %server_socket_addr, latency = %latency_ms, status = %status_code, request_id = %request_id); + }), + ) + 
.layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) + .layer( + ServiceBuilder::new() + // this middleware goes above `TimeoutLayer` because it will receive + // errors returned by `TimeoutLayer` + .layer(HandleErrorLayer::new(|_: BoxError| async { StatusCode::REQUEST_TIMEOUT })) + .layer(TimeoutLayer::new(DEFAULT_TIMEOUT)), + ) +} diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs new file mode 100644 index 000000000..9c5dfdad2 --- /dev/null +++ b/src/servers/http/v1/services/announce.rs @@ -0,0 +1,223 @@ +//! The `announce` service. +//! +//! The service is responsible for handling the `announce` requests. +//! +//! It delegates the `announce` logic to the [`Tracker`](crate::core::Tracker::announce) +//! and it returns the [`AnnounceData`] returned +//! by the [`Tracker`]. +//! +//! It also sends an [`statistics::Event`] +//! because events are specific for the HTTP tracker. +use std::net::IpAddr; +use std::sync::Arc; + +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; + +use crate::core::{statistics, AnnounceData, PeersWanted, Tracker}; + +/// The HTTP tracker `announce` service. +/// +/// The service sends an statistics event that increments: +/// +/// - The number of TCP connections handled by the HTTP tracker. +/// - The number of TCP `announce` requests handled by the HTTP tracker. +/// +/// > **NOTICE**: as the HTTP tracker does not requires a connection request +/// > like the UDP tracker, the number of TCP connections is incremented for +/// > each `announce` request. 
+pub async fn invoke( + tracker: Arc, + info_hash: InfoHash, + peer: &mut peer::Peer, + peers_wanted: &PeersWanted, +) -> AnnounceData { + let original_peer_ip = peer.peer_addr.ip(); + + // The tracker could change the original peer ip + let announce_data = tracker.announce(&info_hash, peer, &original_peer_ip, peers_wanted); + + match original_peer_ip { + IpAddr::V4(_) => { + tracker.send_stats_event(statistics::Event::Tcp4Announce).await; + } + IpAddr::V6(_) => { + tracker.send_stats_event(statistics::Event::Tcp6Announce).await; + } + } + + announce_data +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + use torrust_tracker_test_helpers::configuration; + + use crate::core::services::tracker_factory; + use crate::core::Tracker; + + fn public_tracker() -> Tracker { + tracker_factory(&configuration::ephemeral_public()) + } + + fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() + } + + fn sample_peer_using_ipv4() -> peer::Peer { + sample_peer() + } + + fn sample_peer_using_ipv6() -> peer::Peer { + let mut peer = sample_peer(); + peer.peer_addr = SocketAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + 8080, + ); + peer + } + + fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), + event: AnnounceEvent::Started, + } + } + + mod with_tracker_in_any_mode { + use std::future; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + + use 
mockall::predicate::eq; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + use torrust_tracker_test_helpers::configuration; + + use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; + use crate::core::{statistics, AnnounceData, PeersWanted, Tracker}; + use crate::servers::http::v1::services::announce::invoke; + use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; + + #[tokio::test] + async fn it_should_return_the_announce_data() { + let tracker = Arc::new(public_tracker()); + + let mut peer = sample_peer(); + + let announce_data = invoke(tracker.clone(), sample_info_hash(), &mut peer, &PeersWanted::All).await; + + let expected_announce_data = AnnounceData { + peers: vec![], + stats: SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0, + }, + policy: tracker.get_announce_policy(), + }; + + assert_eq!(announce_data, expected_announce_data); + } + + #[tokio::test] + async fn it_should_send_the_tcp_4_announce_event_when_the_peer_uses_ipv4() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp4Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + &configuration::ephemeral().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let mut peer = sample_peer_using_ipv4(); + + let _announce_data = invoke(tracker, sample_info_hash(), &mut peer, &PeersWanted::All).await; + } + + fn tracker_with_an_ipv6_external_ip(stats_event_sender: Box) -> Tracker { + let mut configuration = configuration::ephemeral(); + configuration.core.net.external_ip = Some(IpAddr::V6(Ipv6Addr::new( + 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, + ))); + + Tracker::new(&configuration.core, 
Some(stats_event_sender), statistics::Repo::new()).unwrap() + } + + fn peer_with_the_ipv4_loopback_ip() -> peer::Peer { + let loopback_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + let mut peer = sample_peer(); + peer.peer_addr = SocketAddr::new(loopback_ip, 8080); + peer + } + + #[tokio::test] + async fn it_should_send_the_tcp_4_announce_event_when_the_peer_uses_ipv4_even_if_the_tracker_changes_the_peer_ip_to_ipv6() + { + // Tracker changes the peer IP to the tracker external IP when the peer is using the loopback IP. + + // Assert that the event sent is a TCP4 event + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp4Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let mut peer = peer_with_the_ipv4_loopback_ip(); + + let _announce_data = invoke( + tracker_with_an_ipv6_external_ip(stats_event_sender).into(), + sample_info_hash(), + &mut peer, + &PeersWanted::All, + ) + .await; + } + + #[tokio::test] + async fn it_should_send_the_tcp_6_announce_event_when_the_peer_uses_ipv6_even_if_the_tracker_changes_the_peer_ip_to_ipv4() + { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp6Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + &configuration::ephemeral().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let mut peer = sample_peer_using_ipv6(); + + let _announce_data = invoke(tracker, sample_info_hash(), &mut peer, &PeersWanted::All).await; + } + } +} diff --git a/src/servers/http/v1/services/mod.rs b/src/servers/http/v1/services/mod.rs new file mode 100644 index 000000000..2e6285d1a --- 
/dev/null +++ b/src/servers/http/v1/services/mod.rs @@ -0,0 +1,10 @@ +//! Application services for the HTTP tracker. +//! +//! These modules contain logic that is specific for the HTTP tracker but it +//! does depend on the Axum web server. It could be reused for other web +//! servers. +//! +//! Refer to [`torrust_tracker`](crate) documentation. +pub mod announce; +pub mod peer_ip_resolver; +pub mod scrape; diff --git a/src/servers/http/v1/services/peer_ip_resolver.rs b/src/servers/http/v1/services/peer_ip_resolver.rs new file mode 100644 index 000000000..b8987bb4d --- /dev/null +++ b/src/servers/http/v1/services/peer_ip_resolver.rs @@ -0,0 +1,218 @@ +//! This service resolves the peer IP from the request. +//! +//! The peer IP is used to identify the peer in the tracker. It's the peer IP +//! that is used in the `announce` responses (peer list). And it's also used to +//! send statistics events. +//! +//! Given this request chain: +//! +//! ```text +//! client <-> http proxy 1 <-> http proxy 2 <-> server +//! ip: 126.0.0.1 ip: 126.0.0.2 ip: 126.0.0.3 ip: 126.0.0.4 +//! X-Forwarded-For: 126.0.0.1 X-Forwarded-For: 126.0.0.1,126.0.0.2 +//! ``` +//! +//! This service returns two options for the peer IP: +//! +//! ```text +//! right_most_x_forwarded_for = 126.0.0.2 +//! connection_info_ip = 126.0.0.3 +//! ``` +//! +//! Depending on the tracker configuration. +use std::net::IpAddr; +use std::panic::Location; + +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +/// This struct contains the sources from which the peer IP can be obtained. +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] +pub struct ClientIpSources { + /// The right most IP from the `X-Forwarded-For` HTTP header. + pub right_most_x_forwarded_for: Option, + /// The IP from the connection info. + pub connection_info_ip: Option, +} + +/// The error that can occur when resolving the peer IP. 
+#[derive(Error, Debug)] +pub enum PeerIpResolutionError { + /// The peer IP cannot be obtained because the tracker is configured as a + /// reverse proxy but the `X-Forwarded-For` HTTP header is missing or + /// invalid. + #[error( + "missing or invalid the right most X-Forwarded-For IP (mandatory on reverse proxy tracker configuration) in {location}" + )] + MissingRightMostXForwardedForIp { location: &'static Location<'static> }, + /// The peer IP cannot be obtained because the tracker is not configured as + /// a reverse proxy but the connection info was not provided to the Axum + /// framework via a route extension. + #[error("cannot get the client IP from the connection info in {location}")] + MissingClientIp { location: &'static Location<'static> }, +} + +/// Resolves the peer IP from the request. +/// +/// Given the sources from which the peer IP can be obtained, this function +/// resolves the peer IP according to the tracker configuration. +/// +/// With the tracker running on reverse proxy mode: +/// +/// ```rust +/// use std::net::IpAddr; +/// use std::str::FromStr; +/// +/// use torrust_tracker::servers::http::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; +/// +/// let on_reverse_proxy = true; +/// +/// let ip = invoke( +/// on_reverse_proxy, +/// &ClientIpSources { +/// right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), +/// connection_info_ip: None, +/// }, +/// ) +/// .unwrap(); +/// +/// assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); +/// ``` +/// +/// With the tracker non running on reverse proxy mode: +/// +/// ```rust +/// use std::net::IpAddr; +/// use std::str::FromStr; +/// +/// use torrust_tracker::servers::http::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; +/// +/// let on_reverse_proxy = false; +/// +/// let ip = invoke( +/// on_reverse_proxy, +/// &ClientIpSources { +/// right_most_x_forwarded_for: None, +/// connection_info_ip: 
Some(IpAddr::from_str("203.0.113.195").unwrap()), +/// }, +/// ) +/// .unwrap(); +/// +/// assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); +/// ``` +/// +/// # Errors +/// +/// Will return an error if the peer IP cannot be obtained according to the configuration. +/// For example, if the IP is extracted from an HTTP header which is missing in the request. +pub fn invoke(on_reverse_proxy: bool, client_ip_sources: &ClientIpSources) -> Result { + if on_reverse_proxy { + resolve_peer_ip_on_reverse_proxy(client_ip_sources) + } else { + resolve_peer_ip_without_reverse_proxy(client_ip_sources) + } +} + +fn resolve_peer_ip_without_reverse_proxy(remote_client_ip: &ClientIpSources) -> Result { + if let Some(ip) = remote_client_ip.connection_info_ip { + Ok(ip) + } else { + Err(PeerIpResolutionError::MissingClientIp { + location: Location::caller(), + }) + } +} + +fn resolve_peer_ip_on_reverse_proxy(remote_client_ip: &ClientIpSources) -> Result { + if let Some(ip) = remote_client_ip.right_most_x_forwarded_for { + Ok(ip) + } else { + Err(PeerIpResolutionError::MissingRightMostXForwardedForIp { + location: Location::caller(), + }) + } +} + +#[cfg(test)] +mod tests { + use super::invoke; + + mod working_without_reverse_proxy { + use std::net::IpAddr; + use std::str::FromStr; + + use super::invoke; + use crate::servers::http::v1::services::peer_ip_resolver::{ClientIpSources, PeerIpResolutionError}; + + #[test] + fn it_should_get_the_peer_ip_from_the_connection_info() { + let on_reverse_proxy = false; + + let ip = invoke( + on_reverse_proxy, + &ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: Some(IpAddr::from_str("203.0.113.195").unwrap()), + }, + ) + .unwrap(); + + assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); + } + + #[test] + fn it_should_return_an_error_if_it_cannot_get_the_peer_ip_from_the_connection_info() { + let on_reverse_proxy = false; + + let error = invoke( + on_reverse_proxy, + &ClientIpSources { + 
right_most_x_forwarded_for: None, + connection_info_ip: None, + }, + ) + .unwrap_err(); + + assert!(matches!(error, PeerIpResolutionError::MissingClientIp { .. })); + } + } + + mod working_on_reverse_proxy { + use std::net::IpAddr; + use std::str::FromStr; + + use crate::servers::http::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; + + #[test] + fn it_should_get_the_peer_ip_from_the_right_most_ip_in_the_x_forwarded_for_header() { + let on_reverse_proxy = true; + + let ip = invoke( + on_reverse_proxy, + &ClientIpSources { + right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), + connection_info_ip: None, + }, + ) + .unwrap(); + + assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); + } + + #[test] + fn it_should_return_an_error_if_it_cannot_get_the_right_most_ip_from_the_x_forwarded_for_header() { + let on_reverse_proxy = true; + + let error = invoke( + on_reverse_proxy, + &ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: None, + }, + ) + .unwrap_err(); + + assert!(matches!(error, PeerIpResolutionError::MissingRightMostXForwardedForIp { .. })); + } + } +} diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs new file mode 100644 index 000000000..0d561c7bc --- /dev/null +++ b/src/servers/http/v1/services/scrape.rs @@ -0,0 +1,270 @@ +//! The `scrape` service. +//! +//! The service is responsible for handling the `scrape` requests. +//! +//! It delegates the `scrape` logic to the [`Tracker`](crate::core::Tracker::scrape) +//! and it returns the [`ScrapeData`] returned +//! by the [`Tracker`]. +//! +//! It also sends an [`statistics::Event`] +//! because events are specific for the HTTP tracker. +use std::net::IpAddr; +use std::sync::Arc; + +use torrust_tracker_primitives::info_hash::InfoHash; + +use crate::core::{statistics, ScrapeData, Tracker}; + +/// The HTTP tracker `scrape` service. 
+/// The service sends a statistics event that increments:
+///
+/// - The number of TCP connections handled by the HTTP tracker.
+/// - The number of TCP `scrape` requests handled by the HTTP tracker.
+///
+/// > **NOTICE**: as the HTTP tracker does not require a connection request
+/// > like the UDP tracker, the number of TCP connections is incremented for
+/// > each `scrape` request.
+ "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() + } + + fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), + event: AnnounceEvent::Started, + } + } + + mod with_real_data { + + use std::future; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::sync::Arc; + + use mockall::predicate::eq; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + use torrust_tracker_test_helpers::configuration; + + use crate::core::{statistics, PeersWanted, ScrapeData, Tracker}; + use crate::servers::http::v1::services::scrape::invoke; + use crate::servers::http::v1::services::scrape::tests::{ + public_tracker, sample_info_hash, sample_info_hashes, sample_peer, + }; + + #[tokio::test] + async fn it_should_return_the_scrape_data_for_a_torrent() { + let tracker = Arc::new(public_tracker()); + + let info_hash = sample_info_hash(); + let info_hashes = vec![info_hash]; + + // Announce a new peer to force scrape data to contain not zeroed data + let mut peer = sample_peer(); + let original_peer_ip = peer.ip(); + tracker.announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::All); + + let scrape_data = invoke(&tracker, &info_hashes, &original_peer_ip).await; + + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file( + &info_hash, + SwarmMetadata { + complete: 1, + downloaded: 0, + incomplete: 0, + }, + ); + + assert_eq!(scrape_data, expected_scrape_data); + } + + #[tokio::test] + async fn it_should_send_the_tcp_4_scrape_event_when_the_peer_uses_ipv4() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp4Scrape)) + .times(1) + .returning(|_| 
Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + &configuration::ephemeral().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); + + invoke(&tracker, &sample_info_hashes(), &peer_ip).await; + } + + #[tokio::test] + async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp6Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + &configuration::ephemeral().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); + + invoke(&tracker, &sample_info_hashes(), &peer_ip).await; + } + } + + mod with_zeroed_data { + + use std::future; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::sync::Arc; + + use mockall::predicate::eq; + use torrust_tracker_test_helpers::configuration; + + use crate::core::{statistics, PeersWanted, ScrapeData, Tracker}; + use crate::servers::http::v1::services::scrape::fake; + use crate::servers::http::v1::services::scrape::tests::{ + public_tracker, sample_info_hash, sample_info_hashes, sample_peer, + }; + + #[tokio::test] + async fn it_should_always_return_the_zeroed_scrape_data_for_a_torrent() { + let tracker = Arc::new(public_tracker()); + + let info_hash = sample_info_hash(); + let info_hashes = vec![info_hash]; + + // Announce a new peer to force scrape data to contain not zeroed data + let mut peer = sample_peer(); + let original_peer_ip = peer.ip(); + tracker.announce(&info_hash, &mut peer, &original_peer_ip, 
&PeersWanted::All); + + let scrape_data = fake(&tracker, &info_hashes, &original_peer_ip).await; + + let expected_scrape_data = ScrapeData::zeroed(&info_hashes); + + assert_eq!(scrape_data, expected_scrape_data); + } + + #[tokio::test] + async fn it_should_send_the_tcp_4_scrape_event_when_the_peer_uses_ipv4() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp4Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + &configuration::ephemeral().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); + + fake(&tracker, &sample_info_hashes(), &peer_ip).await; + } + + #[tokio::test] + async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Tcp6Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + Tracker::new( + &configuration::ephemeral().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); + + fake(&tracker, &sample_info_hashes(), &peer_ip).await; + } + } +} diff --git a/src/servers/logging.rs b/src/servers/logging.rs new file mode 100644 index 000000000..ad9ccbbcc --- /dev/null +++ b/src/servers/logging.rs @@ -0,0 +1,29 @@ +/// This is the prefix used in logs to identify a started service. 
+/// +/// For example: +/// +/// ```text +/// 2024-06-25T12:36:25.025312Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 +/// 2024-06-25T12:36:25.025445Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 +/// 2024-06-25T12:36:25.025527Z INFO API: Started on http://0.0.0.0:1212 +/// 2024-06-25T12:36:25.025580Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 +/// ``` +pub const STARTED_ON: &str = "Started on"; + +/* + +todo: we should use a field fot the URL. + +For example, instead of: + +``` +2024-06-25T12:36:25.025312Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 +``` + +We should use something like: + +``` +2024-06-25T12:36:25.025312Z INFO UDP TRACKER started_at_url=udp://0.0.0.0:6969 +``` + +*/ diff --git a/src/servers/mod.rs b/src/servers/mod.rs new file mode 100644 index 000000000..705a4728e --- /dev/null +++ b/src/servers/mod.rs @@ -0,0 +1,9 @@ +//! Servers. Services that can be started and stopped. +pub mod apis; +pub mod custom_axum_server; +pub mod health_check_api; +pub mod http; +pub mod logging; +pub mod registar; +pub mod signals; +pub mod udp; diff --git a/src/servers/registar.rs b/src/servers/registar.rs new file mode 100644 index 000000000..6b67188dc --- /dev/null +++ b/src/servers/registar.rs @@ -0,0 +1,100 @@ +//! Registar. Registers Services for Health Check. + +use std::collections::HashMap; +use std::net::SocketAddr; +use std::sync::Arc; + +use derive_more::Constructor; +use tokio::sync::Mutex; +use tokio::task::JoinHandle; + +/// A [`ServiceHeathCheckResult`] is returned by a completed health check. +pub type ServiceHeathCheckResult = Result; + +/// The [`ServiceHealthCheckJob`] has a health check job with it's metadata +/// +/// The `job` awaits a [`ServiceHeathCheckResult`]. +#[derive(Debug, Constructor)] +pub struct ServiceHealthCheckJob { + pub binding: SocketAddr, + pub info: String, + pub job: JoinHandle, +} + +/// The function specification [`FnSpawnServiceHeathCheck`]. 
+/// +/// A function fulfilling this specification will spawn a new [`ServiceHealthCheckJob`]. +pub type FnSpawnServiceHeathCheck = fn(&SocketAddr) -> ServiceHealthCheckJob; + +/// A [`ServiceRegistration`] is provided to the [`Registar`] for registration. +/// +/// Each registration includes a function that fulfils the [`FnSpawnServiceHeathCheck`] specification. +#[derive(Clone, Debug, Constructor)] +pub struct ServiceRegistration { + binding: SocketAddr, + check_fn: FnSpawnServiceHeathCheck, +} + +impl ServiceRegistration { + #[must_use] + pub fn spawn_check(&self) -> ServiceHealthCheckJob { + (self.check_fn)(&self.binding) + } +} + +/// A [`ServiceRegistrationForm`] will return a completed [`ServiceRegistration`] to the [`Registar`]. +pub type ServiceRegistrationForm = tokio::sync::oneshot::Sender; + +/// The [`ServiceRegistry`] contains each unique [`ServiceRegistration`] by it's [`SocketAddr`]. +pub type ServiceRegistry = Arc>>; + +/// The [`Registar`] manages the [`ServiceRegistry`]. +#[derive(Clone, Debug)] +pub struct Registar { + registry: ServiceRegistry, +} + +#[allow(clippy::derivable_impls)] +impl Default for Registar { + fn default() -> Self { + Self { + registry: ServiceRegistry::default(), + } + } +} + +impl Registar { + pub fn new(register: ServiceRegistry) -> Self { + Self { registry: register } + } + + /// Registers a Service + #[must_use] + pub fn give_form(&self) -> ServiceRegistrationForm { + let (tx, rx) = tokio::sync::oneshot::channel::(); + let register = self.clone(); + tokio::spawn(async move { + register.insert(rx).await; + }); + tx + } + + /// Inserts a listing into the registry. 
+ async fn insert(&self, rx: tokio::sync::oneshot::Receiver) { + tracing::debug!("Waiting for the started service to send registration data ..."); + + let service_registration = rx + .await + .expect("it should receive the service registration from the started service"); + + let mut mutex = self.registry.lock().await; + + mutex.insert(service_registration.binding, service_registration); + } + + /// Returns the [`ServiceRegistry`] of services + #[must_use] + pub fn entries(&self) -> ServiceRegistry { + self.registry.clone() + } +} diff --git a/src/servers/signals.rs b/src/servers/signals.rs new file mode 100644 index 000000000..b83dd5213 --- /dev/null +++ b/src/servers/signals.rs @@ -0,0 +1,86 @@ +//! This module contains functions to handle signals. +use std::time::Duration; + +use derive_more::Display; +use tokio::time::sleep; +use tracing::instrument; + +/// This is the message that the "launcher" spawned task receives from the main +/// application process to notify the service to shutdown. +/// +#[derive(Copy, Clone, Debug, Display)] +pub enum Halted { + Normal, +} + +/// Resolves on `ctrl_c` or the `terminate` signal. +/// +/// # Panics +/// +/// Will panic if the `ctrl_c` or `terminate` signal resolves with an error. +#[instrument(skip())] +pub async fn global_shutdown_signal() { + let ctrl_c = async { + tokio::signal::ctrl_c().await.expect("failed to install Ctrl+C handler"); + }; + + #[cfg(unix)] + let terminate = async { + tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate()) + .expect("failed to install signal handler") + .recv() + .await; + }; + + #[cfg(not(unix))] + let terminate = std::future::pending::<()>(); + + tokio::select! { + () = ctrl_c => {tracing::warn!("caught interrupt signal (ctrl-c), halting...");}, + () = terminate => {tracing::warn!("caught interrupt signal (terminate), halting...");} + } +} + +/// Resolves when the `stop_receiver` or the `global_shutdown_signal()` resolves. 
+/// +/// # Panics +/// +/// Will panic if the `stop_receiver` resolves with an error. +#[instrument(skip(rx_halt))] +pub async fn shutdown_signal(rx_halt: tokio::sync::oneshot::Receiver) { + let halt = async { + match rx_halt.await { + Ok(signal) => signal, + Err(err) => panic!("Failed to install stop signal: {err}"), + } + }; + + tokio::select! { + signal = halt => { tracing::debug!("Halt signal processed: {}", signal) }, + () = global_shutdown_signal() => { tracing::debug!("Global shutdown signal processed") } + } +} + +/// Same as `shutdown_signal()`, but shows a message when it resolves. +#[instrument(skip(rx_halt))] +pub async fn shutdown_signal_with_message(rx_halt: tokio::sync::oneshot::Receiver, message: String) { + shutdown_signal(rx_halt).await; + + tracing::info!("{message}"); +} + +#[instrument(skip(handle, rx_halt, message))] +pub async fn graceful_shutdown(handle: axum_server::Handle, rx_halt: tokio::sync::oneshot::Receiver, message: String) { + shutdown_signal_with_message(rx_halt, message).await; + + tracing::debug!("Sending graceful shutdown signal"); + handle.graceful_shutdown(Some(Duration::from_secs(90))); + + println!("!! shuting down in 90 seconds !!"); + + loop { + sleep(Duration::from_secs(1)).await; + + tracing::info!("remaining alive connections: {}", handle.connection_count()); + } +} diff --git a/src/servers/udp/connection_cookie.rs b/src/servers/udp/connection_cookie.rs new file mode 100644 index 000000000..36bf98304 --- /dev/null +++ b/src/servers/udp/connection_cookie.rs @@ -0,0 +1,339 @@ +//! Logic for generating and verifying connection IDs. +//! +//! The UDP tracker requires the client to connect to the server before it can +//! send any data. The server responds with a random 64-bit integer that the +//! client must use to identify itself. +//! +//! This connection ID is used to avoid spoofing attacks. The client must send +//! the connection ID in all requests to the server. The server will ignore any +//! 
requests that do not contain the correct connection ID. +//! +//! The simplest way to implement this would be to generate a random number when +//! the client connects and store it in a hash table. However, this would +//! require the server to store a large number of connection IDs, which would be +//! a waste of memory. Instead, the server generates a connection ID based on +//! the client's IP address and the current time. This allows the server to +//! verify the connection ID without storing it. +//! +//! This module implements this method of generating connection IDs. It's the +//! most common way to generate connection IDs. The connection ID is generated +//! using a time-based algorithm and it is valid for a certain amount of time +//! (usually two minutes). The connection ID is generated using the following: +//! +//! ```text +//! connection ID = hash(client IP + current time slot + secret seed) +//! ``` +//! +//! Time slots are two-minute intervals since the Unix epoch. The secret seed is +//! a random number that is generated when the server starts. And the client IP +//! is used in order to generate a unique connection ID for each client. +//! +//! The BEP-15 recommends a two-minute time slot. +//! +//! ```text +//! Timestamp (seconds from Unix epoch): +//! |------------|------------|------------|------------| +//! 0 120 240 360 480 +//! Time slots (two-minute time extents from Unix epoch): +//! |------------|------------|------------|------------| +//! 0 1 2 3 4 +//! Peer connections: +//! Peer A |-------------------------| +//! Peer B |-------------------------| +//! Peer C |------------------| +//! Peer A connects at timestamp 120 slot 1 -> connection ID will be valid from timestamp 120 to 360 +//! Peer B connects at timestamp 240 slot 2 -> connection ID will be valid from timestamp 240 to 480 +//! Peer C connects at timestamp 180 slot 1 -> connection ID will be valid from timestamp 180 to 360 +//! ``` +//! 
> **NOTICE**: connection ID is always the same for a given peer +//! > (socket address) and time slot. +//! +//! > **NOTICE**: connection ID will be valid for two time extents, **not two +//! > minutes**. It'll be valid for the current time extent and the next one. +//! +//! Refer to [`Connect`](crate::servers::udp#connect) for more information about +//! the connection process. +//! +//! ## Advantages +//! +//! - It consumes less memory than storing a hash table of connection IDs. +//! - It's easy to implement. +//! - It's fast. +//! +//! ## Disadvantages +//! +//! - It's not very flexible. The connection ID is only valid for a certain amount of time. +//! - It's not very accurate. The connection ID is valid for more than two minutes. +use std::net::SocketAddr; +use std::panic::Location; + +use aquatic_udp_protocol::ConnectionId; +use torrust_tracker_clock::time_extent::{Extent, TimeExtent}; +use zerocopy::network_endian::I64; +use zerocopy::AsBytes; + +use super::error::Error; + +pub type Cookie = [u8; 8]; + +pub type SinceUnixEpochTimeExtent = TimeExtent; + +pub const COOKIE_LIFETIME: TimeExtent = TimeExtent::from_sec(2, &60); + +/// Converts a connection ID into a connection cookie. +#[must_use] +pub fn from_connection_id(connection_id: &ConnectionId) -> Cookie { + let mut cookie = [0u8; 8]; + connection_id.write_to(&mut cookie); + cookie +} + +/// Converts a connection cookie into a connection ID. +#[must_use] +pub fn into_connection_id(connection_cookie: &Cookie) -> ConnectionId { + ConnectionId(I64::new(i64::from_be_bytes(*connection_cookie))) +} + +/// Generates a new connection cookie. +#[must_use] +pub fn make(remote_address: &SocketAddr) -> Cookie { + let time_extent = cookie_builder::get_last_time_extent(); + + //println!("remote_address: {remote_address:?}, time_extent: {time_extent:?}, cookie: {cookie:?}"); + cookie_builder::build(remote_address, &time_extent) +} + +/// Checks if the supplied `connection_cookie` is valid. 
+/// +/// # Panics +/// +/// It would panic if the `COOKIE_LIFETIME` constant would be an unreasonably large number. +/// +/// # Errors +/// +/// Will return a `ServerError::InvalidConnectionId` if the supplied `connection_cookie` fails to verify. +pub fn check(remote_address: &SocketAddr, connection_cookie: &Cookie) -> Result { + // we loop backwards testing each time_extent until we find one that matches. + // (or the lifetime of time_extents is exhausted) + for offset in 0..=COOKIE_LIFETIME.amount { + let checking_time_extent = cookie_builder::get_last_time_extent().decrease(offset).unwrap(); + + let checking_cookie = cookie_builder::build(remote_address, &checking_time_extent); + //println!("remote_address: {remote_address:?}, time_extent: {checking_time_extent:?}, cookie: {checking_cookie:?}"); + + if *connection_cookie == checking_cookie { + return Ok(checking_time_extent); + } + } + Err(Error::InvalidConnectionId { + location: Location::caller(), + }) +} + +mod cookie_builder { + use std::collections::hash_map::DefaultHasher; + use std::hash::{Hash, Hasher}; + use std::net::SocketAddr; + + use torrust_tracker_clock::time_extent::{Extent, Make, TimeExtent}; + + use super::{Cookie, SinceUnixEpochTimeExtent, COOKIE_LIFETIME}; + use crate::shared::crypto::keys::seeds::{Current, Keeper}; + use crate::DefaultTimeExtentMaker; + + pub(super) fn get_last_time_extent() -> SinceUnixEpochTimeExtent { + DefaultTimeExtentMaker::now(&COOKIE_LIFETIME.increment) + .unwrap() + .unwrap() + .increase(COOKIE_LIFETIME.amount) + .unwrap() + } + + pub(super) fn build(remote_address: &SocketAddr, time_extent: &TimeExtent) -> Cookie { + let seed = Current::get_seed(); + + let mut hasher = DefaultHasher::new(); + + remote_address.hash(&mut hasher); + time_extent.hash(&mut hasher); + seed.hash(&mut hasher); + + hasher.finish().to_le_bytes() + } +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + + use 
torrust_tracker_clock::clock::stopped::Stopped as _; + use torrust_tracker_clock::clock::{self}; + use torrust_tracker_clock::time_extent::{self, Extent}; + + use super::cookie_builder::{self}; + use crate::servers::udp::connection_cookie::{check, make, Cookie, COOKIE_LIFETIME}; + + // #![feature(const_socketaddr)] + // const REMOTE_ADDRESS_IPV4_ZERO: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + #[test] + fn it_should_make_a_connection_cookie() { + // Note: This constant may need to be updated in the future as the hash + // is not guaranteed to be stable between versions. + const ID_COOKIE_OLD_HASHER: Cookie = [41, 166, 45, 246, 249, 24, 108, 203]; + const ID_COOKIE_NEW_HASHER: Cookie = [185, 122, 191, 238, 6, 43, 2, 198]; + + clock::Stopped::local_set_to_unix_epoch(); + + let cookie = make(&SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)); + + assert!(cookie == ID_COOKIE_OLD_HASHER || cookie == ID_COOKIE_NEW_HASHER); + } + + #[test] + fn it_should_make_the_same_connection_cookie_for_the_same_input_data() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + let time_extent_zero = time_extent::ZERO; + + let cookie = cookie_builder::build(&remote_address, &time_extent_zero); + let cookie_2 = cookie_builder::build(&remote_address, &time_extent_zero); + + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie_2:?}"); + + //remote_address: 127.0.0.1:8080, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [212, 9, 204, 223, 176, 190, 150, 153] + //remote_address: 127.0.0.1:8080, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [212, 9, 204, 223, 176, 190, 150, 153] + + assert_eq!(cookie, cookie_2); + } + + #[test] + fn it_should_make_the_different_connection_cookie_for_different_ip() { + let remote_address = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + let remote_address_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::BROADCAST), 0); + let time_extent_zero = time_extent::ZERO; + + let cookie = cookie_builder::build(&remote_address, &time_extent_zero); + let cookie_2 = cookie_builder::build(&remote_address_2, &time_extent_zero); + + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); + println!("remote_address: {remote_address_2:?}, time_extent: {time_extent_zero:?}, cookie: {cookie_2:?}"); + + //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] + //remote_address: 255.255.255.255:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [217, 87, 239, 178, 182, 126, 66, 166] + + assert_ne!(cookie, cookie_2); + } + + #[test] + fn it_should_make_the_different_connection_cookie_for_different_ip_version() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + let remote_address_2 = SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0); + let time_extent_zero = time_extent::ZERO; + + let cookie = cookie_builder::build(&remote_address, &time_extent_zero); + let cookie_2 = cookie_builder::build(&remote_address_2, &time_extent_zero); + + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); + println!("remote_address: {remote_address_2:?}, time_extent: {time_extent_zero:?}, cookie: {cookie_2:?}"); + + //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] + //remote_address: [::]:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [99, 119, 230, 177, 20, 220, 163, 187] + + assert_ne!(cookie, cookie_2); + } + + #[test] + fn it_should_make_the_different_connection_cookie_for_different_socket() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + 
let remote_address_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 1); + let time_extent_zero = time_extent::ZERO; + + let cookie = cookie_builder::build(&remote_address, &time_extent_zero); + let cookie_2 = cookie_builder::build(&remote_address_2, &time_extent_zero); + + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); + println!("remote_address: {remote_address_2:?}, time_extent: {time_extent_zero:?}, cookie: {cookie_2:?}"); + + //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] + //remote_address: 0.0.0.0:1, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [38, 8, 0, 102, 92, 170, 220, 11] + + assert_ne!(cookie, cookie_2); + } + + #[test] + fn it_should_make_the_different_connection_cookie_for_different_time_extents() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + let time_extent_zero = time_extent::ZERO; + let time_extent_max = time_extent::MAX; + + let cookie = cookie_builder::build(&remote_address, &time_extent_zero); + let cookie_2 = cookie_builder::build(&remote_address, &time_extent_max); + + println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); + println!("remote_address: {remote_address:?}, time_extent: {time_extent_max:?}, cookie: {cookie_2:?}"); + + //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] + //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 18446744073709551615.999999999s, amount: 18446744073709551615 }, cookie: [87, 111, 109, 125, 182, 206, 3, 201] + + assert_ne!(cookie, cookie_2); + } + + #[test] + fn it_should_make_different_cookies_for_the_next_time_extent() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + let cookie = make(&remote_address); + + 
clock::Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); + + let cookie_next = make(&remote_address); + + assert_ne!(cookie, cookie_next); + } + + #[test] + fn it_should_be_valid_for_this_time_extent() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + let cookie = make(&remote_address); + + check(&remote_address, &cookie).unwrap(); + } + + #[test] + fn it_should_be_valid_for_the_next_time_extent() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + let cookie = make(&remote_address); + + clock::Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); + + check(&remote_address, &cookie).unwrap(); + } + + #[test] + fn it_should_be_valid_for_the_last_time_extent() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + clock::Stopped::local_set_to_unix_epoch(); + + let cookie = make(&remote_address); + + clock::Stopped::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); + + check(&remote_address, &cookie).unwrap(); + } + + #[test] + #[should_panic = "InvalidConnectionId"] + fn it_should_be_not_valid_after_their_last_time_extent() { + let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + + let cookie = make(&remote_address); + + clock::Stopped::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap()); + + check(&remote_address, &cookie).unwrap(); + } +} diff --git a/src/servers/udp/error.rs b/src/servers/udp/error.rs new file mode 100644 index 000000000..315c9d1cf --- /dev/null +++ b/src/servers/udp/error.rs @@ -0,0 +1,36 @@ +//! Error types for the UDP server. +use std::panic::Location; + +use thiserror::Error; +use torrust_tracker_located_error::LocatedError; + +/// Error returned by the UDP server. +#[derive(Error, Debug)] +pub enum Error { + /// Error returned when the domain tracker returns an error. 
+ #[error("tracker server error: {source}")] + TrackerError { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + + /// Error returned from a third-party library (`aquatic_udp_protocol`). + #[error("internal server error: {message}, {location}")] + InternalServer { + location: &'static Location<'static>, + message: String, + }, + + /// Error returned when the connection id could not be verified. + #[error("connection id could not be verified")] + InvalidConnectionId { location: &'static Location<'static> }, + + /// Error returned when the request is invalid. + #[error("bad request: {source}")] + BadRequest { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + + /// Error returned when tracker requires authentication. + #[error("domain tracker requires authentication but is not supported in current UDP implementation. Location: {location}")] + TrackerAuthenticationRequired { location: &'static Location<'static> }, +} diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs new file mode 100644 index 000000000..69a427e0e --- /dev/null +++ b/src/servers/udp/handlers.rs @@ -0,0 +1,1368 @@ +//! Handlers for the UDP server. 
+use std::fmt; +use std::net::{IpAddr, SocketAddr}; +use std::panic::Location; +use std::sync::Arc; +use std::time::Instant; + +use aquatic_udp_protocol::{ + AnnounceInterval, AnnounceRequest, AnnounceResponse, AnnounceResponseFixedData, ConnectRequest, ConnectResponse, + ErrorResponse, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, + ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, +}; +use torrust_tracker_located_error::DynError; +use torrust_tracker_primitives::info_hash::InfoHash; +use tracing::{instrument, Level}; +use uuid::Uuid; +use zerocopy::network_endian::I32; + +use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; +use super::RawRequest; +use crate::core::{statistics, PeersWanted, ScrapeData, Tracker}; +use crate::servers::udp::error::Error; +use crate::servers::udp::logging::{log_bad_request, log_error_response, log_request, log_response}; +use crate::servers::udp::peer_builder; +use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; + +/// It handles the incoming UDP packets. +/// +/// It's responsible for: +/// +/// - Parsing the incoming packet. +/// - Delegating the request to the correct handler depending on the request type. +/// +/// It will return an `Error` response if the request is invalid. 
+#[instrument(skip(udp_request, tracker, local_addr), ret(level = Level::TRACE))] +pub(crate) async fn handle_packet(udp_request: RawRequest, tracker: &Tracker, local_addr: SocketAddr) -> Response { + tracing::debug!("Handling Packets: {udp_request:?}"); + + let start_time = Instant::now(); + + let request_id = RequestId::make(&udp_request); + + match Request::parse_bytes(&udp_request.payload[..udp_request.payload.len()], MAX_SCRAPE_TORRENTS).map_err(|e| { + Error::InternalServer { + message: format!("{e:?}"), + location: Location::caller(), + } + }) { + Ok(request) => { + log_request(&request, &request_id, &local_addr); + + let transaction_id = match &request { + Request::Connect(connect_request) => connect_request.transaction_id, + Request::Announce(announce_request) => announce_request.transaction_id, + Request::Scrape(scrape_request) => scrape_request.transaction_id, + }; + + let response = match handle_request(request, udp_request.from, tracker).await { + Ok(response) => response, + Err(e) => handle_error(&e, transaction_id), + }; + + let latency = start_time.elapsed(); + + log_response(&response, &transaction_id, &request_id, &local_addr, latency); + + response + } + Err(e) => { + log_bad_request(&request_id); + + let response = handle_error( + &Error::BadRequest { + source: (Arc::new(e) as DynError).into(), + }, + TransactionId(I32::new(0)), + ); + + log_error_response(&request_id); + + response + } + } +} + +/// It dispatches the request to the correct handler. +/// +/// # Errors +/// +/// If a error happens in the `handle_request` function, it will just return the `ServerError`. 
+#[instrument(skip(request, remote_addr, tracker))] +pub async fn handle_request(request: Request, remote_addr: SocketAddr, tracker: &Tracker) -> Result { + tracing::trace!("handle request"); + + match request { + Request::Connect(connect_request) => handle_connect(remote_addr, &connect_request, tracker).await, + Request::Announce(announce_request) => handle_announce(remote_addr, &announce_request, tracker).await, + Request::Scrape(scrape_request) => handle_scrape(remote_addr, &scrape_request, tracker).await, + } +} + +/// It handles the `Connect` request. Refer to [`Connect`](crate::servers::udp#connect) +/// request for more information. +/// +/// # Errors +/// +/// This function does not ever return an error. +#[instrument(skip(tracker), err, ret(level = Level::TRACE))] +pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, tracker: &Tracker) -> Result { + tracing::trace!("handle connect"); + + let connection_cookie = make(&remote_addr); + let connection_id = into_connection_id(&connection_cookie); + + let response = ConnectResponse { + transaction_id: request.transaction_id, + connection_id, + }; + + // send stats event + match remote_addr { + SocketAddr::V4(_) => { + tracker.send_stats_event(statistics::Event::Udp4Connect).await; + } + SocketAddr::V6(_) => { + tracker.send_stats_event(statistics::Event::Udp6Connect).await; + } + } + + Ok(Response::from(response)) +} + +/// It handles the `Announce` request. Refer to [`Announce`](crate::servers::udp#announce) +/// request for more information. +/// +/// # Errors +/// +/// If a error happens in the `handle_announce` function, it will just return the `ServerError`. 
+#[instrument(skip(tracker), err, ret(level = Level::TRACE))] +pub async fn handle_announce( + remote_addr: SocketAddr, + announce_request: &AnnounceRequest, + tracker: &Tracker, +) -> Result { + tracing::trace!("handle announce"); + + // Authentication + if tracker.requires_authentication() { + return Err(Error::TrackerAuthenticationRequired { + location: Location::caller(), + }); + } + + check(&remote_addr, &from_connection_id(&announce_request.connection_id))?; + + let info_hash = announce_request.info_hash.into(); + let remote_client_ip = remote_addr.ip(); + + // Authorization + tracker.authorize(&info_hash).await.map_err(|e| Error::TrackerError { + source: (Arc::new(e) as Arc).into(), + })?; + + let mut peer = peer_builder::from_request(announce_request, &remote_client_ip); + let peers_wanted: PeersWanted = i32::from(announce_request.peers_wanted.0).into(); + + let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted); + + match remote_client_ip { + IpAddr::V4(_) => { + tracker.send_stats_event(statistics::Event::Udp4Announce).await; + } + IpAddr::V6(_) => { + tracker.send_stats_event(statistics::Event::Udp6Announce).await; + } + } + + #[allow(clippy::cast_possible_truncation)] + if remote_addr.is_ipv4() { + let announce_response = AnnounceResponse { + fixed: AnnounceResponseFixedData { + transaction_id: announce_request.transaction_id, + announce_interval: AnnounceInterval(I32::new(i64::from(tracker.get_announce_policy().interval) as i32)), + leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), + seeders: NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), + }, + peers: response + .peers + .iter() + .filter_map(|peer| { + if let IpAddr::V4(ip) = peer.peer_addr.ip() { + Some(ResponsePeer:: { + ip_address: ip.into(), + port: Port(peer.peer_addr.port().into()), + }) + } else { + None + } + }) + .collect(), + }; + + Ok(Response::from(announce_response)) + } else { + let announce_response = 
AnnounceResponse { + fixed: AnnounceResponseFixedData { + transaction_id: announce_request.transaction_id, + announce_interval: AnnounceInterval(I32::new(i64::from(tracker.get_announce_policy().interval) as i32)), + leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), + seeders: NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), + }, + peers: response + .peers + .iter() + .filter_map(|peer| { + if let IpAddr::V6(ip) = peer.peer_addr.ip() { + Some(ResponsePeer:: { + ip_address: ip.into(), + port: Port(peer.peer_addr.port().into()), + }) + } else { + None + } + }) + .collect(), + }; + + Ok(Response::from(announce_response)) + } +} + +/// It handles the `Scrape` request. Refer to [`Scrape`](crate::servers::udp#scrape) +/// request for more information. +/// +/// # Errors +/// +/// This function does not ever return an error. +#[instrument(skip(tracker), err, ret(level = Level::TRACE))] +pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tracker: &Tracker) -> Result { + tracing::trace!("handle scrape"); + + // Convert from aquatic infohashes + let mut info_hashes: Vec = vec![]; + for info_hash in &request.info_hashes { + info_hashes.push((*info_hash).into()); + } + + let scrape_data = if tracker.requires_authentication() { + ScrapeData::zeroed(&info_hashes) + } else { + tracker.scrape(&info_hashes).await + }; + + let mut torrent_stats: Vec = Vec::new(); + + for file in &scrape_data.files { + let swarm_metadata = file.1; + + #[allow(clippy::cast_possible_truncation)] + let scrape_entry = { + TorrentScrapeStatistics { + seeders: NumberOfPeers(I32::new(i64::from(swarm_metadata.complete) as i32)), + completed: NumberOfDownloads(I32::new(i64::from(swarm_metadata.downloaded) as i32)), + leechers: NumberOfPeers(I32::new(i64::from(swarm_metadata.incomplete) as i32)), + } + }; + + torrent_stats.push(scrape_entry); + } + + // send stats event + match remote_addr { + SocketAddr::V4(_) => { + 
tracker.send_stats_event(statistics::Event::Udp4Scrape).await; + } + SocketAddr::V6(_) => { + tracker.send_stats_event(statistics::Event::Udp6Scrape).await; + } + } + + let response = ScrapeResponse { + transaction_id: request.transaction_id, + torrent_stats, + }; + + Ok(Response::from(response)) +} + +fn handle_error(e: &Error, transaction_id: TransactionId) -> Response { + let message = e.to_string(); + Response::from(ErrorResponse { + transaction_id, + message: message.into(), + }) +} + +/// An identifier for a request. +#[derive(Debug, Clone)] +pub struct RequestId(Uuid); + +impl RequestId { + fn make(_request: &RawRequest) -> RequestId { + RequestId(Uuid::new_v4()) + } +} + +impl fmt::Display for RequestId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +#[cfg(test)] +mod tests { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{NumberOfBytes, PeerId}; + use torrust_tracker_clock::clock::Time; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::peer; + use torrust_tracker_test_helpers::configuration; + + use crate::core::services::tracker_factory; + use crate::core::Tracker; + use crate::CurrentClock; + + fn tracker_configuration() -> Configuration { + default_testing_tracker_configuration() + } + + fn default_testing_tracker_configuration() -> Configuration { + configuration::ephemeral() + } + + fn public_tracker() -> Arc { + initialized_tracker(&configuration::ephemeral_public()) + } + + fn private_tracker() -> Arc { + initialized_tracker(&configuration::ephemeral_private()) + } + + fn whitelisted_tracker() -> Arc { + initialized_tracker(&configuration::ephemeral_listed()) + } + + fn initialized_tracker(configuration: &Configuration) -> Arc { + tracker_factory(configuration).into() + } + + fn sample_ipv4_remote_addr() -> SocketAddr { + sample_ipv4_socket_address() + } + + fn sample_ipv6_remote_addr() -> SocketAddr 
{ + sample_ipv6_socket_address() + } + + fn sample_ipv4_socket_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + } + + fn sample_ipv6_socket_address() -> SocketAddr { + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + } + + #[derive(Debug, Default)] + pub struct TorrentPeerBuilder { + peer: peer::Peer, + } + + impl TorrentPeerBuilder { + #[must_use] + pub fn new() -> Self { + Self { + peer: peer::Peer { + updated: CurrentClock::now(), + ..Default::default() + }, + } + } + + #[must_use] + pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; + self + } + + #[must_use] + pub fn with_peer_id(mut self, peer_id: PeerId) -> Self { + self.peer.peer_id = peer_id; + self + } + + #[must_use] + pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes::new(left); + self + } + + #[must_use] + pub fn into(self) -> peer::Peer { + self.peer + } + } + + struct TrackerConfigurationBuilder { + configuration: Configuration, + } + + impl TrackerConfigurationBuilder { + pub fn default() -> TrackerConfigurationBuilder { + let default_configuration = default_testing_tracker_configuration(); + TrackerConfigurationBuilder { + configuration: default_configuration, + } + } + + pub fn with_external_ip(mut self, external_ip: &str) -> Self { + self.configuration.core.net.external_ip = Some(external_ip.to_owned().parse().expect("valid IP address")); + self + } + + pub fn into(self) -> Configuration { + self.configuration + } + } + + mod connect_request { + + use std::future; + use std::sync::Arc; + + use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; + use mockall::predicate::eq; + + use super::{sample_ipv4_socket_address, sample_ipv6_remote_addr, tracker_configuration}; + use crate::core::{self, statistics}; + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use 
crate::servers::udp::handlers::handle_connect; + use crate::servers::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; + + fn sample_connect_request() -> ConnectRequest { + ConnectRequest { + transaction_id: TransactionId(0i32.into()), + } + } + + #[tokio::test] + async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { + let request = ConnectRequest { + transaction_id: TransactionId(0i32.into()), + }; + + let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker()) + .await + .unwrap(); + + assert_eq!( + response, + Response::Connect(ConnectResponse { + connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), + transaction_id: request.transaction_id + }) + ); + } + + #[tokio::test] + async fn a_connect_response_should_contain_a_new_connection_id() { + let request = ConnectRequest { + transaction_id: TransactionId(0i32.into()), + }; + + let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker()) + .await + .unwrap(); + + assert_eq!( + response, + Response::Connect(ConnectResponse { + connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), + transaction_id: request.transaction_id + }) + ); + } + + #[tokio::test] + async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Udp4Connect)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let client_socket_address = sample_ipv4_socket_address(); + + let torrent_tracker = Arc::new( + core::Tracker::new( + &tracker_configuration().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + handle_connect(client_socket_address, &sample_connect_request(), &torrent_tracker) + .await + 
.unwrap(); + } + + #[tokio::test] + async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Udp6Connect)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let torrent_tracker = Arc::new( + core::Tracker::new( + &tracker_configuration().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), &torrent_tracker) + .await + .unwrap(); + } + } + + mod announce_request { + + use std::net::Ipv4Addr; + use std::num::NonZeroU16; + + use aquatic_udp_protocol::{ + AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectionId, NumberOfBytes, NumberOfPeers, + PeerId as AquaticPeerId, PeerKey, Port, TransactionId, + }; + + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::tests::sample_ipv4_remote_addr; + + struct AnnounceRequestBuilder { + request: AnnounceRequest, + } + + impl AnnounceRequestBuilder { + pub fn default() -> AnnounceRequestBuilder { + let client_ip = Ipv4Addr::new(126, 0, 0, 1); + let client_port = 8080; + let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); + + let default_request = AnnounceRequest { + connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), + action_placeholder: AnnounceActionPlaceholder::default(), + transaction_id: TransactionId(0i32.into()), + info_hash: info_hash_aquatic, + peer_id: AquaticPeerId([255u8; 20]), + bytes_downloaded: NumberOfBytes(0i64.into()), + bytes_uploaded: NumberOfBytes(0i64.into()), + bytes_left: NumberOfBytes(0i64.into()), + event: AnnounceEvent::Started.into(), + ip_address: client_ip.into(), + key: PeerKey::new(0i32), + peers_wanted: 
NumberOfPeers::new(1i32), + port: Port::new(NonZeroU16::new(client_port).expect("a non-zero client port")), + }; + AnnounceRequestBuilder { + request: default_request, + } + } + + pub fn with_connection_id(mut self, connection_id: ConnectionId) -> Self { + self.request.connection_id = connection_id; + self + } + + pub fn with_info_hash(mut self, info_hash: aquatic_udp_protocol::InfoHash) -> Self { + self.request.info_hash = info_hash; + self + } + + pub fn with_peer_id(mut self, peer_id: AquaticPeerId) -> Self { + self.request.peer_id = peer_id; + self + } + + pub fn with_ip_address(mut self, ip_address: Ipv4Addr) -> Self { + self.request.ip_address = ip_address.into(); + self + } + + pub fn with_port(mut self, port: u16) -> Self { + self.request.port = Port(port.into()); + self + } + + pub fn into(self) -> AnnounceRequest { + self.request + } + } + + mod using_ipv4 { + + use std::future; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{ + AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfPeers, + PeerId as AquaticPeerId, Response, ResponsePeer, + }; + use mockall::predicate::eq; + + use crate::core::{self, statistics}; + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::{ + public_tracker, sample_ipv4_socket_address, tracker_configuration, TorrentPeerBuilder, + }; + use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; + + #[tokio::test] + async fn an_announced_peer_should_be_added_to_the_tracker() { + let tracker = public_tracker(); + + let client_ip = Ipv4Addr::new(126, 0, 0, 1); + let client_port = 8080; + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + + let request 
= AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip) + .with_port(client_port) + .into(); + + handle_announce(remote_addr, &request, &tracker).await.unwrap(); + + let peers = tracker.get_torrent_peers(&info_hash.0.into()); + + let expected_peer = TorrentPeerBuilder::new() + .with_peer_id(peer_id) + .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip), client_port)) + .into(); + + assert_eq!(peers[0], Arc::new(expected_peer)); + } + + #[tokio::test] + async fn the_announced_peer_should_not_be_included_in_the_response() { + let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .into(); + + let response = handle_announce(remote_addr, &request, &public_tracker()).await.unwrap(); + + let empty_peer_vector: Vec> = vec![]; + assert_eq!( + response, + Response::from(AnnounceResponse { + fixed: AnnounceResponseFixedData { + transaction_id: request.transaction_id, + announce_interval: AnnounceInterval(120i32.into()), + leechers: NumberOfPeers(0i32.into()), + seeders: NumberOfPeers(1i32.into()), + }, + peers: empty_peer_vector + }) + ); + } + + #[tokio::test] + async fn the_tracker_should_always_use_the_remote_client_ip_but_not_the_port_in_the_udp_request_header_instead_of_the_peer_address_in_the_announce_request( + ) { + // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): + // "Do note that most trackers will only honor the IP address field under limited circumstances." 
+ + let tracker = public_tracker(); + + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + let client_port = 8080; + + let remote_client_ip = Ipv4Addr::new(126, 0, 0, 1); + let remote_client_port = 8081; + let peer_address = Ipv4Addr::new(126, 0, 0, 2); + + let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(peer_address) + .with_port(client_port) + .into(); + + handle_announce(remote_addr, &request, &tracker).await.unwrap(); + + let peers = tracker.get_torrent_peers(&info_hash.0.into()); + + assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); + } + + fn add_a_torrent_peer_using_ipv6(tracker: &Arc) { + let info_hash = AquaticInfoHash([0u8; 20]); + + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let client_port = 8080; + let peer_id = AquaticPeerId([255u8; 20]); + + let peer_using_ipv6 = TorrentPeerBuilder::new() + .with_peer_id(peer_id) + .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) + .into(); + + tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv6); + } + + async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { + let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + let request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .into(); + + handle_announce(remote_addr, &request, &tracker).await.unwrap() + } + + #[tokio::test] + async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { + let tracker = public_tracker(); + + add_a_torrent_peer_using_ipv6(&tracker); + + let response = 
announce_a_new_peer_using_ipv4(tracker.clone()).await; + + // The response should not contain the peer using IPV6 + let peers: Option>> = match response { + Response::AnnounceIpv6(announce_response) => Some(announce_response.peers), + _ => None, + }; + let no_ipv6_peers = peers.is_none(); + assert!(no_ipv6_peers); + } + + #[tokio::test] + async fn should_send_the_upd4_announce_event() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Udp4Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + core::Tracker::new( + &tracker_configuration().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + handle_announce( + sample_ipv4_socket_address(), + &AnnounceRequestBuilder::default().into(), + &tracker, + ) + .await + .unwrap(); + } + + mod from_a_loopback_ip { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::handle_announce; + use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::{public_tracker, TorrentPeerBuilder}; + + #[tokio::test] + async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { + let tracker = public_tracker(); + + let client_ip = Ipv4Addr::new(127, 0, 0, 1); + let client_port = 8080; + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + 
.with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip) + .with_port(client_port) + .into(); + + handle_announce(remote_addr, &request, &tracker).await.unwrap(); + + let peers = tracker.get_torrent_peers(&info_hash.0.into()); + + let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap(); + + let expected_peer = TorrentPeerBuilder::new() + .with_peer_id(peer_id) + .with_peer_address(SocketAddr::new(external_ip_in_tracker_configuration, client_port)) + .into(); + + assert_eq!(peers[0], Arc::new(expected_peer)); + } + } + } + + mod using_ipv6 { + + use std::future; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{ + AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfPeers, + PeerId as AquaticPeerId, Response, ResponsePeer, + }; + use mockall::predicate::eq; + + use crate::core::{self, statistics}; + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::{ + public_tracker, sample_ipv6_remote_addr, tracker_configuration, TorrentPeerBuilder, + }; + use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; + + #[tokio::test] + async fn an_announced_peer_should_be_added_to_the_tracker() { + let tracker = public_tracker(); + + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let client_port = 8080; + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip_v4) + .with_port(client_port) + .into(); 
+ + handle_announce(remote_addr, &request, &tracker).await.unwrap(); + + let peers = tracker.get_torrent_peers(&info_hash.0.into()); + + let expected_peer = TorrentPeerBuilder::new() + .with_peer_id(peer_id) + .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) + .into(); + + assert_eq!(peers[0], Arc::new(expected_peer)); + } + + #[tokio::test] + async fn the_announced_peer_should_not_be_included_in_the_response() { + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .into(); + + let response = handle_announce(remote_addr, &request, &public_tracker()).await.unwrap(); + + let empty_peer_vector: Vec> = vec![]; + assert_eq!( + response, + Response::from(AnnounceResponse { + fixed: AnnounceResponseFixedData { + transaction_id: request.transaction_id, + announce_interval: AnnounceInterval(120i32.into()), + leechers: NumberOfPeers(0i32.into()), + seeders: NumberOfPeers(1i32.into()), + }, + peers: empty_peer_vector + }) + ); + } + + #[tokio::test] + async fn the_tracker_should_always_use_the_remote_client_ip_but_not_the_port_in_the_udp_request_header_instead_of_the_peer_address_in_the_announce_request( + ) { + // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): + // "Do note that most trackers will only honor the IP address field under limited circumstances." 
+ + let tracker = public_tracker(); + + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + let client_port = 8080; + + let remote_client_ip = "::100".parse().unwrap(); // IPV4 ::0.0.1.0 -> IPV6 = ::100 = ::ffff:0:100 = 0:0:0:0:0:ffff:0:0100 + let remote_client_port = 8081; + let peer_address = "126.0.0.1".parse().unwrap(); + + let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(peer_address) + .with_port(client_port) + .into(); + + handle_announce(remote_addr, &request, &tracker).await.unwrap(); + + let peers = tracker.get_torrent_peers(&info_hash.0.into()); + + // When using IPv6 the tracker converts the remote client ip into a IPv4 address + assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); + } + + fn add_a_torrent_peer_using_ipv4(tracker: &Arc) { + let info_hash = AquaticInfoHash([0u8; 20]); + + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_port = 8080; + let peer_id = AquaticPeerId([255u8; 20]); + + let peer_using_ipv4 = TorrentPeerBuilder::new() + .with_peer_id(peer_id) + .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) + .into(); + + tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv4); + } + + async fn announce_a_new_peer_using_ipv6(tracker: Arc) -> Response { + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let client_port = 8080; + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + let request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .into(); + + handle_announce(remote_addr, &request, &tracker).await.unwrap() + } + + #[tokio::test] + async fn 
when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { + let tracker = public_tracker(); + + add_a_torrent_peer_using_ipv4(&tracker); + + let response = announce_a_new_peer_using_ipv6(tracker.clone()).await; + + // The response should not contain the peer using IPV4 + let peers: Option>> = match response { + Response::AnnounceIpv4(announce_response) => Some(announce_response.peers), + _ => None, + }; + let no_ipv4_peers = peers.is_none(); + assert!(no_ipv4_peers); + } + + #[tokio::test] + async fn should_send_the_upd6_announce_event() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Udp6Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let tracker = Arc::new( + core::Tracker::new( + &tracker_configuration().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + let remote_addr = sample_ipv6_remote_addr(); + + let announce_request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .into(); + + handle_announce(remote_addr, &announce_request, &tracker).await.unwrap(); + } + + mod from_a_loopback_ip { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + + use crate::core; + use crate::core::statistics::Keeper; + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::handle_announce; + use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::TrackerConfigurationBuilder; + + #[tokio::test] + async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { + let configuration = 
Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + let tracker = + Arc::new(core::Tracker::new(&configuration.core, Some(stats_event_sender), stats_repository).unwrap()); + + let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); + let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + + let client_ip_v4 = loopback_ipv4; + let client_ip_v6 = loopback_ipv6; + let client_port = 8080; + + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip_v4) + .with_port(client_port) + .into(); + + handle_announce(remote_addr, &request, &tracker).await.unwrap(); + + let peers = tracker.get_torrent_peers(&info_hash.0.into()); + + let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap(); + + assert!(external_ip_in_tracker_configuration.is_ipv6()); + + // There's a special type of IPv6 addresses that provide compatibility with IPv4. + // The last 32 bits of these addresses represent an IPv4, and are represented like this: + // 1111:2222:3333:4444:5555:6666:1.2.3.4 + // + // ::127.0.0.1 is the IPV6 representation for the IPV4 address 127.0.0.1. 
+ assert_eq!(Ok(peers[0].peer_addr.ip()), "::126.0.0.1".parse()); + } + } + } + } + + mod scrape_request { + use std::net::SocketAddr; + use std::sync::Arc; + + use aquatic_udp_protocol::{ + InfoHash, NumberOfDownloads, NumberOfPeers, PeerId, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, + TransactionId, + }; + + use super::TorrentPeerBuilder; + use crate::core::{self}; + use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; + + fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { + TorrentScrapeStatistics { + seeders: NumberOfPeers(0.into()), + completed: NumberOfDownloads(0.into()), + leechers: NumberOfPeers(0.into()), + } + } + + #[tokio::test] + async fn should_return_no_stats_when_the_tracker_does_not_have_any_torrent() { + let remote_addr = sample_ipv4_remote_addr(); + + let info_hash = InfoHash([0u8; 20]); + let info_hashes = vec![info_hash]; + + let request = ScrapeRequest { + connection_id: into_connection_id(&make(&remote_addr)), + transaction_id: TransactionId(0i32.into()), + info_hashes, + }; + + let response = handle_scrape(remote_addr, &request, &public_tracker()).await.unwrap(); + + let expected_torrent_stats = vec![zeroed_torrent_statistics()]; + + assert_eq!( + response, + Response::from(ScrapeResponse { + transaction_id: request.transaction_id, + torrent_stats: expected_torrent_stats + }) + ); + } + + async fn add_a_seeder(tracker: Arc, remote_addr: &SocketAddr, info_hash: &InfoHash) { + let peer_id = PeerId([255u8; 20]); + + let peer = TorrentPeerBuilder::new() + .with_peer_id(peer_id) + .with_peer_address(*remote_addr) + .with_number_of_bytes_left(0) + .into(); + + tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer); + } + + fn build_scrape_request(remote_addr: &SocketAddr, info_hash: &InfoHash) -> ScrapeRequest { + let info_hashes = vec![*info_hash]; + + 
ScrapeRequest { + connection_id: into_connection_id(&make(remote_addr)), + transaction_id: TransactionId::new(0i32), + info_hashes, + } + } + + async fn add_a_sample_seeder_and_scrape(tracker: Arc) -> Response { + let remote_addr = sample_ipv4_remote_addr(); + let info_hash = InfoHash([0u8; 20]); + + add_a_seeder(tracker.clone(), &remote_addr, &info_hash).await; + + let request = build_scrape_request(&remote_addr, &info_hash); + + handle_scrape(remote_addr, &request, &tracker).await.unwrap() + } + + fn match_scrape_response(response: Response) -> Option { + match response { + Response::Scrape(scrape_response) => Some(scrape_response), + _ => None, + } + } + + mod with_a_public_tracker { + use aquatic_udp_protocol::{NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; + + use crate::servers::udp::handlers::tests::public_tracker; + use crate::servers::udp::handlers::tests::scrape_request::{add_a_sample_seeder_and_scrape, match_scrape_response}; + + #[tokio::test] + async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { + let tracker = public_tracker(); + + let torrent_stats = match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone()).await); + + let expected_torrent_stats = vec![TorrentScrapeStatistics { + seeders: NumberOfPeers(1.into()), + completed: NumberOfDownloads(0.into()), + leechers: NumberOfPeers(0.into()), + }]; + + assert_eq!(torrent_stats.unwrap().torrent_stats, expected_torrent_stats); + } + } + + mod with_a_private_tracker { + + use aquatic_udp_protocol::InfoHash; + + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::scrape_request::{ + add_a_sample_seeder_and_scrape, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, + }; + use crate::servers::udp::handlers::tests::{private_tracker, sample_ipv4_remote_addr}; + + #[tokio::test] + async fn should_return_zeroed_statistics_when_the_tracker_does_not_have_the_requested_torrent() { + let 
tracker = private_tracker(); + + let remote_addr = sample_ipv4_remote_addr(); + let non_existing_info_hash = InfoHash([0u8; 20]); + + let request = build_scrape_request(&remote_addr, &non_existing_info_hash); + + let torrent_stats = match_scrape_response(handle_scrape(remote_addr, &request, &tracker).await.unwrap()).unwrap(); + + let expected_torrent_stats = vec![zeroed_torrent_statistics()]; + + assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); + } + + #[tokio::test] + async fn should_return_zeroed_statistics_when_the_tracker_has_the_requested_torrent_because_authenticated_requests_are_not_supported_in_udp_tracker( + ) { + let tracker = private_tracker(); + + let torrent_stats = match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone()).await).unwrap(); + + let expected_torrent_stats = vec![zeroed_torrent_statistics()]; + + assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); + } + } + + mod with_a_whitelisted_tracker { + use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; + + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::scrape_request::{ + add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, + }; + use crate::servers::udp::handlers::tests::{sample_ipv4_remote_addr, whitelisted_tracker}; + + #[tokio::test] + async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { + let tracker = whitelisted_tracker(); + + let remote_addr = sample_ipv4_remote_addr(); + let info_hash = InfoHash([0u8; 20]); + + add_a_seeder(tracker.clone(), &remote_addr, &info_hash).await; + + tracker.add_torrent_to_memory_whitelist(&info_hash.0.into()).await; + + let request = build_scrape_request(&remote_addr, &info_hash); + + let torrent_stats = match_scrape_response(handle_scrape(remote_addr, &request, &tracker).await.unwrap()).unwrap(); + + let expected_torrent_stats = vec![TorrentScrapeStatistics 
{ + seeders: NumberOfPeers(1.into()), + completed: NumberOfDownloads(0.into()), + leechers: NumberOfPeers(0.into()), + }]; + + assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); + } + + #[tokio::test] + async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { + let tracker = whitelisted_tracker(); + + let remote_addr = sample_ipv4_remote_addr(); + let info_hash = InfoHash([0u8; 20]); + + add_a_seeder(tracker.clone(), &remote_addr, &info_hash).await; + + let request = build_scrape_request(&remote_addr, &info_hash); + + let torrent_stats = match_scrape_response(handle_scrape(remote_addr, &request, &tracker).await.unwrap()).unwrap(); + + let expected_torrent_stats = vec![zeroed_torrent_statistics()]; + + assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); + } + } + + fn sample_scrape_request(remote_addr: &SocketAddr) -> ScrapeRequest { + let info_hash = InfoHash([0u8; 20]); + let info_hashes = vec![info_hash]; + + ScrapeRequest { + connection_id: into_connection_id(&make(remote_addr)), + transaction_id: TransactionId(0i32.into()), + info_hashes, + } + } + + mod using_ipv4 { + use std::future; + use std::sync::Arc; + + use mockall::predicate::eq; + + use super::sample_scrape_request; + use crate::core::{self, statistics}; + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::{sample_ipv4_remote_addr, tracker_configuration}; + + #[tokio::test] + async fn should_send_the_upd4_scrape_event() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Udp4Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let remote_addr = sample_ipv4_remote_addr(); + let tracker = Arc::new( + core::Tracker::new( + &tracker_configuration().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + 
.unwrap(), + ); + + handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), &tracker) + .await + .unwrap(); + } + } + + mod using_ipv6 { + use std::future; + use std::sync::Arc; + + use mockall::predicate::eq; + + use super::sample_scrape_request; + use crate::core::{self, statistics}; + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::{sample_ipv6_remote_addr, tracker_configuration}; + + #[tokio::test] + async fn should_send_the_upd6_scrape_event() { + let mut stats_event_sender_mock = statistics::MockEventSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::Event::Udp6Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender = Box::new(stats_event_sender_mock); + + let remote_addr = sample_ipv6_remote_addr(); + let tracker = Arc::new( + core::Tracker::new( + &tracker_configuration().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), + ); + + handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), &tracker) + .await + .unwrap(); + } + } + } +} diff --git a/src/servers/udp/logging.rs b/src/servers/udp/logging.rs new file mode 100644 index 000000000..3891278d7 --- /dev/null +++ b/src/servers/udp/logging.rs @@ -0,0 +1,87 @@ +//! Logging for UDP Tracker requests and responses. 
+ +use std::net::SocketAddr; +use std::time::Duration; + +use aquatic_udp_protocol::{Request, Response, TransactionId}; +use torrust_tracker_primitives::info_hash::InfoHash; + +use super::handlers::RequestId; +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; + +pub fn log_request(request: &Request, request_id: &RequestId, server_socket_addr: &SocketAddr) { + let action = map_action_name(request); + + match &request { + Request::Connect(connect_request) => { + let transaction_id = connect_request.transaction_id; + let transaction_id_str = transaction_id.0.to_string(); + + tracing::span!( + target: UDP_TRACKER_LOG_TARGET, + tracing::Level::INFO, "request", server_socket_addr = %server_socket_addr, action = %action, transaction_id = %transaction_id_str, request_id = %request_id); + } + Request::Announce(announce_request) => { + let transaction_id = announce_request.transaction_id; + let transaction_id_str = transaction_id.0.to_string(); + let connection_id_str = announce_request.connection_id.0.to_string(); + let info_hash_str = InfoHash::from_bytes(&announce_request.info_hash.0).to_hex_string(); + + tracing::span!( + target: UDP_TRACKER_LOG_TARGET, + tracing::Level::INFO, "request", server_socket_addr = %server_socket_addr, action = %action, transaction_id = %transaction_id_str, request_id = %request_id, connection_id = %connection_id_str, info_hash = %info_hash_str); + } + Request::Scrape(scrape_request) => { + let transaction_id = scrape_request.transaction_id; + let transaction_id_str = transaction_id.0.to_string(); + let connection_id_str = scrape_request.connection_id.0.to_string(); + + tracing::span!( + target: UDP_TRACKER_LOG_TARGET, + tracing::Level::INFO, + "request", + server_socket_addr = %server_socket_addr, + action = %action, + transaction_id = %transaction_id_str, + request_id = %request_id, + connection_id = %connection_id_str); + } + }; +} + +fn map_action_name(udp_request: &Request) -> String { + match udp_request { + 
Request::Connect(_connect_request) => "CONNECT".to_owned(), + Request::Announce(_announce_request) => "ANNOUNCE".to_owned(), + Request::Scrape(_scrape_request) => "SCRAPE".to_owned(), + } +} + +pub fn log_response( + _response: &Response, + transaction_id: &TransactionId, + request_id: &RequestId, + server_socket_addr: &SocketAddr, + latency: Duration, +) { + tracing::span!( + target: UDP_TRACKER_LOG_TARGET, + tracing::Level::INFO, + "response", + server_socket_addr = %server_socket_addr, + transaction_id = %transaction_id.0.to_string(), + request_id = %request_id, + latency_ms = %latency.as_millis()); +} + +pub fn log_bad_request(request_id: &RequestId) { + tracing::span!( + target: UDP_TRACKER_LOG_TARGET, + tracing::Level::INFO, "bad request", request_id = %request_id); +} + +pub fn log_error_response(request_id: &RequestId) { + tracing::span!( + target: UDP_TRACKER_LOG_TARGET, + tracing::Level::INFO, "response", request_id = %request_id); +} diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs new file mode 100644 index 000000000..91b19a91d --- /dev/null +++ b/src/servers/udp/mod.rs @@ -0,0 +1,662 @@ +//! UDP Tracker. +//! +//! This module contains the UDP tracker implementation. +//! +//! The UDP tracker is a simple UDP server that responds to these requests: +//! +//! - `Connect`: used to get a connection ID which must be provided on each +//! request in order to avoid spoofing the source address of the UDP packets. +//! - `Announce`: used to announce the presence of a peer to the tracker. +//! - `Scrape`: used to get information about a torrent. +//! +//! It was introduced in [BEP 15. UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html) +//! as an alternative to the [HTTP tracker](https://www.bittorrent.org/beps/bep_0003.html). +//! The UDP tracker is more efficient than the HTTP tracker because it uses UDP +//! instead of TCP. +//! +//! Refer to the [`bit_torrent`](crate::shared::bit_torrent) module for more +//! 
information about the `BitTorrent` protocol. +//! +//! Refer to [BEP 15. UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html) +//! and to [BEP 41. UDP Tracker Protocol Extensions](https://www.bittorrent.org/beps/bep_0041.html) +//! for more information about the UDP tracker protocol. +//! +//! > **NOTICE**: [BEP-41](https://www.bittorrent.org/beps/bep_0041.html) is not +//! > implemented yet. +//! +//! > **NOTICE**: we are using the [`aquatic_udp_protocol`](https://crates.io/crates/aquatic_udp_protocol) +//! > crate so requests and responses are handled by it. +//! +//! > **NOTICE**: all values are sent in network byte order ([big endian](https://en.wikipedia.org/wiki/Endianness)). +//! +//! ## Table of Contents +//! +//! - [Actions](#actions) +//! - [Connect](#connect) +//! - [Connect Request](#connect-request) +//! - [Connect Response](#connect-response) +//! - [Announce](#announce) +//! - [Announce Request](#announce-request) +//! - [Announce Response](#announce-response) +//! - [Scrape](#scrape) +//! - [Scrape Request](#scrape-request) +//! - [Scrape Response](#scrape-response) +//! - [Errors](#errors) +//! - [Extensions](#extensions) +//! - [Links](#links) +//! - [Credits](#credits) +//! +//! ## Actions +//! +//! Requests are sent to the tracker using UDP packets. The UDP tracker protocol +//! is designed to be as simple as possible. It uses a single UDP port and +//! supports only three types of requests: `Connect`, `Announce` and `Scrape`. +//! +//! Requests are parsed from UDP packets using the [`aquatic_udp_protocol`](https://crates.io/crates/aquatic_udp_protocol) +//! crate and then handled by the [`Tracker`](crate::core::Tracker) struct. +//! And then the response is also built using the [`aquatic_udp_protocol`](https://crates.io/crates/aquatic_udp_protocol) +//! and converted to a UDP packet. +//! +//! ```text +//! 
UDP packet -> Aquatic Struct Request -> [Torrust Struct Request] -> Tracker -> Aquatic Struct Response -> UDP packet +//! ``` +//! +//! ### Connect +//! +//! `Connect` requests are used to get a connection ID which must be provided on +//! each request in order to avoid spoofing the source address of the UDP. +//! +//! The connection ID is a random 64-bit integer that is used to identify the +//! client. It is used to prevent spoofing of the source address of the UDP +//! packets. Before announcing or scraping, you have to obtain a connection ID. +//! +//! The connection ID is generated by the tracker and sent back to the client's +//! IP address. Only the client using that IP can receive the response, so the +//! tracker can be sure that the client is the one who sent the request. If the +//! client's IP was spoofed the tracker will send the response to the wrong +//! client and the client will not receive it. +//! +//! The reason why the UDP tracker protocol needs a connection ID to avoid IP +//! spoofing can be explained as follows: +//! +//! 1. No connection state: Unlike TCP, UDP is a connectionless protocol, +//! meaning that it does not establish a connection between two endpoints before +//! exchanging data. As a result, it is more susceptible to IP spoofing, where +//! an attacker sends packets with a forged source IP address, tricking the +//! receiver into believing that they are coming from a legitimate source. +//! +//! 2. Mitigating IP spoofing: To mitigate IP spoofing in the UDP tracker +//! protocol, a connection ID is used. When a client wants to interact with a +//! tracker, it sends a "connect" request to the tracker, which, in turn, +//! responds with a unique connection ID. This connection ID must be included in +//! all subsequent requests from the client to the tracker. +//! +//! 3. Validating requests: By requiring the connection ID, the tracker can +//! verify that the requests are coming from the same client that initially sent +//! 
the "connect" request. If an attacker attempts to spoof the client's IP +//! address, they would also need to know the valid connection ID to be accepted +//! by the tracker. This makes it significantly more challenging for an attacker +//! to spoof IP addresses and disrupt the P2P network. +//! +//! There are different ways to generate a connection ID. The most common way is +//! to generate a time bound secret. The secret is generated using a time based +//! algorithm and it is valid for a certain amount of time. +//! +//! ```text +//! connection ID = hash(client IP + current time slot + secret seed) +//! ``` +//! +//! The BEP-15 recommends a two-minute time slot. Refer to [`connection_cookie`] +//! for more information about the connection ID generation with this method. +//! +//! #### Connect Request +//! +//! **Connect request (UDP packet)** +//! +//! Offset | Type/Size | Name | Description | Hex | Decimal +//! -------|-------------------|------------------|-------------------------------------------------|-----------------------------|----------------- +//! 0 | [`i64`](std::i64) | `protocol_id` | Magic constant that will identify the protocol. | `0x00_00_04_17_27_10_19_80` | `4497486125440` +//! 8 | [`i32`](std::i32) | `action` | Action identifying the connect request. | `0x00_00_00_00` | `0` +//! 12 | [`i32`](std::i32) | `transaction_id` | Randomly generated by the client. | `0xCB_05_5E_07` | `-888840697` +//! +//! **Sample connect request (UDP packet)** +//! +//! UDP packet bytes: +//! +//! ```text +//! Offset: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] +//! Decimal: [ 0, 0, 4, 23, 39, 16, 25, 128, 0, 0, 0, 0, 203, 5, 94, 7] +//! Hex: [0x00, 0x00, 0x04, 0x17, 0x27, 0x10, 0x19, 0x80, 0x00, 0x00, 0x00, 0x00, 0xCB, 0x05, 0x5E, 0x07] +//! Param: [<------------- protocol_id ------------------>,<------- action ------>,<--- transaction_id -->] +//! ``` +//! +//! UDP packet fields: +//! +//! 
Offset | Type/Size | Name | Bytes Dec (Big Endian) | Hex | Decimal +//! -------|-------------------|------------------|--------------------------------|-----------------------------|---------------- +//! 0 | [`i64`](std::i64) | `protocol_id` | [0, 0, 4, 23, 39, 16, 25, 128] | `0x00_00_04_17_27_10_19_80` | `4497486125440` +//! 8 | [`i32`](std::i32) | `action` | [0, 0, 0, 0] | `0x00_00_00_00` | `0` +//! 12 | [`i32`](std::i32) | `transaction_id` | [203, 5, 94, 7] | `0xCB_05_5E_07` | `-888840697` +//! +//! **Connect request (parsed struct)** +//! +//! After parsing the UDP packet, the [`ConnectRequest`](aquatic_udp_protocol::request::ConnectRequest) +//! request struct will look like this: +//! +//! Field | Type | Example +//! -----------------|----------------------------------------------------------------|------------- +//! `transaction_id` | [`TransactionId`](aquatic_udp_protocol::common::TransactionId) | `-888840697` +//! +//! #### Connect Response +//! +//! **Connect response (UDP packet)** +//! +//! Offset | Type/Size | Name | Description | Hex | Decimal +//! -------|-------------------|------------------|-------------------------------------------------------|-----------------------------|----------------------- +//! 0 | [`i32`](std::i32) | `action` | Action identifying the connect request | `0x00_00_00_00` | `0` +//! 4 | [`i32`](std::i32) | `transaction_id` | Must match the `transaction_id` sent from the client. | `0xCB_05_5E_07` | `-888840697` +//! 8 | [`i64`](std::i64) | `connection_id` | Generated by the tracker to authenticate the client. | `0xC5_58_7C_09_08_48_D8_37` | `-4226491872051668937` +//! +//! > **NOTICE**: the `connection_id` is used when further information is +//! > exchanged with the tracker, to identify the client. This `connection_id` can +//! > be reused for multiple requests, but if it's cached for too long, it will +//! > not be valid anymore. +//! +//! > **NOTICE**: `Hex` column is a signed 2's complement. +//! +//! 
**Sample connect response (UDP packet)** +//! +//! UDP packet bytes: +//! +//! ```text +//! Offset: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] +//! Decimal: [ 0, 0, 0, 0, 203, 5, 94, 7, 197, 88, 124, 9, 8, 72, 216, 55] +//! Hex: [0x00, 0x00, 0x00, 0x00, 0xCB, 0x05, 0x5E, 0x07, 0xC5, 0x58, 0x7C, 0x09, 0x08, 0x48, 0xD8, 0x37] +//! Param: [<------ action ------>,<-- transaction_id --->,<--------------- connection_id --------------->] +//! ``` +//! +//! UDP packet fields: +//! +//! Offset | Type/Size | Name | Bytes (Big Endian) | Hex | Decimal +//! -------|-------------------|------------------|-----------------------------------|------------------------------|----------------------- +//! 0 | [`i32`](std::i32) | `action` | [0, 0, 0, 0] | `0x00_00_00_00` | `0` +//! 4 | [`i32`](std::i32) | `transaction_id` | [203, 5, 94, 7] | `0xCB_05_5E_07` | `-888840697` +//! 8 | [`i64`](std::i64) | `connection_id` | [197, 88, 124, 9, 8, 72, 216, 55] | `0xC5_58_7C_09_08_48_D8_37` | `-4226491872051668937` +//! +//! > **NOTICE**: `Hex` column is a signed 2's complement. +//! +//! **Connect response (struct)** +//! +//! Before building the UDP packet, the [`ConnectResponse`](aquatic_udp_protocol::response::ConnectResponse) +//! struct will look like this: +//! +//! Field | Type | Example +//! -----------------|----------------------------------------------------------------|------------------------- +//! `connection_id` | [`ConnectionId`](aquatic_udp_protocol::common::ConnectionId) | `-4226491872051668937` +//! `transaction_id` | [`TransactionId`](aquatic_udp_protocol::common::TransactionId) | `-888840697` +//! +//! **Connect specification** +//! +//! Original specification in [BEP 15. UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html). +//! +//! ### Announce +//! +//! `Announce` requests are used to announce the presence of a peer to the +//! tracker. The tracker responds with a list of peers that are also downloading +//! the same torrent. 
A "swarm" is a group of peers that are downloading the +//! same torrent. +//! +//! #### Announce Request +//! +//! **Announce request (UDP packet)** +//! +//! Offset | Type/Size | Name | Description | Hex | Decimal +//! -------|-------------------|------------------|--------------------------------------------------------------|-----------------------------------------------------------------|---------------------------------------------------------- +//! 0 | [`i64`](std::i64) | `connection_id` | The connection id acquired from establishing the connection. | `0xC5_58_7C_09_08_48_D8_37` | `-4226491872051668937` +//! 8 | [`i32`](std::i32) | `action` | Action for announce request. | `0x00_00_00_01` | `1` +//! 12 | [`i32`](std::i32) | `transaction_id` | Randomly generated by the client. | `0xA2_F9_54_48` | `-1560718264` +//! 16 | 20-byte | `info_hash` | The infohash of the torrent being announced. | `0x03_84_05_48_64_3A_F2_A7_B6_3A_9F_5C_BC_A3_48_BC_71_50_CA_3A` | `20071130873666512363095721859061691407221705274` +//! 36 | 20-byte | `peer_id` | The ID of the peer announcing the torrent. | `0x2D_71_42_34_34_31_30_2D_29_53_64_7E_64_65_34_78_4D_70_36_44` | `259430336069436570531165609119312093997849130564` +//! 56 | [`i64`](std::i64) | `downloaded` | The number of bytes the peer has downloaded so far. | `0x00_00_00_00_00_00_00_00` | `0` +//! 64 | [`i64`](std::i64) | `left` | The number of bytes left to download by the peer. | `0x00_00_00_00_00_00_00_00` | `0` +//! 72 | [`i64`](std::i64) | `uploaded` | The number of bytes the peer has uploaded so far. | `0x00_00_00_00_00_00_00_00` | `0` +//! 80 | [`i32`](std::i32) | `event` | The event the peer is reporting to the tracker. | `0x0`, `0x1`, `0x2`, `0x3` | `0`: none; `1`: completed; `2`: started; `3`: stopped +//! 84 | [`i32`](std::i32) | `IP address` | The peer IP. Ignored by the tracker. It uses the Sender's IP.| `0x00_00_00_00` | `0` +//! 88 | [`i32`](std::i32) | `key` | A unique key that is randomized by the client. 
| `0xEF_34_95_D6` | `-281766442` +//! 92 | [`i32`](std::i32) | `num_want` | The maximum number of peers the peer wants in the response. | `0x00_00_00_C8` | `200` +//! 96 | [`i16`](std::i16) | `port` | The port the peer is listening on. | `0x44_8C` | `17548` +//! +//! **Peer IP address** +//! +//! The peer IP address is always ignored by the tracker. It uses the sender's +//! IP address. +//! +//! _"Do note that most trackers will only honor the IP address field under +//! limited circumstances."_ ([BEP 15](https://www.bittorrent.org/beps/bep_0015.html)). +//! +//! Although not supported by this tracker a UDP tracker can use the IP address +//! provided by the peer in the announce request under specific circumstances +//! when it cannot rely on the source IP address of the incoming request. These +//! circumstances might include: +//! +//! 1. Network Address Translation (NAT): In cases where a peer is behind a NAT, +//! the private IP address of the peer is not directly routable over the +//! internet. The NAT device translates the private IP address to a public one +//! when sending packets to the tracker. The public IP address is what the +//! tracker sees as the source IP of the incoming request. However, if the peer +//! provides its private IP address in the announce request, the tracker can use +//! this information to facilitate communication between peers in the same +//! private network. +//! +//! 2. Proxy or VPN usage: If a peer uses a proxy or VPN service to connect to +//! the tracker, the source IP address seen by the tracker will be the one +//! assigned by the proxy or VPN server. In this case, if the peer provides its +//! actual IP address in the announce request, the tracker can use it to +//! establish a direct connection with other peers, bypassing the proxy or VPN +//! server. This might improve performance or help in cases where some peers +//! cannot connect to the proxy or VPN server. +//! +//! 3. 
Tracker is behind a NAT, firewall, proxy, VPN, or load balancer: In cases +//! where the tracker is behind a NAT, firewall, proxy, VPN, or load balancer, +//! the source IP address of the incoming request will be the public IP address +//! of the NAT, firewall, proxy, VPN, or load balancer. If the peer provides its +//! private IP address in the announce request, the tracker can use this +//! information to establish a direct connection with the peer. +//! +//! It's important to note that using the provided IP address can pose security +//! risks, as malicious peers might spoof their IP addresses in the announce +//! request to perform various types of attacks. +//! +//! > **NOTICE**: The current tracker behavior is to ignore the IP address +//! > provided by the peer, and use the source IP address of the incoming request, +//! > when the tracker is not running behind a proxy, and to use the right-most IP +//! > address in the `X-Forwarded-For` header when the tracker is running behind a +//! > proxy. +//! +//! > **NOTICE**: The tracker also changes the peer IP address to the tracker +//! > external IP when the peer is using a loopback IP address. +//! +//! **Sample announce request (UDP packet)** +//! +//! Some values used in the sample request: +//! +//! - Infohash: `0x03840548643AF2A7B63A9F5CBCA348BC7150CA3A` +//! - Peer ID: `0x2D7142343431302D2953647E646534784D703644` +//! +//! UDP packet bytes: +//! +//! ```text +//! Offset: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100] +//! 
Decimal: [ 197, 88, 124, 9, 8, 72, 216, 55, 0, 0, 0, 1, 162, 249, 84, 72, 3, 132, 5, 72, 100, 58, 242, 167, 182, 58, 159, 92, 188, 163, 72, 188, 113, 80, 202, 58, 45, 113, 66, 52, 52, 49, 48, 45, 41, 83, 100, 126, 100, 101, 52, 120, 77, 112, 54, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 239, 52, 149, 214, 0, 0, 0, 200, 68, 140, 2, 1, 47] +//! Hex: [ 0xC5, 0x58, 0x7C, 0x09, 0x08, 0x48, 0xD8, 0x37, 0x00, 0x00, 0x00, 0x01, 0xA2, 0xF9, 0x54, 0x48, 0x03, 0x84, 0x05, 0x48, 0x64, 0x3A, 0xF2, 0xA7, 0xB6, 0x3A, 0x9F, 0x5C, 0xBC, 0xA3, 0x48, 0xBC, 0x71, 0x50, 0xCA, 0x3A, 0x2D, 0x71, 0x42, 0x34, 0x34, 0x31, 0x30, 0x2D, 0x29, 0x53, 0x64, 0x7E, 0x64, 0x65, 0x34, 0x78, 0x4D, 0x70, 0x36, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0xEF, 0x34, 0x95, 0xD6, 0x00, 0x00, 0x00, 0xC8, 0x44, 0x8C, 0x02, 0x01, 0x2F] +//! Param: [<--------------- connection_id --------------->,<--------- action ---->,<-- transaction_id --->,<--------------------------------------------------------- info_hash ------------------------------------------------->,<---------------------------------------------- peer_id -------------------------------------------------------------->,<------------------- downloaded -------------->,<-------------------- left ------------------->,<---------------- uploaded ------------------->,<-------- event ------>,<----- IP address ---->,<--------- key ------->,<------ num_want ----->,<-- port --><---- BEP 41 --->] +//! ``` +//! +//! UDP packet fields: +//! +//! Offset | Type/Size | Name | Bytes Dec (Big Endian) | Hex | Decimal +//! 
-------|-------------------|-------------------|--------------------------------------------------------------------------|-----------------------------------------------------------------|---------------------------------------------------- +//! 0 | [`i64`](std::i64) | `connection_id` | `[197,88,124,9,8,72,216,55]` | `0xC5_58_7C_09_08_48_D8_37` | `-4226491872051668937` +//! 8 | [`i32`](std::i32) | `action` | `[0,0,0,1]` | `0x00_00_00_01` | `1` +//! 12 | [`i32`](std::i32) | `transaction_id` | `[162,249,84,72]` | `0xA2_F9_54_48` | `-1560718264` +//! 16 | 20 bytes | `info_hash` | `[3,132,5,72,100,58,242,167,182,58,159,92,188,163,72,188,113,80,202,58]` | `0x03_84_05_48_64_3A_F2_A7_B6_3A_9F_5C_BC_A3_48_BC_71_50_CA_3A` | `20071130873666512363095721859061691407221705274` +//! 36 | 20 bytes | `peer_id` | `[45,113,66,52,52,49,48,45,41,83,100,126,100,101,52,120,77,112,54,68]` | `0x2D_71_42_34_34_31_30_2D_29_53_64_7E_64_65_34_78_4D_70_36_44` | `259430336069436570531165609119312093997849130564` +//! 56 | [`i64`](std::i64) | `downloaded` | `[0,0,0,0,0,0,0,0]` | `0x00_00_00_00_00_00_00_00` | `0` +//! 64 | [`i64`](std::i64) | `left` | `[0,0,0,0,0,0,0,0]` | `0x00_00_00_00_00_00_00_00` | `0` +//! 72 | [`i64`](std::i64) | `uploaded` | `[0,0,0,0,0,0,0,0]` | `0x00_00_00_00_00_00_00_00` | `0` +//! 80 | [`i32`](std::i32) | `event` | `[0,0,0,2]` | `0x00_00_00_02` | `2` (`Started`) +//! 84 | [`i32`](std::i32) | `IP address` | `[0,0,0,0]` | `0x00_00_00_00` | `0` +//! 88 | [`i32`](std::i32) | `key` | `[239,52,149,214]` | `0xEF_34_95_D6` | `-281766442` +//! 92 | [`i32`](std::i32) | `num_want` | `[0,0,0,200]` | `0x00_00_00_C8` | `200` +//! 96 | [`i16`](std::i16) | `port` | `[68,140]` | `0x44_8C` | `17548` +//! 98 | 1 byte | `Option-Type` | `[2]` | `0x02` | `2` +//! 99 | 1 byte | `Length` | `[1]` | `0x01` | `1` +//! 100 | N bytes | `URLData` | `[47]` | `0x2F` | `47` +//! +//! > **NOTICE**: bytes after offset 98 are part of the [BEP-41. 
UDP Tracker Protocol Extensions](https://www.bittorrent.org/beps/bep_0041.html). +//! > There are three options defined for byte 98: `0x0` (`EndOfOptions`), `0x1` (`NOP`) and `0x2` (`URLData`). +//! +//! > **NOTICE**: `num_want` is being ignored by the tracker. Refer to +//! > [issue 262](https://github.com/torrust/torrust-tracker/issues/262) for more +//! > information. +//! +//! **Announce request (parsed struct)** +//! +//! After parsing the UDP packet, the [`AnnounceRequest`](aquatic_udp_protocol::request::AnnounceRequest) +//! struct will contain the following fields: +//! +//! Field | Type | Example +//! -------------------|---------------------------------------------------------------- |-------------- +//! `connection_id` | [`ConnectionId`](aquatic_udp_protocol::common::ConnectionId) | `-4226491872051668937` +//! `transaction_id` | [`TransactionId`](aquatic_udp_protocol::common::TransactionId) | `-1560718264` +//! `info_hash` | [`InfoHash`](aquatic_udp_protocol::common::InfoHash) | `[3,132,5,72,100,58,242,167,182,58,159,92,188,163,72,188,113,80,202,58]` +//! `peer_id` | [`PeerId`](aquatic_udp_protocol::common::PeerId) | `[45,113,66,52,52,49,48,45,41,83,100,126,100,101,52,120,77,112,54,68]` +//! `bytes_downloaded` | [`NumberOfBytes`](aquatic_udp_protocol::common::NumberOfBytes) | `0` +//! `bytes_uploaded` | [`NumberOfBytes`](aquatic_udp_protocol::common::NumberOfBytes) | `0` +//! `bytes_left` | [`NumberOfBytes`](aquatic_udp_protocol::common::NumberOfBytes) | `0` +//! `event` | [`AnnounceEvent`](aquatic_udp_protocol::request::AnnounceEvent) | `Started` +//! `ip_address` | [`Ipv4Addr`](std::net::Ipv4Addr) | `None` +//! `peers_wanted` | [`NumberOfPeers`](aquatic_udp_protocol::common::NumberOfPeers) | `200` +//! `port` | [`Port`](aquatic_udp_protocol::common::Port) | `17548` +//! +//! > **NOTICE**: the `peers_wanted` field is the `num_want` field in the UDP +//! > packet. +//! +//! We are using a wrapper struct for the aquatic [`AnnounceRequest`](aquatic_udp_protocol::request::AnnounceRequest) +//! 
struct, because we have our internal [`InfoHash`](torrust_tracker_primitives::info_hash::InfoHash) +//! struct. +//! +//! ```text +//! pub struct AnnounceWrapper { +//! pub announce_request: AnnounceRequest, // aquatic +//! pub info_hash: InfoHash, // our own +//! } +//! ``` +//! +//! #### Announce Response +//! +//! **Announce response (UDP packet)** +//! +//! Offset | Type/Size | Name | Description | Hex | Decimal +//! -----------|-------------------|------------------|---------------------------------------------------------------------------------|-----------------|---------------------------- +//! 0 | [`i32`](std::i32) | `action` | The action this is a reply to. | `0x00_00_00_01` | `1`: announce; `3`: error +//! 4 | [`i32`](std::i32) | `transaction_id` | Must match the `transaction_id` sent in the announce request. | `0x00_00_00_00` | `0` +//! 8 | [`i32`](std::i32) | `interval` | The number of seconds the peer should wait until re-announcing itself. | `0x00_00_00_00` | `0` +//! 12 | [`i32`](std::i32) | `leechers` | The number of peers in the swarm that has not finished downloading. | `0x00_00_00_00` | `0` +//! 16 | [`i32`](std::i32) | `seeders` | The number of peers in the swarm that has finished downloading and are seeding. | `0x00_00_00_00` | `0` +//! | | | | | +//! 20 + 6 * n | [`i32`](std::i32) | `IP address` | The IP of a peer in the swarm. | `0x69_69_69_69` | `1768515945` +//! 24 + 6 * n | [`i16`](std::i16) | `TCP port` | The peer's listen port. | `0x44_8C` | `17548` +//! 20 + 6 * N | | | | | +//! +//! > **NOTICE**: `Hex` column is a signed 2's complement. +//! +//! > **NOTICE**: `IP address` should always be set to 0 when the peer is using +//! > `IPv6`. +//! +//! **Sample announce response (UDP packet)** +//! +//! UDP packet bytes (fixed part): +//! +//! ```text +//! Offset: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] +//! Decimal: [ 0, 0, 0, 1, 162, 249, 84, 72, 0, 0, 0, 120, 0, 0, 0, 0, 0, 0, 0, 1] +//! 
Hex: [ 0x00, 0x00, 0x00, 0x01, 0xA2, 0xF9, 0x54, 0x48, 0x00, 0x00, 0x00, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01] +//! Param: [<------- action ------>,<-- transaction_id --->,<----- interval ------>,<----- leechers ------>,<------ seeders ------>] +//! ``` +//! +//! UDP packet fields (fixed part): +//! +//! Offset | Type/Size | Name | Bytes (Big Endian) | Hex | Decimal +//! -----------|-------------------|------------------|---------------------|-----------------|---------------------------- +//! 0 | [`i32`](std::i32) | `action` | `[0, 0, 0, 1]` | `0x00_00_00_01` | `1`: announce; `3`: error +//! 4 | [`i32`](std::i32) | `transaction_id` | `[162,249,84,72]` | `0xA2_F9_54_48` | `-1560718264` +//! 8 | [`i32`](std::i32) | `interval` | `[0,0,0,120]` | `0x00_00_00_78` | `120` +//! 12 | [`i32`](std::i32) | `leechers` | `[0, 0, 0, 0]` | `0x00_00_00_00` | `0` +//! 16 | [`i32`](std::i32) | `seeders` | `[0, 0, 0, 1]` | `0x00_00_00_01` | `1` +//! +//! This is the fixed part of the packet. After the fixed part there is +//! dynamically generated data with the list of peers in the swarm. The list may +//! include `IPv4` or `IPv6` peers, depending on the address family of the +//! underlying UDP packet. I.e. packets from a v4 address use the v4 format, +//! those from a v6 address use the v6 format. +//! +//! UDP packet bytes (`IPv4` peer list): +//! +//! ```text +//! Offset: [ 20, 21, 22, 23, 24, 25] +//! Decimal: [ 105, 105, 105, 105, 68, 140] +//! Hex: [ 0x69, 0x69, 0x69, 0x69, 0x44, 0x8C] +//! Param: [<----- IP address ---->,<-TCP port>] +//! ``` +//! +//! > **NOTICE**: there are 6 bytes per peer (4 bytes for the `IPv4` address and +//! > 2 bytes for the TCP port). +//! +//! UDP packet fields (`IPv4` peer list): +//! +//! Offset | Type/Size | Name | Bytes (Big Endian) | Hex | Decimal +//! ---------|-------------------|--------------|---------------------|-----------------|---------------------------- +//! 
20 + 6*n | [`i32`](std::i32) | `IP address` | `[105,105,105,105]` | `0x69_69_69_69` | `1768515945` +//! 24 + 6*n | [`i16`](std::i16) | `TCP port` | `[68,140]` | `0x44_8C` | `17548` +//! 20 + 6*N | | | | | +//! +//! UDP packet bytes (`IPv6` peer list): +//! +//! ```text +//! Offset: [ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37] +//! Decimal: [ 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 68, 140] +//! Hex: [ 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x44, 0x8C] +//! Param: [<-------------------------------------------- IP address ------------------------------------->,<-TCP port>] +//! ``` +//! +//! > **NOTICE**: there are 18 bytes per peer (16 bytes for the `IPv6` address and +//! > 2 bytes for the TCP port). +//! +//! UDP packet fields (`IPv6` peer list): +//! +//! Offset | Type/Size | Name | Bytes (Big Endian) | Hex | Decimal +//! ----------|---------------------|--------------|---------------------------------------------------------------------|-----------------------------------------------------|------------------------------------------- +//! 20 + 18*n | [`i128`](std::i128) | `IP address` | `[105,105,105,105,105,105,105,105,105,105,105,105,105,105,105,105]` | `0x69_69_69_69_69_69_69_69_69_69_69_69_69_69_69_69` | `140116268732151132014330720707198675305` +//! 36 + 18*n | [`i16`](std::i16) | `TCP port` | `[68,140]` | `0x44_8C` | `17548` +//! 20 + 18*N | | | | | +//! +//! > **NOTICE**: `Hex` column is a signed 2's complement. +//! +//! > **NOTICE**: the peer list does not include the peer that sent the announce +//! > request. +//! +//! **Announce response (struct)** +//! +//! The [`AnnounceResponse`](aquatic_udp_protocol::response::AnnounceResponse) +//! struct will have the following fields: +//! +//! Field | Type | Example +//! --------------------|------------------------------------------------------------------------|-------------- +//! 
`transaction_id` | [`TransactionId`](aquatic_udp_protocol::common::TransactionId) | `-1560718264` +//! `announce_interval` | [`AnnounceInterval`](aquatic_udp_protocol::common::AnnounceInterval) | `120` +//! `leechers` | [`NumberOfPeers`](aquatic_udp_protocol::common::NumberOfPeers) | `0` +//! `seeders` | [`NumberOfPeers`](aquatic_udp_protocol::common::NumberOfPeers) | `1` +//! `peers` | Vector of [`ResponsePeer`](aquatic_udp_protocol::common::ResponsePeer) | `[]` +//! +//! **Announce specification** +//! +//! Original specification in [BEP 15. UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html). +//! +//! ### Scrape +//! +//! The `scrape` request allows a peer to get [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) +//! for multiple torrents at the same time. +//! +//! The response contains the [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) +//! for that torrent: +//! +//! - [complete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::complete) +//! - [downloaded](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::downloaded) +//! - [incomplete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::incomplete) +//! +//! > **NOTICE**: up to about 74 torrents can be scraped at once. A full scrape +//! > can't be done with this protocol. This is a limitation of the UDP protocol. +//! > Defined with a hardcoded const [`MAX_SCRAPE_TORRENTS`](crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS). +//! > Refer to [issue 262](https://github.com/torrust/torrust-tracker/issues/262) +//! > for more information about this limitation. +//! +//! #### Scrape Request +//! +//! **Scrape request (UDP packet)** +//! +//! Offset | Type/Size | Name | Description | Hex | Decimal +//! 
----------|-------------------|------------------|------------------------------------------------------------------------|-----------------------------------------------------------------|-------------------------------------------------- +//! 0 | [`i64`](std::i64) | `connection_id` | The `connection_id` retrieved from the establishing of the connection. | `0xC5_58_7C_09_08_48_D8_37` | `-4226491872051668937` +//! 8 | [`i32`](std::i32) | `action` | Action identifying the scrape request | `0x00_00_00_02` | `2` (`Scrape`) +//! 12 | [`i32`](std::i32) | `transaction_id` | Randomly generated by the client. | `0xA2_F9_54_48` | `-1560718264` +//! 16 + 20*n | 20 bytes | `info_hash` | The infohash of the torrent being scraped. | `0x03_84_05_48_64_3A_F2_A7_B6_3A_9F_5C_BC_A3_48_BC_71_50_CA_3A` | `20071130873666512363095721859061691407221705274` +//! 16 + 20*N | | | | +//! +//! The last field (`info_hash`) is repeated for each torrent being scraped. +//! +//! Dynamic part of the UDP packet: +//! +//! Offset | Type/Size | Name | Description | Hex | Decimal +//! ----------|-------------------|-------------|--------------------------------------------|-----------------------------------------------------------------|--------------------------------------------------- +//! 16 + 20*n | 20 bytes | `info_hash` | The infohash of the torrent being scraped. | `0x03_84_05_48_64_3A_F2_A7_B6_3A_9F_5C_BC_A3_48_BC_71_50_CA_3A` | `20071130873666512363095721859061691407221705274` +//! +//! **Sample scrape request (UDP packet)** +//! +//! UDP packet bytes (fixed part): +//! +//! ```text +//! Offset: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35] +//! Decimal: [ 197, 88, 124, 9, 8, 72, 216, 55, 0, 0, 0, 2, 162, 249, 84, 72, 3, 132, 5, 72, 100, 58, 242, 167, 182, 58, 159, 92, 188, 163, 72, 188, 113, 80, 202, 58] +//! 
Hex: [ 0xC5, 0x58, 0x7C, 0x09, 0x08, 0x48, 0xD8, 0x37, 0x00, 0x00, 0x00, 0x02, 0xA2, 0xF9, 0x54, 0x48, 0x03, 0x84, 0x05, 0x48, 0x64, 0x3A, 0xF2, 0xA7, 0xB6, 0x3A, 0x9F, 0x5C, 0xBC, 0xA3, 0x48, 0xBC, 0x71, 0x50, 0xCA, 0x3A] +//! Param: [<--------------- connection_id --------------->,<--------- action ---->,<-- transaction_id --->,<--------------------------------------------------------- info_hash ------------------------------------------------->] +//! ``` +//! +//! UDP packet bytes (infohash list): +//! +//! ```text +//! Offset: [ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35] +//! Decimal: [ 3, 132, 5, 72, 100, 58, 242, 167, 182, 58, 159, 92, 188, 163, 72, 188, 113, 80, 202, 58] +//! Hex: [ 0x03, 0x84, 0x05, 0x48, 0x64, 0x3A, 0xF2, 0xA7, 0xB6, 0x3A, 0x9F, 0x5C, 0xBC, 0xA3, 0x48, 0xBC, 0x71, 0x50, 0xCA, 0x3A] +//! Param: [<--------------------------------------------------------- info_hash ------------------------------------------------->] +//! ``` +//! +//! UDP packet fields: +//! +//! Offset | Type/Size | Name | Bytes Dec (Big Endian) | Hex | Decimal +//! -------|-------------------|------------------|--------------------------------------------------------------------------|-----------------------------------------------------------------|-------------------------------------------------- +//! 0 | [`i64`](std::i64) | `connection_id` | `[197,88,124,9,8,72,216,55]` | `0xC5_58_7C_09_08_48_D8_37` | `-4226491872051668937` +//! 8 | [`i32`](std::i32) | `action` | `[0, 0, 0, 2]` | `0x00_00_00_02` | `2` (`Scrape`) +//! 12 | [`i32`](std::i32) | `transaction_id` | `[162,249,84,72]` | `0xA2_F9_54_48` | `-1560718264` +//! 16 | 20 bytes | `info_hash` | `[3,132,5,72,100,58,242,167,182,58,159,92,188,163,72,188,113,80,202,58]` | `0x03_84_05_48_64_3A_F2_A7_B6_3A_9F_5C_BC_A3_48_BC_71_50_CA_3A` | `20071130873666512363095721859061691407221705274` +//! +//! **Scrape request (parsed struct)** +//! +//! 
After parsing the UDP packet, the [`ScrapeRequest`](aquatic_udp_protocol::request::ScrapeRequest) +//! struct will look like this: +//! +//! Field | Type | Example +//! -----------------|----------------------------------------------------------------|---------------------------------------------------------------------------- +//! `connection_id` | [`ConnectionId`](aquatic_udp_protocol::common::ConnectionId) | `-4226491872051668937` +//! `transaction_id` | [`TransactionId`](aquatic_udp_protocol::common::TransactionId) | `-1560718264` +//! `info_hashes` | Vector of [`InfoHash`](aquatic_udp_protocol::common::InfoHash) | `[[3,132,5,72,100,58,242,167,182,58,159,92,188,163,72,188,113,80,202,58]]` +//! +//! #### Scrape Response +//! +//! **Scrape response (UDP packet)** +//! +//! Offset | Type/Size | Name (BEP15 or libtorrent) | Description | Hex | Decimal +//! ----------|-------------------|-----------------------------|-------------------------------------------------------|-----------------|----------------- +//! 0 | [`i32`](std::i32) | `action` | Action identifying the scrape response | `0x00_00_00_02` | `2` (`Scrape`) +//! 4 | [`i32`](std::i32) | `transaction_id` | Must match the `transaction_id` sent from the client. | `0xA2_F9_54_48` | `-1560718264` +//! 8 + 12*n | [`i32`](std::i32) | `seeders` or `complete` | The current number of connected seeds. | `0x00_00_00_00` | `0` +//! 12 + 12*n | [`i32`](std::i32) | `completed` or `downloaded` | The number of times this torrent has been downloaded. | `0x00_00_00_00` | `0` +//! 16 + 12*n | [`i32`](std::i32) | `leechers` or `incomplete` | The current number of connected leechers. | `0x00_00_00_00` | `0` +//! 8 + 12*N | | | | | +//! +//! > **NOTICE**: `Hex` column is a signed 2's complement. +//! +//! Dynamic part of the UDP packet: +//! +//! Offset | Type/Size | Name (BEP15 or libtorrent) | Description | Hex | Decimal +//! 
----------|-------------------|-----------------------------|-------------------------------------------------------|-----------------|----------------- +//! 8 + 12*n | [`i32`](std::i32) | `seeders` or `complete` | The current number of connected seeds. | `0x00_00_00_00` | `0` +//! 12 + 12*n | [`i32`](std::i32) | `completed` or `downloaded` | The number of times this torrent has been downloaded. | `0x00_00_00_00` | `0` +//! 16 + 12*n | [`i32`](std::i32) | `leechers` or `incomplete` | The current number of connected leechers. | `0x00_00_00_00` | `0` +//! 8 + 12*N | | | | | +//! +//! For each info hash in the request there will be 3 32-bit integers (12 bytes) +//! in the response with the number of seeders, leechers and downloads. +//! +//! **Sample scrape response (UDP packet)** +//! +//! UDP packet bytes: +//! +//! ```text +//! Offset: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] +//! Decimal: [ 0, 0, 0, 2, 162, 249, 84, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] +//! Hex: [0x00, 0x00, 0x00, 0x02, 0xA2, 0xF9, 0x54, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] +//! Param: [<------ action ------>,<-- transaction_id --->,<------ seeders ------>,<----- completed ----->,<------ leechers ----->] +//! ``` +//! +//! UDP packet fields: +//! +//! Offset | Type/Size | Name | Bytes (Big Endian) | Hex | Decimal +//! -------|-------------------|------------------|--------------------|------------------|---------------- +//! 0 | [`i32`](std::i32) | `action` | [0, 0, 0, 2] | `0x00_00_00_02` | `2` (`Scrape`) +//! 4 | [`i32`](std::i32) | `transaction_id` | [162, 249, 84, 72] | `0xA2_F9_54_48` | `-1560718264` +//! 8 | [`i32`](std::i32) | `seeders` | [0, 0, 0, 0] | `0x00_00_00_00` | `0` +//! 12 | [`i32`](std::i32) | `completed` | [0, 0, 0, 0] | `0x00_00_00_00` | `0` +//! 16 | [`i32`](std::i32) | `leechers` | [0, 0, 0, 0] | `0x00_00_00_00` | `0` +//! +//! > **NOTICE**: `Hex` column is a signed 2's complement. +//! +//! 
**Scrape response (struct)** +//! +//! Before building the UDP packet, the [`ScrapeResponse`](aquatic_udp_protocol::response::ScrapeResponse) +//! struct will look like this: +//! +//! Field | Type | Example +//! -----------------|-------------------------------------------------------------------------------------------------|--------------- +//! `transaction_id` | [`TransactionId`](aquatic_udp_protocol::common::TransactionId) | `-1560718264` +//! `torrent_stats` | Vector of [`TorrentScrapeStatistics`](aquatic_udp_protocol::response::TorrentScrapeStatistics) | `[]` +//! +//! **Scrape specification** +//! +//! Original specification in [BEP 15. UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html). +//! +//! ## Errors +//! +//! ### Error Response +//! +//! **Error response (UDP packet)** +//! +//! Offset | Type/Size | Name | Description | Hex | Decimal +//! -------|-------------------|------------------|-------------------------------------------------------|-----------------------------|----------------------- +//! 0 | [`i32`](std::i32) | `action` | Action identifying the error response. | `0x00_00_00_03` | `3` +//! 4 | [`i32`](std::i32) | `transaction_id` | Must match the `transaction_id` sent from the client. | `0xCB_05_5E_07` | `-888840697` +//! 8 | N Bytes | `error_string` | Error description. | | +//! +//! ## Extensions +//! +//! Extensions described in [BEP 41. UDP Tracker Protocol Extensions](https://www.bittorrent.org/beps/bep_0041.html) +//! are not supported yet. +//! +//! ## Links +//! +//! - [BEP 15. UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html). +//! - [BEP 41. UDP Tracker Protocol Extensions](https://www.bittorrent.org/beps/bep_0041.html). +//! - [libtorrent - Bittorrent UDP-tracker protocol extension](https://www.rasterbar.com/products/libtorrent/udp_tracker_protocol.html). +//! - [XBTT Tracker. UDP tracker protocol](https://xbtt.sourceforge.net/udp_tracker_protocol.html). +//! 
- [Wikipedia: UDP tracker](https://en.wikipedia.org/wiki/UDP_tracker). +//! +//! ## Credits +//! +//! [Bittorrent UDP-tracker protocol extension](https://www.rasterbar.com/products/libtorrent/udp_tracker_protocol.html) +//! documentation by [Arvid Norberg](https://github.com/arvidn) was very +//! supportive in the development of this documentation. Some descriptions were +//! taken from the [libtorrent](https://www.rasterbar.com/products/libtorrent/udp_tracker_protocol.html). + +use std::net::SocketAddr; + +pub mod connection_cookie; +pub mod error; +pub mod handlers; +pub mod logging; +pub mod peer_builder; +pub mod server; + +pub const UDP_TRACKER_LOG_TARGET: &str = "UDP TRACKER"; + +/// Number of bytes. +pub type Bytes = u64; +/// The port the peer is listening on. +pub type Port = u16; +/// The transaction id. A random number generated by the peer that is used to +/// match requests and responses. +pub type TransactionId = i64; + +#[derive(Clone, Debug)] +pub struct RawRequest { + payload: Vec, + from: SocketAddr, +} diff --git a/src/servers/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs new file mode 100644 index 000000000..a42ddfaa5 --- /dev/null +++ b/src/servers/udp/peer_builder.rs @@ -0,0 +1,26 @@ +//! Logic to extract the peer info from the announce request. +use std::net::{IpAddr, SocketAddr}; + +use torrust_tracker_clock::clock::Time; +use torrust_tracker_primitives::peer; + +use crate::CurrentClock; + +/// Extracts the [`peer::Peer`] info from the +/// announce request. +/// +/// # Arguments +/// +/// * `peer_ip` - The real IP address of the peer, not the one in the announce request. 
+#[must_use] +pub fn from_request(announce_request: &aquatic_udp_protocol::AnnounceRequest, peer_ip: &IpAddr) -> peer::Peer { + peer::Peer { + peer_id: announce_request.peer_id, + peer_addr: SocketAddr::new(*peer_ip, announce_request.port.0.into()), + updated: CurrentClock::now(), + uploaded: announce_request.bytes_uploaded, + downloaded: announce_request.bytes_downloaded, + left: announce_request.bytes_left, + event: announce_request.event.into(), + } +} diff --git a/src/servers/udp/server/bound_socket.rs b/src/servers/udp/server/bound_socket.rs new file mode 100644 index 000000000..658589aa6 --- /dev/null +++ b/src/servers/udp/server/bound_socket.rs @@ -0,0 +1,70 @@ +use std::fmt::Debug; +use std::net::SocketAddr; +use std::ops::Deref; + +use url::Url; + +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; + +/// Wrapper for Tokio [`UdpSocket`][`tokio::net::UdpSocket`] that is bound to a particular socket. +pub struct BoundSocket { + socket: tokio::net::UdpSocket, +} + +impl BoundSocket { + /// # Errors + /// + /// Will return an error if the socket can't be bound to the provided address. + pub async fn new(addr: SocketAddr) -> Result> { + let bind_addr = format!("udp://{addr}"); + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, bind_addr, "UdpSocket::new (binding)"); + + let socket = tokio::net::UdpSocket::bind(addr).await; + + let socket = match socket { + Ok(socket) => socket, + Err(e) => Err(e)?, + }; + + let local_addr = format!("udp://{}", socket.local_addr()?); + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "UdpSocket::new (bound)"); + + Ok(Self { socket }) + } + + /// # Panics + /// + /// Will panic if the socket can't get the address it was bound to. + #[must_use] + pub fn address(&self) -> SocketAddr { + self.socket.local_addr().expect("it should get local address") + } + + /// # Panics + /// + /// Will panic if the address the socket was bound to is not a valid address + /// to be used in a URL. 
+ #[must_use] + pub fn url(&self) -> Url { + Url::parse(&format!("udp://{}", self.address())).expect("UDP socket address should be valid") + } +} + +impl Deref for BoundSocket { + type Target = tokio::net::UdpSocket; + + fn deref(&self) -> &Self::Target { + &self.socket + } +} + +impl Debug for BoundSocket { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let local_addr = match self.socket.local_addr() { + Ok(socket) => format!("Receiving From: {socket}"), + Err(err) => format!("Socket Broken: {err}"), + }; + + f.debug_struct("UdpSocket").field("addr", &local_addr).finish_non_exhaustive() + } +} diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs new file mode 100644 index 000000000..c9ad213f6 --- /dev/null +++ b/src/servers/udp/server/launcher.rs @@ -0,0 +1,160 @@ +use std::net::SocketAddr; +use std::sync::Arc; +use std::time::Duration; + +use derive_more::Constructor; +use futures_util::StreamExt; +use tokio::select; +use tokio::sync::oneshot; +use tracing::instrument; + +use super::request_buffer::ActiveRequests; +use crate::bootstrap::jobs::Started; +use crate::core::Tracker; +use crate::servers::logging::STARTED_ON; +use crate::servers::registar::ServiceHealthCheckJob; +use crate::servers::signals::{shutdown_signal_with_message, Halted}; +use crate::servers::udp::server::bound_socket::BoundSocket; +use crate::servers::udp::server::processor::Processor; +use crate::servers::udp::server::receiver::Receiver; +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; +use crate::shared::bit_torrent::tracker::udp::client::check; + +/// A UDP server instance launcher. +#[derive(Constructor)] +pub struct Launcher; + +impl Launcher { + /// It starts the UDP server instance with graceful shutdown. + /// + /// # Panics + /// + /// It panics if unable to bind to udp socket, and get the address from the udp socket. + /// It also panics if unable to send address of socket. 
+ #[instrument(skip(tracker, bind_to, tx_start, rx_halt))] + pub async fn run_with_graceful_shutdown( + tracker: Arc, + bind_to: SocketAddr, + tx_start: oneshot::Sender, + rx_halt: oneshot::Receiver, + ) { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting on: {bind_to}"); + + let socket = tokio::time::timeout(Duration::from_millis(5000), BoundSocket::new(bind_to)) + .await + .expect("it should bind to the socket within five seconds"); + + let bound_socket = match socket { + Ok(socket) => socket, + Err(e) => { + tracing::error!(target: UDP_TRACKER_LOG_TARGET, addr = %bind_to, err = %e, "Udp::run_with_graceful_shutdown panic! (error when building socket)" ); + panic!("could not bind to socket!"); + } + }; + + let address = bound_socket.address(); + let local_udp_url = bound_socket.url().to_string(); + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "{STARTED_ON}: {local_udp_url}"); + + let receiver = Receiver::new(bound_socket.into()); + + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (spawning main loop)"); + + let running = { + let local_addr = local_udp_url.clone(); + tokio::task::spawn(async move { + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_with_graceful_shutdown::task (listening...)"); + let () = Self::run_udp_server_main(receiver, tracker.clone()).await; + }) + }; + + tx_start + .send(Started { address }) + .expect("the UDP Tracker service should not be dropped"); + + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (started)"); + + let stop = running.abort_handle(); + + let halt_task = tokio::task::spawn(shutdown_signal_with_message( + rx_halt, + format!("Halting UDP Service Bound to Socket: {address}"), + )); + + select! 
{ + _ = running => { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (stopped)"); }, + _ = halt_task => { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (halting)"); } + } + stop.abort(); + + tokio::task::yield_now().await; // lets allow the other threads to complete. + } + + #[must_use] + #[instrument(skip(binding))] + pub fn check(binding: &SocketAddr) -> ServiceHealthCheckJob { + let binding = *binding; + let info = format!("checking the udp tracker health check at: {binding}"); + + let job = tokio::spawn(async move { check(&binding).await }); + + ServiceHealthCheckJob::new(binding, info, job) + } + + #[instrument(skip(receiver, tracker))] + async fn run_udp_server_main(mut receiver: Receiver, tracker: Arc) { + let active_requests = &mut ActiveRequests::default(); + + let addr = receiver.bound_socket_address(); + let local_addr = format!("udp://{addr}"); + + loop { + let processor = Processor::new(receiver.socket.clone(), tracker.clone()); + + if let Some(req) = { + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server (wait for request)"); + receiver.next().await + } { + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop (in)"); + + let req = match req { + Ok(req) => req, + Err(e) => { + if e.kind() == std::io::ErrorKind::Interrupted { + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, local_addr, err = %e, "Udp::run_udp_server::loop (interrupted)"); + return; + } + tracing::error!(target: UDP_TRACKER_LOG_TARGET, local_addr, err = %e, "Udp::run_udp_server::loop break: (got error)"); + break; + } + }; + + // We spawn the new task even if there active requests buffer is + // full. This could seem counterintuitive because we are accepting + // more request and consuming more memory even if the server is + // already busy. However, we "force_push" the new tasks in the + // buffer. 
That means, in the worst scenario we will abort a + // running task to make place for the new task. + // + // One concern could be to reach a starvation point where we + // are only adding and removing tasks without giving them the + // chance to finish. However, the buffer is yielding before + // aborting one task, giving it the chance to finish. + let abort_handle: tokio::task::AbortHandle = tokio::task::spawn(processor.process_request(req)).abort_handle(); + + if abort_handle.is_finished() { + continue; + } + + active_requests.force_push(abort_handle, &local_addr).await; + } else { + tokio::task::yield_now().await; + + // the request iterator returned `None`. + tracing::error!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server breaking: (ran dry, should not happen in production!)"); + break; + } + } + } +} diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs new file mode 100644 index 000000000..d81624cb2 --- /dev/null +++ b/src/servers/udp/server/mod.rs @@ -0,0 +1,178 @@ +//! Module to handle the UDP server instances. +use std::fmt::Debug; + +use derive_more::derive::Display; +use thiserror::Error; + +use super::RawRequest; + +pub mod bound_socket; +pub mod launcher; +pub mod processor; +pub mod receiver; +pub mod request_buffer; +pub mod spawner; +pub mod states; + +/// Error that can occur when starting or stopping the UDP server. +/// +/// Some errors triggered while starting the server are: +/// +/// - The server cannot bind to the given address. +/// - It cannot get the bound address. +/// +/// Some errors triggered while stopping the server are: +/// +/// - The [`Server`] cannot send the shutdown signal to the spawned UDP service thread. +#[derive(Debug, Error)] +pub enum UdpError { +    #[error("Any error to do with the socket")] +    FailedToBindSocket(std::io::Error), + +    #[error("Any error to do with starting or stopping the sever")] +    FailedToStartOrStopServer(String), +} + +/// A UDP server. 
+/// +/// It's a state machine. Configurations cannot be changed. This struct +/// represents concrete configuration and state. It allows starting and stopping the +/// server but always keeping the same configuration. +/// +/// > **NOTICE**: if the configuration changes after running the server it will +/// > reset to the initial value after stopping the server. This struct is not +/// > intended to persist configurations between runs. +#[allow(clippy::module_name_repetitions)] +#[derive(Debug, Display)] +pub struct Server +where +    S: std::fmt::Debug + std::fmt::Display, +{ +    /// The state of the server: `running` or `stopped`. +    pub state: S, +} + +#[cfg(test)] +mod tests { +    use std::sync::Arc; +    use std::time::Duration; + +    use torrust_tracker_test_helpers::configuration::ephemeral_public; + +    use super::spawner::Spawner; +    use super::Server; +    use crate::bootstrap::app::initialize_with_configuration; +    use crate::servers::registar::Registar; + +    #[tokio::test] +    async fn it_should_be_able_to_start_and_stop() { +        let cfg = Arc::new(ephemeral_public()); +        let tracker = initialize_with_configuration(&cfg); +        let udp_trackers = cfg.udp_trackers.clone().expect("missing UDP trackers configuration"); +        let config = &udp_trackers[0]; +        let bind_to = config.bind_address; +        let register = &Registar::default(); + +        let stopped = Server::new(Spawner::new(bind_to)); + +        let started = stopped +            .start(tracker, register.give_form()) +            .await +            .expect("it should start the server"); + +        let stopped = started.stop().await.expect("it should stop the server"); + +        tokio::time::sleep(Duration::from_secs(1)).await; + +        assert_eq!(stopped.state.spawner.bind_to, bind_to); +    } + +    #[tokio::test] +    async fn it_should_be_able_to_start_and_stop_with_wait() { +        let cfg = Arc::new(ephemeral_public()); +        let tracker = initialize_with_configuration(&cfg); +        let config = &cfg.udp_trackers.as_ref().unwrap().first().unwrap(); +        let bind_to = config.bind_address; +        let register = 
&Registar::default(); + +        let stopped = Server::new(Spawner::new(bind_to)); + +        let started = stopped +            .start(tracker, register.give_form()) +            .await +            .expect("it should start the server"); + +        tokio::time::sleep(Duration::from_secs(1)).await; + +        let stopped = started.stop().await.expect("it should stop the server"); + +        tokio::time::sleep(Duration::from_secs(1)).await; + +        assert_eq!(stopped.state.spawner.bind_to, bind_to); +    } +} + +/// Todo: submit test to tokio documentation. +#[cfg(test)] +mod test_tokio { +    use std::sync::Arc; +    use std::time::Duration; + +    use tokio::sync::Barrier; +    use tokio::task::JoinSet; + +    #[tokio::test] +    async fn test_barrier_with_aborted_tasks() { +        // Create a barrier that requires 10 tasks to proceed. +        let barrier = Arc::new(Barrier::new(10)); +        let mut tasks = JoinSet::default(); +        let mut handles = Vec::default(); + +        // Set Barrier to 9/10. +        for _ in 0..9 { +            let c = barrier.clone(); +            handles.push(tasks.spawn(async move { +                c.wait().await; +            })); +        } + +        // Abort two tasks: Barrier: 7/10. +        for _ in 0..2 { +            if let Some(handle) = handles.pop() { +                handle.abort(); +            } +        } + +        // Spawn a single task: Barrier 8/10. +        let c = barrier.clone(); +        handles.push(tasks.spawn(async move { +            c.wait().await; +        })); + +        // give a chance for the barrier to release. +        tokio::time::sleep(Duration::from_millis(50)).await; + +        // assert that the barrier isn't removed, i.e. 8, not 10. +        for h in &handles { +            assert!(!h.is_finished()); +        } + +        // Spawn two more tasks to trigger the barrier release: Barrier 10/10. +        for _ in 0..2 { +            let c = barrier.clone(); +            handles.push(tasks.spawn(async move { +                c.wait().await; +            })); +        } + +        // give a chance for the barrier to release. 
+ tokio::time::sleep(Duration::from_millis(50)).await; + + // assert that the barrier has been triggered + for h in &handles { + assert!(h.is_finished()); + } + + tasks.shutdown().await; + } +} diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs new file mode 100644 index 000000000..9fa28a44d --- /dev/null +++ b/src/servers/udp/server/processor.rs @@ -0,0 +1,72 @@ +use std::io::Cursor; +use std::net::SocketAddr; +use std::sync::Arc; + +use aquatic_udp_protocol::Response; +use tracing::{instrument, Level}; + +use super::bound_socket::BoundSocket; +use crate::core::Tracker; +use crate::servers::udp::{handlers, RawRequest}; + +pub struct Processor { + socket: Arc, + tracker: Arc, +} + +impl Processor { + pub fn new(socket: Arc, tracker: Arc) -> Self { + Self { socket, tracker } + } + + #[instrument(skip(self, request))] + pub async fn process_request(self, request: RawRequest) { + let from = request.from; + let response = handlers::handle_packet(request, &self.tracker, self.socket.address()).await; + self.send_response(from, response).await; + } + + #[instrument(skip(self))] + async fn send_response(self, target: SocketAddr, response: Response) { + tracing::debug!("send response"); + + let response_type = match &response { + Response::Connect(_) => "Connect".to_string(), + Response::AnnounceIpv4(_) => "AnnounceIpv4".to_string(), + Response::AnnounceIpv6(_) => "AnnounceIpv6".to_string(), + Response::Scrape(_) => "Scrape".to_string(), + Response::Error(e) => format!("Error: {e:?}"), + }; + + let mut writer = Cursor::new(Vec::with_capacity(200)); + + match response.write_bytes(&mut writer) { + Ok(()) => { + let bytes_count = writer.get_ref().len(); + let payload = writer.get_ref(); + + let () = match self.send_packet(&target, payload).await { + Ok(sent_bytes) => { + if tracing::event_enabled!(Level::TRACE) { + tracing::debug!(%bytes_count, %sent_bytes, ?payload, "sent {response_type}"); + } else { + tracing::debug!(%bytes_count, 
%sent_bytes, "sent {response_type}"); + } + } + Err(error) => tracing::warn!(%bytes_count, %error, ?payload, "failed to send"), + }; + } + Err(e) => { + tracing::error!(%e, "error"); + } + } + } + + #[instrument(skip(self))] + async fn send_packet(&self, target: &SocketAddr, payload: &[u8]) -> std::io::Result { + tracing::trace!("send packet"); + + // doesn't matter if it reaches or not + self.socket.send_to(payload, target).await + } +} diff --git a/src/servers/udp/server/receiver.rs b/src/servers/udp/server/receiver.rs new file mode 100644 index 000000000..0176930a4 --- /dev/null +++ b/src/servers/udp/server/receiver.rs @@ -0,0 +1,54 @@ +use std::cell::RefCell; +use std::net::SocketAddr; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; + +use futures::Stream; + +use super::bound_socket::BoundSocket; +use super::RawRequest; +use crate::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; + +pub struct Receiver { + pub socket: Arc, + data: RefCell<[u8; MAX_PACKET_SIZE]>, +} + +impl Receiver { + #[must_use] + pub fn new(bound_socket: Arc) -> Self { + Receiver { + socket: bound_socket, + data: RefCell::new([0; MAX_PACKET_SIZE]), + } + } + + pub fn bound_socket_address(&self) -> SocketAddr { + self.socket.address() + } +} + +impl Stream for Receiver { + type Item = std::io::Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut buf = *self.data.borrow_mut(); + let mut buf = tokio::io::ReadBuf::new(&mut buf); + + let Poll::Ready(ready) = self.socket.poll_recv_from(cx, &mut buf) else { + return Poll::Pending; + }; + + let res = match ready { + Ok(from) => { + let payload = buf.filled().to_vec(); + let request = RawRequest { payload, from }; + Some(Ok(request)) + } + Err(err) => Some(Err(err)), + }; + + Poll::Ready(res) + } +} diff --git a/src/servers/udp/server/request_buffer.rs b/src/servers/udp/server/request_buffer.rs new file mode 100644 index 000000000..ffbd9565d --- /dev/null +++ 
b/src/servers/udp/server/request_buffer.rs @@ -0,0 +1,140 @@ +use ringbuf::traits::{Consumer, Observer, Producer}; +use ringbuf::StaticRb; +use tokio::task::AbortHandle; + +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; + +/// A ring buffer for managing active UDP request abort handles. +/// +/// The `ActiveRequests` struct maintains a fixed-size ring buffer of abort +/// handles for UDP request processor tasks. It ensures that at most 50 requests +/// are handled concurrently, and provides mechanisms to handle buffer overflow +/// by removing finished or oldest unfinished tasks. +#[derive(Default)] +pub struct ActiveRequests { + rb: StaticRb, // The number of requests handled simultaneously. +} + +impl std::fmt::Debug for ActiveRequests { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let (left, right) = &self.rb.as_slices(); + let dbg = format!("capacity: {}, left: {left:?}, right: {right:?}", &self.rb.capacity()); + f.debug_struct("ActiveRequests").field("rb", &dbg).finish() + } +} + +impl Drop for ActiveRequests { + fn drop(&mut self) { + for h in self.rb.pop_iter() { + if !h.is_finished() { + h.abort(); + } + } + } +} + +impl ActiveRequests { + /// Inserts an abort handle for a UDP request processor task. + /// + /// If the buffer is full, this method attempts to make space by: + /// + /// 1. Removing finished tasks. + /// 2. Removing the oldest unfinished task if no finished tasks are found. + /// + /// # Panics + /// + /// This method will panic if it cannot make space for adding a new handle. + /// + /// # Arguments + /// + /// * `abort_handle` - The `AbortHandle` for the UDP request processor task. + /// * `local_addr` - A string slice representing the local address for logging. + pub async fn force_push(&mut self, new_task: AbortHandle, local_addr: &str) { + // Attempt to add the new handle to the buffer. + match self.rb.try_push(new_task) { + Ok(()) => { + // Successfully added the task, no further action needed. 
+            } +            Err(new_task) => { +                // Buffer is full, attempt to make space. + +                let mut finished: u64 = 0; +                let mut unfinished_task = None; + +                for old_task in self.rb.pop_iter() { +                    // We found a finished task ... increase the counter and +                    // continue searching for more and ... +                    if old_task.is_finished() { +                        finished += 1; +                        continue; +                    } + +                    // The current removed task is not finished. + +                    // Give it a second chance to finish. +                    tokio::task::yield_now().await; + +                    // Recheck if it finished ... increase the counter and +                    // continue searching for more and ... +                    if old_task.is_finished() { +                        finished += 1; +                        continue; +                    } + +                    // At this point we found a "definitive" unfinished task. + +                    // Log unfinished task. +                    tracing::debug!( +                        target: UDP_TRACKER_LOG_TARGET, +                        local_addr, +                        removed_count = finished, +                        "Udp::run_udp_server::loop (got unfinished task)" +                    ); + +                    // If no finished tasks were found, abort the current +                    // unfinished task. +                    if finished == 0 { +                        // We make space by aborting this task. +                        old_task.abort(); + +                        tracing::warn!( +                            target: UDP_TRACKER_LOG_TARGET, +                            local_addr, +                            "Udp::run_udp_server::loop aborting request: (no finished tasks)" +                        ); + +                        break; +                    } + +                    // At this point we found at least one finished task, but the +                    // current one is not finished and it was removed from the +                    // buffer, so we need to re-insert it in the buffer. + +                    // Save the unfinished task for re-entry. +                    unfinished_task = Some(old_task); +                } + +                // After this point there can't be a race condition because only +                // one thread owns the active buffer. There is no way for the +                // buffer to be full again. That means the "expects" should +                // never happen. + +                // Reinsert the unfinished task if any. +                if let Some(h) = unfinished_task { +                    self.rb.try_push(h).expect("it was previously inserted"); +                } + +                // Insert the new task. +                // +                // Notice that space has already been made for this new task in +                // the buffer. 
One or many old task have already been finished + // or yielded, freeing space in the buffer. Or a single + // unfinished task has been aborted to make space for this new + // task. + if !new_task.is_finished() { + self.rb.try_push(new_task).expect("it should have space for this new task."); + } + } + }; + } +} diff --git a/src/servers/udp/server/spawner.rs b/src/servers/udp/server/spawner.rs new file mode 100644 index 000000000..dea293ad7 --- /dev/null +++ b/src/servers/udp/server/spawner.rs @@ -0,0 +1,40 @@ +//! A thin wrapper for tokio spawn to launch the UDP server launcher as a new task. +use std::net::SocketAddr; +use std::sync::Arc; + +use derive_more::derive::Display; +use derive_more::Constructor; +use tokio::sync::oneshot; +use tokio::task::JoinHandle; + +use super::launcher::Launcher; +use crate::bootstrap::jobs::Started; +use crate::core::Tracker; +use crate::servers::signals::Halted; + +#[derive(Constructor, Copy, Clone, Debug, Display)] +#[display("(with socket): {bind_to}")] +pub struct Spawner { + pub bind_to: SocketAddr, +} + +impl Spawner { + /// It spawns a new task to run the UDP server instance. + /// + /// # Panics + /// + /// It would panic if unable to resolve the `local_addr` from the supplied ´socket´. 
+ pub fn spawn_launcher( + &self, + tracker: Arc, + tx_start: oneshot::Sender, + rx_halt: oneshot::Receiver, + ) -> JoinHandle { + let spawner = Self::new(self.bind_to); + + tokio::spawn(async move { + Launcher::run_with_graceful_shutdown(tracker, spawner.bind_to, tx_start, rx_halt).await; + spawner + }) + } +} diff --git a/src/servers/udp/server/states.rs b/src/servers/udp/server/states.rs new file mode 100644 index 000000000..e90c4da54 --- /dev/null +++ b/src/servers/udp/server/states.rs @@ -0,0 +1,121 @@ +use std::fmt::Debug; +use std::net::SocketAddr; +use std::sync::Arc; + +use derive_more::derive::Display; +use derive_more::Constructor; +use tokio::task::JoinHandle; +use tracing::{instrument, Level}; + +use super::spawner::Spawner; +use super::{Server, UdpError}; +use crate::bootstrap::jobs::Started; +use crate::core::Tracker; +use crate::servers::registar::{ServiceRegistration, ServiceRegistrationForm}; +use crate::servers::signals::Halted; +use crate::servers::udp::server::launcher::Launcher; +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; + +/// A UDP server instance controller with no UDP instance running. +#[allow(clippy::module_name_repetitions)] +pub type StoppedUdpServer = Server; + +/// A UDP server instance controller with a running UDP instance. +#[allow(clippy::module_name_repetitions)] +pub type RunningUdpServer = Server; + +/// A stopped UDP server state. +#[derive(Debug, Display)] +#[display("Stopped: {spawner}")] +pub struct Stopped { + pub spawner: Spawner, +} + +/// A running UDP server state. +#[derive(Debug, Display, Constructor)] +#[display("Running (with local address): {local_addr}")] +pub struct Running { + /// The address where the server is bound. + pub local_addr: SocketAddr, + pub halt_task: tokio::sync::oneshot::Sender, + pub task: JoinHandle, +} + +impl Server { + /// Creates a new `UdpServer` instance in `stopped`state. 
+ #[must_use] + pub fn new(spawner: Spawner) -> Self { + Self { + state: Stopped { spawner }, + } + } + + /// It starts the server and returns a `UdpServer` controller in `running` + /// state. + /// + /// # Errors + /// + /// Will return `Err` if UDP can't bind to given bind address. + /// + /// # Panics + /// + /// It panics if unable to receive the bound socket address from service. + /// + #[instrument(skip(self, tracker, form), err, ret(Display, level = Level::INFO))] + pub async fn start(self, tracker: Arc, form: ServiceRegistrationForm) -> Result, std::io::Error> { + let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); + let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); + + assert!(!tx_halt.is_closed(), "Halt channel for UDP tracker should be open"); + + // May need to wrap in a task to about a tokio bug. + let task = self.state.spawner.spawn_launcher(tracker, tx_start, rx_halt); + + let local_addr = rx_start.await.expect("it should be able to start the service").address; + + form.send(ServiceRegistration::new(local_addr, Launcher::check)) + .expect("it should be able to send service registration"); + + let running_udp_server: Server = Server { + state: Running { + local_addr, + halt_task: tx_halt, + task, + }, + }; + + let local_addr = format!("udp://{local_addr}"); + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "UdpServer::start (running)"); + + Ok(running_udp_server) + } +} + +impl Server { + /// It stops the server and returns a `UdpServer` controller in `stopped` + /// state. + /// + /// # Errors + /// + /// Will return `Err` if the oneshot channel to send the stop signal + /// has already been called once. + /// + /// # Panics + /// + /// It panics if unable to shutdown service. 
+ #[instrument(skip(self), err, ret(Display, level = Level::INFO))] + pub async fn stop(self) -> Result, UdpError> { + self.state + .halt_task + .send(Halted::Normal) + .map_err(|e| UdpError::FailedToStartOrStopServer(e.to_string()))?; + + let launcher = self.state.task.await.expect("it should shutdown service"); + + let stopped_api_server: Server = Server { + state: Stopped { spawner: launcher }, + }; + + Ok(stopped_api_server) + } +} diff --git a/src/shared/bit_torrent/common.rs b/src/shared/bit_torrent/common.rs new file mode 100644 index 000000000..46026ac47 --- /dev/null +++ b/src/shared/bit_torrent/common.rs @@ -0,0 +1,22 @@ +//! `BitTorrent` protocol primitive types +//! +//! [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) + +/// The maximum number of torrents that can be returned in an `scrape` response. +/// +/// The [BEP 15. UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html) +/// defines this limit: +/// +/// "Up to about 74 torrents can be scraped at once. A full scrape can't be done +/// with this protocol." +/// +/// The [BEP 48. Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) +/// does not specifically mention this limit, but the limit is being used for +/// both the UDP and HTTP trackers since it's applied at the domain level. +pub const MAX_SCRAPE_TORRENTS: u8 = 74; + +/// HTTP tracker authentication key length. +/// +/// For more information see function [`generate_key`](crate::core::auth::generate_key) to generate the +/// [`PeerKey`](crate::core::auth::PeerKey). +pub const AUTH_KEY_LENGTH: usize = 32; diff --git a/src/shared/bit_torrent/info_hash.rs b/src/shared/bit_torrent/info_hash.rs new file mode 100644 index 000000000..506c37758 --- /dev/null +++ b/src/shared/bit_torrent/info_hash.rs @@ -0,0 +1,288 @@ +//! A `BitTorrent` `InfoHash`. It's a unique identifier for a `BitTorrent` torrent. +//! +//! 
"The 20-byte sha1 hash of the bencoded form of the info value +//! from the metainfo file." +//! +//! See [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +//! for the official specification. +//! +//! This modules provides a type that can be used to represent infohashes. +//! +//! > **NOTICE**: It only supports Info Hash v1. +//! +//! Typically infohashes are represented as hex strings, but internally they are +//! a 20-byte array. +//! +//! # Calculating the info-hash of a torrent file +//! +//! A sample torrent: +//! +//! - Torrent file: `mandelbrot_2048x2048_infohash_v1.png.torrent` +//! - File: `mandelbrot_2048x2048.png` +//! - Info Hash v1: `5452869be36f9f3350ccee6b4544e7e76caaadab` +//! - Sha1 hash of the info dictionary: `5452869BE36F9F3350CCEE6B4544E7E76CAAADAB` +//! +//! A torrent file is a binary file encoded with [Bencode encoding](https://en.wikipedia.org/wiki/Bencode): +//! +//! ```text +//! 0000000: 6431 303a 6372 6561 7465 6420 6279 3138 d10:created by18 +//! 0000010: 3a71 4269 7474 6f72 7265 6e74 2076 342e :qBittorrent v4. +//! 0000020: 342e 3131 333a 6372 6561 7469 6f6e 2064 4.113:creation d +//! 0000030: 6174 6569 3136 3739 3637 3436 3238 6534 atei1679674628e4 +//! 0000040: 3a69 6e66 6f64 363a 6c65 6e67 7468 6931 :infod6:lengthi1 +//! 0000050: 3732 3230 3465 343a 6e61 6d65 3234 3a6d 72204e4:name24:m +//! 0000060: 616e 6465 6c62 726f 745f 3230 3438 7832 andelbrot_2048x2 +//! 0000070: 3034 382e 706e 6731 323a 7069 6563 6520 048.png12:piece +//! 0000080: 6c65 6e67 7468 6931 3633 3834 6536 3a70 lengthi16384e6:p +//! 0000090: 6965 6365 7332 3230 3a7d 9171 0d9d 4dba ieces220:}.q..M. +//! 00000a0: 889b 5420 54d5 2672 8d5a 863f e121 df77 ..T T.&r.Z.?.!.w +//! 00000b0: c7f7 bb6c 7796 2166 2538 c5d9 cdab 8b08 ...lw.!f%8...... +//! 00000c0: ef8c 249b b2f5 c4cd 2adf 0bc0 0cf0 addf ..$.....*....... +//! 00000d0: 7290 e5b6 414c 236c 479b 8e9f 46aa 0c0d r...AL#lG...F... +//! 
00000e0: 8ed1 97ff ee68 8b5f 34a3 87d7 71c5 a6f9 .....h._4...q... +//! 00000f0: 8e2e a631 7cbd f0f9 e223 f9cc 80af 5400 ...1|....#....T. +//! 0000100: 04f9 8569 1c77 89c1 764e d6aa bf61 a6c2 ...i.w..vN...a.. +//! 0000110: 8099 abb6 5f60 2f40 a825 be32 a33d 9d07 ...._`/@.%.2.=.. +//! 0000120: 0c79 6898 d49d 6349 af20 5866 266f 986b .yh...cI. Xf&o.k +//! 0000130: 6d32 34cd 7d08 155e 1ad0 0009 57ab 303b m24.}..^....W.0; +//! 0000140: 2060 c1dc 1287 d6f3 e745 4f70 6709 3631 `.......EOpg.61 +//! 0000150: 55f2 20f6 6ca5 156f 2c89 9569 1653 817d U. .l..o,..i.S.} +//! 0000160: 31f1 b6bd 3742 cc11 0bb2 fc2b 49a5 85b6 1...7B.....+I... +//! 0000170: fc76 7444 9365 65 .vtD.ee +//! ``` +//! +//! You can generate that output with the command: +//! +//! ```text +//! xxd mandelbrot_2048x2048_infohash_v1.png.torrent +//! ``` +//! +//! And you can show only the bytes (hexadecimal): +//! +//! ```text +//! 6431303a6372656174656420627931383a71426974746f7272656e742076 +//! 342e342e3131333a6372656174696f6e2064617465693136373936373436 +//! 323865343a696e666f64363a6c656e6774686931373232303465343a6e61 +//! 6d6532343a6d616e64656c62726f745f3230343878323034382e706e6731 +//! 323a7069656365206c656e67746869313633383465363a70696563657332 +//! 32303a7d91710d9d4dba889b542054d526728d5a863fe121df77c7f7bb6c +//! 779621662538c5d9cdab8b08ef8c249bb2f5c4cd2adf0bc00cf0addf7290 +//! e5b6414c236c479b8e9f46aa0c0d8ed197ffee688b5f34a387d771c5a6f9 +//! 8e2ea6317cbdf0f9e223f9cc80af540004f985691c7789c1764ed6aabf61 +//! a6c28099abb65f602f40a825be32a33d9d070c796898d49d6349af205866 +//! 266f986b6d3234cd7d08155e1ad0000957ab303b2060c1dc1287d6f3e745 +//! 4f706709363155f220f66ca5156f2c8995691653817d31f1b6bd3742cc11 +//! 0bb2fc2b49a585b6fc767444936565 +//! ``` +//! +//! You can generate that output with the command: +//! +//! ```text +//! `xxd -ps mandelbrot_2048x2048_infohash_v1.png.torrent`. +//! ``` +//! +//! The same data can be represented in a JSON format: +//! +//! ```json +//! { +//! 
"created by": "qBittorrent v4.4.1", +//! "creation date": 1679674628, +//! "info": { +//! "length": 172204, +//! "name": "mandelbrot_2048x2048.png", +//! "piece length": 16384, +//! "pieces": "7D 91 71 0D 9D 4D BA 88 9B 54 20 54 D5 26 72 8D 5A 86 3F E1 21 DF 77 C7 F7 BB 6C 77 96 21 66 25 38 C5 D9 CD AB 8B 08 EF 8C 24 9B B2 F5 C4 CD 2A DF 0B C0 0C F0 AD DF 72 90 E5 B6 41 4C 23 6C 47 9B 8E 9F 46 AA 0C 0D 8E D1 97 FF EE 68 8B 5F 34 A3 87 D7 71 C5 A6 F9 8E 2E A6 31 7C BD F0 F9 E2 23 F9 CC 80 AF 54 00 04 F9 85 69 1C 77 89 C1 76 4E D6 AA BF 61 A6 C2 80 99 AB B6 5F 60 2F 40 A8 25 BE 32 A3 3D 9D 07 0C 79 68 98 D4 9D 63 49 AF 20 58 66 26 6F 98 6B 6D 32 34 CD 7D 08 15 5E 1A D0 00 09 57 AB 30 3B 20 60 C1 DC 12 87 D6 F3 E7 45 4F 70 67 09 36 31 55 F2 20 F6 6C A5 15 6F 2C 89 95 69 16 53 81 7D 31 F1 B6 BD 37 42 CC 11 0B B2 FC 2B 49 A5 85 B6 FC 76 74 44 93" +//! } +//! } +//! ``` +//! +//! The JSON object was generated with: +//! +//! As you can see, there is a `info` attribute: +//! +//! ```json +//! { +//! "length": 172204, +//! "name": "mandelbrot_2048x2048.png", +//! "piece length": 16384, +//! "pieces": "7D 91 71 0D 9D 4D BA 88 9B 54 20 54 D5 26 72 8D 5A 86 3F E1 21 DF 77 C7 F7 BB 6C 77 96 21 66 25 38 C5 D9 CD AB 8B 08 EF 8C 24 9B B2 F5 C4 CD 2A DF 0B C0 0C F0 AD DF 72 90 E5 B6 41 4C 23 6C 47 9B 8E 9F 46 AA 0C 0D 8E D1 97 FF EE 68 8B 5F 34 A3 87 D7 71 C5 A6 F9 8E 2E A6 31 7C BD F0 F9 E2 23 F9 CC 80 AF 54 00 04 F9 85 69 1C 77 89 C1 76 4E D6 AA BF 61 A6 C2 80 99 AB B6 5F 60 2F 40 A8 25 BE 32 A3 3D 9D 07 0C 79 68 98 D4 9D 63 49 AF 20 58 66 26 6F 98 6B 6D 32 34 CD 7D 08 15 5E 1A D0 00 09 57 AB 30 3B 20 60 C1 DC 12 87 D6 F3 E7 45 4F 70 67 09 36 31 55 F2 20 F6 6C A5 15 6F 2C 89 95 69 16 53 81 7D 31 F1 B6 BD 37 42 CC 11 0B B2 FC 2B 49 A5 85 B6 FC 76 74 44 93" +//! } +//! ``` +//! +//! The infohash is the [SHA1](https://en.wikipedia.org/wiki/SHA-1) hash +//! of the `info` attribute. That is, the SHA1 hash of: +//! +//! ```text +//! 64363a6c656e6774686931373232303465343a6e61 +//! 
d6532343a6d616e64656c62726f745f3230343878323034382e706e6731 +//! 23a7069656365206c656e67746869313633383465363a70696563657332 +//! 2303a7d91710d9d4dba889b542054d526728d5a863fe121df77c7f7bb6c +//! 79621662538c5d9cdab8b08ef8c249bb2f5c4cd2adf0bc00cf0addf7290 +//! 5b6414c236c479b8e9f46aa0c0d8ed197ffee688b5f34a387d771c5a6f9 +//! e2ea6317cbdf0f9e223f9cc80af540004f985691c7789c1764ed6aabf61 +//! 6c28099abb65f602f40a825be32a33d9d070c796898d49d6349af205866 +//! 66f986b6d3234cd7d08155e1ad0000957ab303b2060c1dc1287d6f3e745 +//! f706709363155f220f66ca5156f2c8995691653817d31f1b6bd3742cc11 +//! bb2fc2b49a585b6fc7674449365 +//! ``` +//! +//! You can hash that byte string with +//! +//! The result is a 20-char string: `5452869BE36F9F3350CCEE6B4544E7E76CAAADAB` + +use torrust_tracker_primitives::info_hash::InfoHash; + +pub mod fixture { + use std::hash::{DefaultHasher, Hash, Hasher}; + + use super::InfoHash; + + /// Generate as semi-stable pseudo-random infohash + /// + /// Note: If the [`DefaultHasher`] implementation changes + /// so will the resulting info-hashes. + /// + /// The results should not be relied upon between versions. 
    #[must_use]
    pub fn gen_seeded_infohash(seed: &u64) -> InfoHash {
        // Four 8-byte chunks; together they cover the 20-byte info-hash
        // (the surplus 12 bytes of the last chunks are dropped by `zip` below).
        let mut buf_a: [[u8; 8]; 4] = Default::default();
        let mut buf_b = InfoHash::default();

        let mut hasher = DefaultHasher::new();
        seed.hash(&mut hasher);

        // The hasher state accumulates: the seed is hashed again for every
        // chunk, so each chunk receives a different 64-bit value.
        for u in &mut buf_a {
            seed.hash(&mut hasher);
            *u = hasher.finish().to_le_bytes();
        }

        // Copy as many generated bytes as fit into the 20-byte hash.
        for (a, b) in buf_a.iter().flat_map(|a| a.iter()).zip(buf_b.0.iter_mut()) {
            *b = *a;
        }

        buf_b
    }
}

#[cfg(test)]
mod tests {

    use std::str::FromStr;

    use serde::{Deserialize, Serialize};
    use serde_json::json;

    use super::InfoHash;

    // Wrapper used to exercise (de)serialization of an `InfoHash` field.
    #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)]
    struct ContainingInfoHash {
        pub info_hash: InfoHash,
    }

    #[test]
    fn an_info_hash_can_be_created_from_a_valid_40_utf8_char_string_representing_an_hexadecimal_value() {
        let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF");
        assert!(info_hash.is_ok());
    }

    #[test]
    fn an_info_hash_can_not_be_created_from_a_utf8_string_representing_a_not_valid_hexadecimal_value() {
        let info_hash = InfoHash::from_str("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG");
        assert!(info_hash.is_err());
    }

    #[test]
    fn an_info_hash_can_only_be_created_from_a_40_utf8_char_string() {
        // One char too short and one char too long must both be rejected.
        let info_hash = InfoHash::from_str(&"F".repeat(39));
        assert!(info_hash.is_err());

        let info_hash = InfoHash::from_str(&"F".repeat(41));
        assert!(info_hash.is_err());
    }

    #[test]
    fn an_info_hash_should_by_displayed_like_a_40_utf8_lowercased_char_hex_string() {
        let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap();

        let output = format!("{info_hash}");

        assert_eq!(output, "ffffffffffffffffffffffffffffffffffffffff");
    }

    #[test]
    fn an_info_hash_should_return_its_a_40_utf8_lowercased_char_hex_representations_as_string() {
        let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap();

        assert_eq!(info_hash.to_hex_string(), "ffffffffffffffffffffffffffffffffffffffff");
    }

    #[test]
    fn an_info_hash_can_be_created_from_a_valid_20_byte_array_slice() {
        let info_hash: InfoHash = [255u8; 20].as_slice().into();

        assert_eq!(
            info_hash,
            InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap()
        );
    }

    #[test]
    fn an_info_hash_can_be_created_from_a_valid_20_byte_array() {
        let info_hash: InfoHash = [255u8; 20].into();

        assert_eq!(
            info_hash,
            InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap()
        );
    }

    #[test]
    fn an_info_hash_can_be_created_from_a_byte_vector() {
        let info_hash: InfoHash = [255u8; 20].to_vec().try_into().unwrap();

        assert_eq!(
            info_hash,
            InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap()
        );
    }

    #[test]
    fn it_should_fail_trying_to_create_an_info_hash_from_a_byte_vector_with_less_than_20_bytes() {
        assert!(InfoHash::try_from([255u8; 19].to_vec()).is_err());
    }

    #[test]
    fn it_should_fail_trying_to_create_an_info_hash_from_a_byte_vector_with_more_than_20_bytes() {
        assert!(InfoHash::try_from([255u8; 21].to_vec()).is_err());
    }

    #[test]
    fn an_info_hash_can_be_serialized() {
        let s = ContainingInfoHash {
            info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(),
        };

        let json_serialized_value = serde_json::to_string(&s).unwrap();

        assert_eq!(
            json_serialized_value,
            r#"{"info_hash":"ffffffffffffffffffffffffffffffffffffffff"}"#
        );
    }

    #[test]
    fn an_info_hash_can_be_deserialized() {
        let json = json!({
            "info_hash": "ffffffffffffffffffffffffffffffffffffffff",
        });

        let s: ContainingInfoHash = serde_json::from_value(json).unwrap();

        assert_eq!(
            s,
            ContainingInfoHash {
                info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap()
            }
        );
    }
}
diff --git a/src/shared/bit_torrent/mod.rs b/src/shared/bit_torrent/mod.rs new file mode 100644 index 000000000..8074661be --- /dev/null +++
b/src/shared/bit_torrent/mod.rs @@ -0,0 +1,72 @@ +//! Common code for the `BitTorrent` protocol. +//! +//! # Glossary +//! +//! - [Announce](#announce) +//! - [Info Hash](#info-hash) +//! - [Leecher](#leechers) +//! - [Peer ID](#peer-id) +//! - [Peer List](#peer-list) +//! - [Peer](#peer) +//! - [Scrape](#scrape) +//! - [Seeders](#seeders) +//! - [Swarm](#swarm) +//! - [Tracker](#tracker) +//! +//! Glossary of `BitTorrent` terms. +//! +//! # Announce +//! +//! A request to the tracker to announce the presence of a peer. +//! +//! ## Info Hash +//! +//! A unique identifier for a torrent. +//! +//! ## Leecher +//! +//! Peers that are only downloading data. +//! +//! ## Peer ID +//! +//! A unique identifier for a peer. +//! +//! ## Peer List +//! +//! A list of peers that are downloading a torrent. +//! +//! ## Peer +//! +//! A client that is downloading or uploading a torrent. +//! +//! ## Scrape +//! +//! A request to the tracker to get information about a torrent. +//! +//! ## Seeder +//! +//! Peers that are only uploading data. +//! +//! ## Swarm +//! +//! A group of peers that are downloading the same torrent. +//! +//! ## Tracker +//! +//! A server that keeps track of peers that are downloading a torrent. +//! +//! # Links +//! +//! Description | Link +//! ---|--- +//! `BitTorrent.org`. A forum for developers to exchange ideas about the direction of the `BitTorrent` protocol | +//! Wikipedia entry for Glossary of `BitTorrent` term | +//! `BitTorrent` Specification Wiki | +//! Vuze Wiki. A `BitTorrent` client implementation | +//! `libtorrent`. Complete C++ bittorrent implementation| +//! UDP Tracker Protocol docs by `libtorrent` | +//! 
Percent Encoding spec |
//!Bencode & bdecode in your browser |
pub mod common;
pub mod info_hash;
pub mod tracker;
diff --git a/src/shared/bit_torrent/tracker/http/client/mod.rs b/src/shared/bit_torrent/tracker/http/client/mod.rs new file mode 100644 index 000000000..4c70cd68b --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/mod.rs @@ -0,0 +1,204 @@
pub mod requests;
pub mod responses;

use std::net::IpAddr;
use std::sync::Arc;
use std::time::Duration;

use hyper::StatusCode;
use requests::{announce, scrape};
use reqwest::{Response, Url};
use thiserror::Error;

use crate::core::auth::Key;

/// Errors returned by the HTTP tracker [`Client`].
#[derive(Debug, Clone, Error)]
pub enum Error {
    #[error("Failed to Build a Http Client: {err:?}")]
    ClientBuildingError { err: Arc<reqwest::Error> },
    #[error("Failed to get a response: {err:?}")]
    ResponseError { err: Arc<reqwest::Error> },
    #[error("Returned a non-success code: \"{code}\" with the response: \"{response:?}\"")]
    UnsuccessfulResponse { code: StatusCode, response: Arc<Response> },
}

/// HTTP Tracker Client
pub struct Client {
    client: reqwest::Client,
    base_url: Url,
    // Optional authentication key; when present it is embedded in the path.
    key: Option<Key>,
}

/// URL components in this context:
///
/// ```text
/// http://127.0.0.1:62304/announce/YZ....rJ?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22
/// \_____________________/\_______________/ \__________________________________________________________/
///           |                    |                                    |
///        base url               path                                query
/// ```
impl Client {
    /// Creates a new client with the given request timeout.
    ///
    /// # Errors
    ///
    /// This method fails if the client builder fails.
    pub fn new(base_url: Url, timeout: Duration) -> Result<Self, Error> {
        let client = reqwest::Client::builder()
            .timeout(timeout)
            .build()
            .map_err(|e| Error::ClientBuildingError { err: e.into() })?;

        Ok(Self {
            base_url,
            client,
            key: None,
        })
    }

    /// Creates the new client binding it to an specific local address.
    ///
    /// # Errors
    ///
    /// This method fails if the client builder fails.
    pub fn bind(base_url: Url, timeout: Duration, local_address: IpAddr) -> Result<Self, Error> {
        let client = reqwest::Client::builder()
            .timeout(timeout)
            .local_address(local_address)
            .build()
            .map_err(|e| Error::ClientBuildingError { err: e.into() })?;

        Ok(Self {
            base_url,
            client,
            key: None,
        })
    }

    /// Creates a new client that authenticates requests with the given key.
    ///
    /// # Errors
    ///
    /// This method fails if the client builder fails.
    pub fn authenticated(base_url: Url, timeout: Duration, key: Key) -> Result<Self, Error> {
        let client = reqwest::Client::builder()
            .timeout(timeout)
            .build()
            .map_err(|e| Error::ClientBuildingError { err: e.into() })?;

        Ok(Self {
            base_url,
            client,
            key: Some(key),
        })
    }

    /// Sends an announce request.
    ///
    /// # Errors
    ///
    /// This method fails if the returned response was not successful
    pub async fn announce(&self, query: &announce::Query) -> Result<Response, Error> {
        let response = self.get(&self.build_announce_path_and_query(query)).await?;

        Self::check_success(response)
    }

    /// Sends a scrape request.
    ///
    /// # Errors
    ///
    /// This method fails if the returned response was not successful
    pub async fn scrape(&self, query: &scrape::Query) -> Result<Response, Error> {
        let response = self.get(&self.build_scrape_path_and_query(query)).await?;

        Self::check_success(response)
    }

    /// Sends an announce request with an extra HTTP header.
    ///
    /// # Errors
    ///
    /// This method fails if the returned response was not successful
    pub async fn announce_with_header(&self, query: &announce::Query, key: &str, value: &str) -> Result<Response, Error> {
        let response = self
            .get_with_header(&self.build_announce_path_and_query(query), key, value)
            .await?;

        Self::check_success(response)
    }

    /// Sends a request to the tracker health-check endpoint.
    ///
    /// # Errors
    ///
    /// This method fails if the returned response was not successful
    pub async fn health_check(&self) -> Result<Response, Error> {
        let response = self.get(&self.build_path("health_check")).await?;

        Self::check_success(response)
    }

    // Maps a non-success HTTP status into `Error::UnsuccessfulResponse`,
    // keeping the response body available for diagnostics.
    fn check_success(response: Response) -> Result<Response, Error> {
        if response.status().is_success() {
            Ok(response)
        } else {
            Err(Error::UnsuccessfulResponse {
                code: response.status(),
                response: response.into(),
            })
        }
    }

    /// # Errors
    ///
    /// This method fails if there was an error while sending request.
    pub async fn get(&self, path: &str) -> Result<Response, Error> {
        self.client
            .get(self.build_url(path))
            .send()
            .await
            .map_err(|e| Error::ResponseError { err: e.into() })
    }

    /// # Errors
    ///
    /// This method fails if there was an error while sending request.
    pub async fn get_with_header(&self, path: &str, key: &str, value: &str) -> Result<Response, Error> {
        self.client
            .get(self.build_url(path))
            .header(key, value)
            .send()
            .await
            .map_err(|e| Error::ResponseError { err: e.into() })
    }

    fn build_announce_path_and_query(&self, query: &announce::Query) -> String {
        format!("{}?{query}", self.build_path("announce"))
    }

    fn build_scrape_path_and_query(&self, query: &scrape::Query) -> String {
        format!("{}?{query}", self.build_path("scrape"))
    }

    // The authentication key (when present) is appended to the path.
    fn build_path(&self, path: &str) -> String {
        match &self.key {
            Some(key) => format!("{path}/{key}"),
            None => path.to_string(),
        }
    }

    fn build_url(&self, path: &str) -> String {
        let base_url = self.base_url();
        format!("{base_url}{path}")
    }

    fn base_url(&self) -> String {
        self.base_url.to_string()
    }
}
diff --git a/src/shared/bit_torrent/tracker/http/client/requests/announce.rs b/src/shared/bit_torrent/tracker/http/client/requests/announce.rs new file mode 100644 index 000000000..3c6b14222 --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/requests/announce.rs @@ -0,0 +1,275 @@
use std::fmt;
use std::net::{IpAddr, Ipv4Addr};
use std::str::FromStr;

use aquatic_udp_protocol::PeerId;
use serde_repr::Serialize_repr;
use
torrust_tracker_primitives::info_hash::InfoHash; + +use crate::shared::bit_torrent::tracker::http::{percent_encode_byte_array, ByteArray20}; + +pub struct Query { + pub info_hash: ByteArray20, + pub peer_addr: IpAddr, + pub downloaded: BaseTenASCII, + pub uploaded: BaseTenASCII, + pub peer_id: ByteArray20, + pub port: PortNumber, + pub left: BaseTenASCII, + pub event: Option, + pub compact: Option, +} + +impl fmt::Display for Query { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.build()) + } +} + +/// HTTP Tracker Announce Request: +/// +/// +/// +/// Some parameters in the specification are not implemented in this tracker yet. +impl Query { + /// It builds the URL query component for the announce request. + /// + /// This custom URL query params encoding is needed because `reqwest` does not allow + /// bytes arrays in query parameters. More info on this issue: + /// + /// + #[must_use] + pub fn build(&self) -> String { + self.params().to_string() + } + + #[must_use] + pub fn params(&self) -> QueryParams { + QueryParams::from(self) + } +} + +pub type BaseTenASCII = u64; +pub type PortNumber = u16; + +pub enum Event { + //Started, + //Stopped, + Completed, +} + +impl fmt::Display for Event { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + //Event::Started => write!(f, "started"), + //Event::Stopped => write!(f, "stopped"), + Event::Completed => write!(f, "completed"), + } + } +} + +#[derive(Serialize_repr, PartialEq, Debug)] +#[repr(u8)] +pub enum Compact { + Accepted = 1, + NotAccepted = 0, +} + +impl fmt::Display for Compact { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Compact::Accepted => write!(f, "1"), + Compact::NotAccepted => write!(f, "0"), + } + } +} + +pub struct QueryBuilder { + announce_query: Query, +} + +impl QueryBuilder { + /// # Panics + /// + /// Will panic if the default info-hash value is not a valid info-hash. 
+ #[must_use] + pub fn with_default_values() -> QueryBuilder { + let default_announce_query = Query { + info_hash: InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0, // # DevSkim: ignore DS173237 + peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), + downloaded: 0, + uploaded: 0, + peer_id: PeerId(*b"-qB00000000000000001").0, + port: 17548, + left: 0, + event: Some(Event::Completed), + compact: Some(Compact::NotAccepted), + }; + Self { + announce_query: default_announce_query, + } + } + + #[must_use] + pub fn with_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.announce_query.info_hash = info_hash.0; + self + } + + #[must_use] + pub fn with_peer_id(mut self, peer_id: &PeerId) -> Self { + self.announce_query.peer_id = peer_id.0; + self + } + + #[must_use] + pub fn with_compact(mut self, compact: Compact) -> Self { + self.announce_query.compact = Some(compact); + self + } + + #[must_use] + pub fn with_peer_addr(mut self, peer_addr: &IpAddr) -> Self { + self.announce_query.peer_addr = *peer_addr; + self + } + + #[must_use] + pub fn without_compact(mut self) -> Self { + self.announce_query.compact = None; + self + } + + #[must_use] + pub fn query(self) -> Query { + self.announce_query + } +} + +/// It contains all the GET parameters that can be used in a HTTP Announce request. +/// +/// Sample Announce URL with all the GET parameters (mandatory and optional): +/// +/// ```text +/// http://127.0.0.1:7070/announce? 
+/// info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 (mandatory) +/// peer_addr=192.168.1.88 +/// downloaded=0 +/// uploaded=0 +/// peer_id=%2DqB00000000000000000 (mandatory) +/// port=17548 (mandatory) +/// left=0 +/// event=completed +/// compact=0 +/// ``` +pub struct QueryParams { + pub info_hash: Option, + pub peer_addr: Option, + pub downloaded: Option, + pub uploaded: Option, + pub peer_id: Option, + pub port: Option, + pub left: Option, + pub event: Option, + pub compact: Option, +} + +impl std::fmt::Display for QueryParams { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut params = vec![]; + + if let Some(info_hash) = &self.info_hash { + params.push(("info_hash", info_hash)); + } + if let Some(peer_addr) = &self.peer_addr { + params.push(("peer_addr", peer_addr)); + } + if let Some(downloaded) = &self.downloaded { + params.push(("downloaded", downloaded)); + } + if let Some(uploaded) = &self.uploaded { + params.push(("uploaded", uploaded)); + } + if let Some(peer_id) = &self.peer_id { + params.push(("peer_id", peer_id)); + } + if let Some(port) = &self.port { + params.push(("port", port)); + } + if let Some(left) = &self.left { + params.push(("left", left)); + } + if let Some(event) = &self.event { + params.push(("event", event)); + } + if let Some(compact) = &self.compact { + params.push(("compact", compact)); + } + + let query = params + .iter() + .map(|param| format!("{}={}", param.0, param.1)) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +impl QueryParams { + pub fn from(announce_query: &Query) -> Self { + let event = announce_query.event.as_ref().map(std::string::ToString::to_string); + let compact = announce_query.compact.as_ref().map(std::string::ToString::to_string); + + Self { + info_hash: Some(percent_encode_byte_array(&announce_query.info_hash)), + peer_addr: Some(announce_query.peer_addr.to_string()), + downloaded: Some(announce_query.downloaded.to_string()), + uploaded: 
Some(announce_query.uploaded.to_string()), + peer_id: Some(percent_encode_byte_array(&announce_query.peer_id)), + port: Some(announce_query.port.to_string()), + left: Some(announce_query.left.to_string()), + event, + compact, + } + } + + pub fn remove_optional_params(&mut self) { + // todo: make them optional with the Option<...> in the AnnounceQuery struct + // if they are really optional. So that we can crete a minimal AnnounceQuery + // instead of removing the optional params afterwards. + // + // The original specification on: + // + // says only `ip` and `event` are optional. + // + // On + // says only `ip`, `numwant`, `key` and `trackerid` are optional. + // + // but the server is responding if all these params are not included. + self.peer_addr = None; + self.downloaded = None; + self.uploaded = None; + self.left = None; + self.event = None; + self.compact = None; + } + + /// # Panics + /// + /// Will panic if invalid param name is provided. + pub fn set(&mut self, param_name: &str, param_value: &str) { + match param_name { + "info_hash" => self.info_hash = Some(param_value.to_string()), + "peer_addr" => self.peer_addr = Some(param_value.to_string()), + "downloaded" => self.downloaded = Some(param_value.to_string()), + "uploaded" => self.uploaded = Some(param_value.to_string()), + "peer_id" => self.peer_id = Some(param_value.to_string()), + "port" => self.port = Some(param_value.to_string()), + "left" => self.left = Some(param_value.to_string()), + "event" => self.event = Some(param_value.to_string()), + "compact" => self.compact = Some(param_value.to_string()), + &_ => panic!("Invalid param name for announce query"), + } + } +} diff --git a/src/shared/bit_torrent/tracker/http/client/requests/mod.rs b/src/shared/bit_torrent/tracker/http/client/requests/mod.rs new file mode 100644 index 000000000..776d2dfbf --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/requests/mod.rs @@ -0,0 +1,2 @@ +pub mod announce; +pub mod scrape; diff --git 
a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs new file mode 100644 index 000000000..4d12fc2d2 --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs @@ -0,0 +1,172 @@
use std::error::Error;
use std::fmt::{self};
use std::str::FromStr;

use torrust_tracker_primitives::info_hash::InfoHash;

use crate::shared::bit_torrent::tracker::http::{percent_encode_byte_array, ByteArray20};

/// The query (GET) parameters of an HTTP tracker scrape request.
pub struct Query {
    pub info_hash: Vec<ByteArray20>,
}

impl fmt::Display for Query {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.build())
    }
}

/// Error returned when a string cannot be parsed into a valid info-hash.
#[derive(Debug)]
#[allow(dead_code)]
pub struct ConversionError(String);

impl fmt::Display for ConversionError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Invalid infohash: {}", self.0)
    }
}

impl Error for ConversionError {}

impl TryFrom<&[String]> for Query {
    type Error = ConversionError;

    fn try_from(info_hashes: &[String]) -> Result<Self, Self::Error> {
        let mut validated_info_hashes: Vec<ByteArray20> = Vec::new();

        for info_hash in info_hashes {
            let validated_info_hash = InfoHash::from_str(info_hash).map_err(|_| ConversionError(info_hash.clone()))?;
            validated_info_hashes.push(validated_info_hash.0);
        }

        Ok(Self {
            info_hash: validated_info_hashes,
        })
    }
}

impl TryFrom<Vec<String>> for Query {
    type Error = ConversionError;

    fn try_from(info_hashes: Vec<String>) -> Result<Self, Self::Error> {
        let mut validated_info_hashes: Vec<ByteArray20> = Vec::new();

        for info_hash in info_hashes {
            let validated_info_hash = InfoHash::from_str(&info_hash).map_err(|_| ConversionError(info_hash.clone()))?;
            validated_info_hashes.push(validated_info_hash.0);
        }

        Ok(Self {
            info_hash: validated_info_hashes,
        })
    }
}

/// HTTP Tracker Scrape Request:
///
///
impl Query {
    /// It builds the URL query component for the scrape request.
    ///
    /// This custom URL query params encoding is needed because `reqwest` does not allow
    /// bytes arrays in query parameters. More info on this issue:
    ///
    ///
    #[must_use]
    pub fn build(&self) -> String {
        self.params().to_string()
    }

    #[must_use]
    pub fn params(&self) -> QueryParams {
        QueryParams::from(self)
    }
}

/// Builder producing scrape [`Query`] values with a default info-hash.
pub struct QueryBuilder {
    scrape_query: Query,
}

impl Default for QueryBuilder {
    fn default() -> Self {
        let default_scrape_query = Query {
            info_hash: [InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0].to_vec(), // # DevSkim: ignore DS173237
        };
        Self {
            scrape_query: default_scrape_query,
        }
    }
}

impl QueryBuilder {
    #[must_use]
    pub fn with_one_info_hash(mut self, info_hash: &InfoHash) -> Self {
        self.scrape_query.info_hash = [info_hash.0].to_vec();
        self
    }

    #[must_use]
    pub fn add_info_hash(mut self, info_hash: &InfoHash) -> Self {
        self.scrape_query.info_hash.push(info_hash.0);
        self
    }

    #[must_use]
    pub fn query(self) -> Query {
        self.scrape_query
    }
}

/// It contains all the GET parameters that can be used in a HTTP Scrape request.
///
/// The `info_hash` param is the percent encoded of the the 20-byte array info hash.
///
/// Sample Scrape URL with all the GET parameters:
///
/// For `IpV4`:
///
/// ```text
/// http://127.0.0.1:7070/scrape?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22
/// ```
///
/// For `IpV6`:
///
/// ```text
/// http://[::1]:7070/scrape?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22
/// ```
///
/// You can add as many info hashes as you want, just adding the same param again.
+pub struct QueryParams { + pub info_hash: Vec, +} + +impl QueryParams { + pub fn set_one_info_hash_param(&mut self, info_hash: &str) { + self.info_hash = vec![info_hash.to_string()]; + } +} + +impl std::fmt::Display for QueryParams { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let query = self + .info_hash + .iter() + .map(|info_hash| format!("info_hash={}", &info_hash)) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +impl QueryParams { + pub fn from(scrape_query: &Query) -> Self { + let info_hashes = scrape_query + .info_hash + .iter() + .map(percent_encode_byte_array) + .collect::>(); + + Self { info_hash: info_hashes } + } +} diff --git a/src/shared/bit_torrent/tracker/http/client/responses/announce.rs b/src/shared/bit_torrent/tracker/http/client/responses/announce.rs new file mode 100644 index 000000000..7f2d3611c --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/responses/announce.rs @@ -0,0 +1,126 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use serde::{Deserialize, Serialize}; +use torrust_tracker_primitives::peer; +use zerocopy::AsBytes as _; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Announce { + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + #[serde(rename = "min interval")] + pub min_interval: u32, + pub peers: Vec, // Peers using IPV4 and IPV6 +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct DictionaryPeer { + pub ip: String, + #[serde(rename = "peer id")] + #[serde(with = "serde_bytes")] + pub peer_id: Vec, + pub port: u16, +} + +impl From for DictionaryPeer { + fn from(peer: peer::Peer) -> Self { + DictionaryPeer { + peer_id: peer.peer_id.as_bytes().to_vec(), + ip: peer.peer_addr.ip().to_string(), + port: peer.peer_addr.port(), + } + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct DeserializedCompact { + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + #[serde(rename = "min 
interval")] + pub min_interval: u32, + #[serde(with = "serde_bytes")] + pub peers: Vec, +} + +impl DeserializedCompact { + /// # Errors + /// + /// Will return an error if bytes can't be deserialized. + pub fn from_bytes(bytes: &[u8]) -> Result { + serde_bencode::from_bytes::(bytes) + } +} + +#[derive(Debug, PartialEq)] +pub struct Compact { + // code-review: there could be a way to deserialize this struct directly + // by using serde instead of doing it manually. Or at least using a custom deserializer. + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + pub min_interval: u32, + pub peers: CompactPeerList, +} + +#[derive(Debug, PartialEq)] +pub struct CompactPeerList { + peers: Vec, +} + +impl CompactPeerList { + #[must_use] + pub fn new(peers: Vec) -> Self { + Self { peers } + } +} + +#[derive(Clone, Debug, PartialEq)] +pub struct CompactPeer { + ip: Ipv4Addr, + port: u16, +} + +impl CompactPeer { + /// # Panics + /// + /// Will panic if the provided socket address is a IPv6 IP address. + /// It's not supported for compact peers. 
+ #[must_use] + pub fn new(socket_addr: &SocketAddr) -> Self { + match socket_addr.ip() { + IpAddr::V4(ip) => Self { + ip, + port: socket_addr.port(), + }, + IpAddr::V6(_ip) => panic!("IPV6 is not supported for compact peer"), + } + } + + #[must_use] + pub fn new_from_bytes(bytes: &[u8]) -> Self { + Self { + ip: Ipv4Addr::new(bytes[0], bytes[1], bytes[2], bytes[3]), + port: u16::from_be_bytes([bytes[4], bytes[5]]), + } + } +} + +impl From for Compact { + fn from(compact_announce: DeserializedCompact) -> Self { + let mut peers = vec![]; + + for peer_bytes in compact_announce.peers.chunks_exact(6) { + peers.push(CompactPeer::new_from_bytes(peer_bytes)); + } + + Self { + complete: compact_announce.complete, + incomplete: compact_announce.incomplete, + interval: compact_announce.interval, + min_interval: compact_announce.min_interval, + peers: CompactPeerList::new(peers), + } + } +} diff --git a/src/shared/bit_torrent/tracker/http/client/responses/error.rs b/src/shared/bit_torrent/tracker/http/client/responses/error.rs new file mode 100644 index 000000000..00befdb54 --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/responses/error.rs @@ -0,0 +1,7 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Error { + #[serde(rename = "failure reason")] + pub failure_reason: String, +} diff --git a/src/shared/bit_torrent/tracker/http/client/responses/mod.rs b/src/shared/bit_torrent/tracker/http/client/responses/mod.rs new file mode 100644 index 000000000..bdc689056 --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/responses/mod.rs @@ -0,0 +1,3 @@ +pub mod announce; +pub mod error; +pub mod scrape; diff --git a/src/shared/bit_torrent/tracker/http/client/responses/scrape.rs b/src/shared/bit_torrent/tracker/http/client/responses/scrape.rs new file mode 100644 index 000000000..25a2f0a81 --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/responses/scrape.rs @@ -0,0 +1,230 @@ +use 
std::collections::HashMap; +use std::fmt::Write; +use std::str; + +use serde::ser::SerializeMap; +use serde::{Deserialize, Serialize, Serializer}; +use serde_bencode::value::Value; + +use crate::shared::bit_torrent::tracker::http::{ByteArray20, InfoHash}; + +#[derive(Debug, PartialEq, Default, Deserialize)] +pub struct Response { + pub files: HashMap, +} + +impl Response { + #[must_use] + pub fn with_one_file(info_hash_bytes: ByteArray20, file: File) -> Self { + let mut files: HashMap = HashMap::new(); + files.insert(info_hash_bytes, file); + Self { files } + } + + /// # Errors + /// + /// Will return an error if the deserialized bencoded response cannot be converted into a valid response. + /// + /// # Panics + /// + /// Will panic if it can't deserialize the bencoded response. + pub fn try_from_bencoded(bytes: &[u8]) -> Result { + let scrape_response: DeserializedResponse = + serde_bencode::from_bytes(bytes).expect("provided bytes should be a valid bencoded response"); + Self::try_from(scrape_response) + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Default)] +pub struct File { + pub complete: i64, // The number of active peers that have completed downloading + pub downloaded: i64, // The number of peers that have ever completed downloading + pub incomplete: i64, // The number of active peers that have not completed downloading +} + +impl File { + #[must_use] + pub fn zeroed() -> Self { + Self::default() + } +} + +impl TryFrom for Response { + type Error = BencodeParseError; + + fn try_from(scrape_response: DeserializedResponse) -> Result { + parse_bencoded_response(&scrape_response.files) + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +struct DeserializedResponse { + pub files: Value, +} + +// Custom serialization for Response +impl Serialize for Response { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut map = serializer.serialize_map(Some(self.files.len()))?; + for (key, value) in &self.files
{ + // Convert ByteArray20 key to hex string + let hex_key = byte_array_to_hex_string(key); + map.serialize_entry(&hex_key, value)?; + } + map.end() + } +} + +// Helper function to convert ByteArray20 to hex string +fn byte_array_to_hex_string(byte_array: &ByteArray20) -> String { + let mut hex_string = String::with_capacity(byte_array.len() * 2); + for byte in byte_array { + write!(hex_string, "{byte:02x}").expect("Writing to string should never fail"); + } + hex_string +} + +#[derive(Default)] +pub struct ResponseBuilder { + response: Response, +} + +impl ResponseBuilder { + #[must_use] + pub fn add_file(mut self, info_hash_bytes: ByteArray20, file: File) -> Self { + self.response.files.insert(info_hash_bytes, file); + self + } + + #[must_use] + pub fn build(self) -> Response { + self.response + } +} + +#[derive(Debug)] +pub enum BencodeParseError { + InvalidValueExpectedDict { value: Value }, + InvalidValueExpectedInt { value: Value }, + InvalidFileField { value: Value }, + MissingFileField { field_name: String }, +} + +/// It parses a bencoded scrape response into a `Response` struct. 
+/// +/// For example: +/// +/// ```text +/// d5:filesd20:xxxxxxxxxxxxxxxxxxxxd8:completei11e10:downloadedi13772e10:incompletei19e +/// 20:yyyyyyyyyyyyyyyyyyyyd8:completei21e10:downloadedi206e10:incompletei20eee +/// ``` +/// +/// Response (JSON encoded for readability): +/// +/// ```text +/// { +/// 'files': { +/// 'xxxxxxxxxxxxxxxxxxxx': {'complete': 11, 'downloaded': 13772, 'incomplete': 19}, +/// 'yyyyyyyyyyyyyyyyyyyy': {'complete': 21, 'downloaded': 206, 'incomplete': 20} +/// } +/// } +fn parse_bencoded_response(value: &Value) -> Result { + let mut files: HashMap = HashMap::new(); + + match value { + Value::Dict(dict) => { + for file_element in dict { + let info_hash_byte_vec = file_element.0; + let file_value = file_element.1; + + let file = parse_bencoded_file(file_value).unwrap(); + + files.insert(InfoHash::new(info_hash_byte_vec).bytes(), file); + } + } + _ => return Err(BencodeParseError::InvalidValueExpectedDict { value: value.clone() }), + } + + Ok(Response { files }) +} + +/// It parses a bencoded dictionary into a `File` struct. 
+/// +/// For example: +/// +/// +/// ```text +/// d8:completei11e10:downloadedi13772e10:incompletei19ee +/// ``` +/// +/// into: +/// +/// ```text +/// File { +/// complete: 11, +/// downloaded: 13772, +/// incomplete: 19, +/// } +/// ``` +fn parse_bencoded_file(value: &Value) -> Result { + let file = match &value { + Value::Dict(dict) => { + let mut complete = None; + let mut downloaded = None; + let mut incomplete = None; + + for file_field in dict { + let field_name = file_field.0; + + let field_value = match file_field.1 { + Value::Int(number) => Ok(*number), + _ => Err(BencodeParseError::InvalidValueExpectedInt { + value: file_field.1.clone(), + }), + }?; + + if field_name == b"complete" { + complete = Some(field_value); + } else if field_name == b"downloaded" { + downloaded = Some(field_value); + } else if field_name == b"incomplete" { + incomplete = Some(field_value); + } else { + return Err(BencodeParseError::InvalidFileField { + value: file_field.1.clone(), + }); + } + } + + if complete.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "complete".to_string(), + }); + } + + if downloaded.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "downloaded".to_string(), + }); + } + + if incomplete.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "incomplete".to_string(), + }); + } + + File { + complete: complete.unwrap(), + downloaded: downloaded.unwrap(), + incomplete: incomplete.unwrap(), + } + } + _ => return Err(BencodeParseError::InvalidValueExpectedDict { value: value.clone() }), + }; + + Ok(file) +} diff --git a/src/shared/bit_torrent/tracker/http/mod.rs b/src/shared/bit_torrent/tracker/http/mod.rs new file mode 100644 index 000000000..15723c1b7 --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/mod.rs @@ -0,0 +1,26 @@ +pub mod client; + +use percent_encoding::NON_ALPHANUMERIC; + +pub type ByteArray20 = [u8; 20]; + +#[must_use] +pub fn percent_encode_byte_array(bytes: 
&ByteArray20) -> String { + percent_encoding::percent_encode(bytes, NON_ALPHANUMERIC).to_string() +} + +pub struct InfoHash(ByteArray20); + +impl InfoHash { + #[must_use] + pub fn new(vec: &[u8]) -> Self { + let mut byte_array_20: ByteArray20 = Default::default(); + byte_array_20.clone_from_slice(vec); + Self(byte_array_20) + } + + #[must_use] + pub fn bytes(&self) -> ByteArray20 { + self.0 + } +} diff --git a/src/shared/bit_torrent/tracker/mod.rs b/src/shared/bit_torrent/tracker/mod.rs new file mode 100644 index 000000000..b08eaa622 --- /dev/null +++ b/src/shared/bit_torrent/tracker/mod.rs @@ -0,0 +1,2 @@ +pub mod http; +pub mod udp; diff --git a/src/shared/bit_torrent/tracker/udp/client.rs b/src/shared/bit_torrent/tracker/udp/client.rs new file mode 100644 index 000000000..edb8adc85 --- /dev/null +++ b/src/shared/bit_torrent/tracker/udp/client.rs @@ -0,0 +1,270 @@ +use core::result::Result::{Err, Ok}; +use std::io::Cursor; +use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; +use std::sync::Arc; +use std::time::Duration; + +use aquatic_udp_protocol::{ConnectRequest, Request, Response, TransactionId}; +use tokio::net::UdpSocket; +use tokio::time; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; +use zerocopy::network_endian::I32; + +use super::Error; +use crate::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; + +pub const UDP_CLIENT_LOG_TARGET: &str = "UDP CLIENT"; + +#[allow(clippy::module_name_repetitions)] +#[derive(Debug)] +pub struct UdpClient { + /// The socket to connect to + pub socket: Arc, + + /// Timeout for sending and receiving packets + pub timeout: Duration, +} + +impl UdpClient { + /// Creates a new `UdpClient` bound to the default port and ipv4 address + /// + /// # Errors + /// + /// Will return error if unable to bind to any port or ip address.
+ /// + async fn bound_to_default_ipv4(timeout: Duration) -> Result { + let addr = SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0); + + Self::bound(addr, timeout).await + } + + /// Creates a new `UdpClient` bound to the default port and ipv6 address + /// + /// # Errors + /// + /// Will return error if unable to bind to any port or ip address. + /// + async fn bound_to_default_ipv6(timeout: Duration) -> Result { + let addr = SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 0); + + Self::bound(addr, timeout).await + } + + /// Creates a new `UdpClient` connected to a Udp server + /// + /// # Errors + /// + /// Will return any errors present in the call stack + /// + pub async fn connected(remote_addr: SocketAddr, timeout: Duration) -> Result { + let client = if remote_addr.is_ipv4() { + Self::bound_to_default_ipv4(timeout).await? + } else { + Self::bound_to_default_ipv6(timeout).await? + }; + + client.connect(remote_addr).await?; + Ok(client) + } + + /// Creates a `[UdpClient]` bound to a Socket. + /// + /// # Panics + /// + /// Panics if unable to get the `local_addr` of the bound socket. + /// + /// # Errors + /// + /// This function will return an error if the binding takes to long + /// or if there is an underlying OS error. + pub async fn bound(addr: SocketAddr, timeout: Duration) -> Result { + tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "binding to socket: {addr:?} ..."); + + let socket = time::timeout(timeout, UdpSocket::bind(addr)) + .await + .map_err(|_| Error::TimeoutWhileBindingToSocket { addr })? + .map_err(|e| Error::UnableToBindToSocket { err: e.into(), addr })?; + + let addr = socket.local_addr().expect("it should get the local address"); + + tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "bound to socket: {addr:?}."); + + let udp_client = Self { + socket: Arc::new(socket), + timeout, + }; + + Ok(udp_client) + } + + /// # Errors + /// + /// Will return error if can't connect to the socket. 
+ pub async fn connect(&self, remote_addr: SocketAddr) -> Result<(), Error> { + tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "connecting to remote: {remote_addr:?} ..."); + + let () = time::timeout(self.timeout, self.socket.connect(remote_addr)) + .await + .map_err(|_| Error::TimeoutWhileConnectingToRemote { remote_addr })? + .map_err(|e| Error::UnableToConnectToRemote { + err: e.into(), + remote_addr, + })?; + + tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "connected to remote: {remote_addr:?}."); + + Ok(()) + } + + /// # Errors + /// + /// Will return error if: + /// + /// - Can't write to the socket. + /// - Can't send data. + pub async fn send(&self, bytes: &[u8]) -> Result { + tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "sending {bytes:?} ..."); + + let () = time::timeout(self.timeout, self.socket.writable()) + .await + .map_err(|_| Error::TimeoutWaitForWriteableSocket)? + .map_err(|e| Error::UnableToGetWritableSocket { err: e.into() })?; + + let sent_bytes = time::timeout(self.timeout, self.socket.send(bytes)) + .await + .map_err(|_| Error::TimeoutWhileSendingData { data: bytes.to_vec() })? + .map_err(|e| Error::UnableToSendData { + err: e.into(), + data: bytes.to_vec(), + })?; + + tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "sent {sent_bytes} bytes to remote."); + + Ok(sent_bytes) + } + + /// # Errors + /// + /// Will return error if: + /// + /// - Can't read from the socket. + /// - Can't receive data. + /// + /// # Panics + /// + pub async fn receive(&self) -> Result, Error> { + tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "receiving ..."); + + let mut buffer = [0u8; MAX_PACKET_SIZE]; + + let () = time::timeout(self.timeout, self.socket.readable()) + .await + .map_err(|_| Error::TimeoutWaitForReadableSocket)? + .map_err(|e| Error::UnableToGetReadableSocket { err: e.into() })?; + + let received_bytes = time::timeout(self.timeout, self.socket.recv(&mut buffer)) + .await + .map_err(|_| Error::TimeoutWhileReceivingData)? 
+ .map_err(|e| Error::UnableToReceivingData { err: e.into() })?; + + let mut received: Vec = buffer.to_vec(); + Vec::truncate(&mut received, received_bytes); + + tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "received {received_bytes} bytes: {received:?}"); + + Ok(received) + } +} + +#[allow(clippy::module_name_repetitions)] +#[derive(Debug)] +pub struct UdpTrackerClient { + pub client: UdpClient, +} + +impl UdpTrackerClient { + /// Creates a new `UdpTrackerClient` connected to a Udp Tracker server + /// + /// # Errors + /// + /// If unable to connect to the remote address. + /// + pub async fn new(remote_addr: SocketAddr, timeout: Duration) -> Result { + let client = UdpClient::connected(remote_addr, timeout).await?; + Ok(UdpTrackerClient { client }) + } + + /// # Errors + /// + /// Will return error if can't write request to bytes. + pub async fn send(&self, request: Request) -> Result { + tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "sending request {request:?} ..."); + + // Write request into a buffer + // todo: optimize the pre-allocated amount based upon request type. + let mut writer = Cursor::new(Vec::with_capacity(200)); + let () = request + .write_bytes(&mut writer) + .map_err(|e| Error::UnableToWriteDataFromRequest { err: e.into(), request })?; + + self.client.send(writer.get_ref()).await + } + + /// # Errors + /// + /// Will return error if can't create response from the received payload (bytes buffer). + pub async fn receive(&self) -> Result { + let response = self.client.receive().await?; + + tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "received {} bytes: {response:?}", response.len()); + + Response::parse_bytes(&response, true).map_err(|e| Error::UnableToParseResponse { err: e.into(), response }) + } +} + +/// Helper Function to Check if a UDP Service is Connectable +/// +/// # Panics +/// +/// It will return an error if unable to connect to the UDP service. 
+/// +/// # Errors +/// +pub async fn check(remote_addr: &SocketAddr) -> Result { + tracing::debug!("Checking Service (detail): {remote_addr:?}."); + + match UdpTrackerClient::new(*remote_addr, DEFAULT_TIMEOUT).await { + Ok(client) => { + let connect_request = ConnectRequest { + transaction_id: TransactionId(I32::new(123)), + }; + + // client.send() return usize, but doesn't use here + match client.send(connect_request.into()).await { + Ok(_) => (), + Err(e) => tracing::debug!("Error: {e:?}."), + }; + + let process = move |response| { + if matches!(response, Response::Connect(_connect_response)) { + Ok("Connected".to_string()) + } else { + Err("Did not Connect".to_string()) + } + }; + + let sleep = time::sleep(Duration::from_millis(2000)); + tokio::pin!(sleep); + + tokio::select! { + () = &mut sleep => { + Err("Timed Out".to_string()) + } + response = client.receive() => { + process(response.unwrap()) + } + } + } + Err(e) => Err(format!("{e:?}")), + } +} diff --git a/src/shared/bit_torrent/tracker/udp/mod.rs b/src/shared/bit_torrent/tracker/udp/mod.rs new file mode 100644 index 000000000..b9d5f34f6 --- /dev/null +++ b/src/shared/bit_torrent/tracker/udp/mod.rs @@ -0,0 +1,68 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use aquatic_udp_protocol::Request; +use thiserror::Error; +use torrust_tracker_located_error::DynError; + +pub mod client; + +/// The maximum number of bytes in a UDP packet. +pub const MAX_PACKET_SIZE: usize = 1496; +/// A magic 64-bit integer constant defined in the protocol that is used to +/// identify the protocol. 
+pub const PROTOCOL_ID: i64 = 0x0417_2710_1980; + +#[derive(Debug, Clone, Error)] +pub enum Error { + #[error("Timeout while waiting for socket to bind: {addr:?}")] + TimeoutWhileBindingToSocket { addr: SocketAddr }, + + #[error("Failed to bind to socket: {addr:?}, with error: {err:?}")] + UnableToBindToSocket { err: Arc, addr: SocketAddr }, + + #[error("Timeout while waiting for connection to remote: {remote_addr:?}")] + TimeoutWhileConnectingToRemote { remote_addr: SocketAddr }, + + #[error("Failed to connect to remote: {remote_addr:?}, with error: {err:?}")] + UnableToConnectToRemote { + err: Arc, + remote_addr: SocketAddr, + }, + + #[error("Timeout while waiting for the socket to become writable.")] + TimeoutWaitForWriteableSocket, + + #[error("Failed to get writable socket: {err:?}")] + UnableToGetWritableSocket { err: Arc }, + + #[error("Timeout while trying to send data: {data:?}")] + TimeoutWhileSendingData { data: Vec }, + + #[error("Failed to send data: {data:?}, with error: {err:?}")] + UnableToSendData { err: Arc, data: Vec }, + + #[error("Timeout while waiting for the socket to become readable.")] + TimeoutWaitForReadableSocket, + + #[error("Failed to get readable socket: {err:?}")] + UnableToGetReadableSocket { err: Arc }, + + #[error("Timeout while trying to receive data.")] + TimeoutWhileReceivingData, + + #[error("Failed to receive data: {err:?}")] + UnableToReceivingData { err: Arc }, + + #[error("Failed to get data from request: {request:?}, with error: {err:?}")] + UnableToWriteDataFromRequest { err: Arc, request: Request }, + + #[error("Failed to parse response: {response:?}, with error: {err:?}")] + UnableToParseResponse { err: Arc, response: Vec }, +} + +impl From for DynError { + fn from(e: Error) -> Self { + Arc::new(Box::new(e)) + } +} diff --git a/src/shared/crypto/ephemeral_instance_keys.rs b/src/shared/crypto/ephemeral_instance_keys.rs new file mode 100644 index 000000000..44283365a --- /dev/null +++ 
b/src/shared/crypto/ephemeral_instance_keys.rs @@ -0,0 +1,13 @@ +//! This module contains the ephemeral instance keys used by the application. +//! +//! They are ephemeral because they are generated at runtime when the +//! application starts and are not persisted anywhere. +use rand::rngs::ThreadRng; +use rand::Rng; + +pub type Seed = [u8; 32]; + +lazy_static! { + /// The random static seed. + pub static ref RANDOM_SEED: Seed = Rng::gen(&mut ThreadRng::default()); +} diff --git a/src/shared/crypto/keys.rs b/src/shared/crypto/keys.rs new file mode 100644 index 000000000..deb70574f --- /dev/null +++ b/src/shared/crypto/keys.rs @@ -0,0 +1,110 @@ +//! This module contains logic related to cryptographic keys. +pub mod seeds { + //! This module contains logic related to cryptographic seeds. + //! + //! Specifically, it contains the logic for storing the seed and providing + //! it to other modules. + //! + //! A **seed** is a pseudo-random number that is used as a secret key for + //! cryptographic operations. + use self::detail::CURRENT_SEED; + use crate::shared::crypto::ephemeral_instance_keys::{Seed, RANDOM_SEED}; + + /// This trait is for structures that can keep and provide a seed. + pub trait Keeper { + type Seed: Sized + Default + AsMut<[u8]>; + + /// It returns a reference to the seed that is keeping. + fn get_seed() -> &'static Self::Seed; + } + + /// The seed keeper for the instance. When the application is running + /// in production, this will be the seed keeper that is used. + pub struct Instance; + + /// The seed keeper for the current execution. It's a facade at compilation + /// time that will either be the instance seed keeper (with a randomly + /// generated key for production) or the zeroed seed keeper. 
+ pub struct Current; + + impl Keeper for Instance { + type Seed = Seed; + + fn get_seed() -> &'static Self::Seed { + &RANDOM_SEED + } + } + + impl Keeper for Current { + type Seed = Seed; + + #[allow(clippy::needless_borrow)] + fn get_seed() -> &'static Self::Seed { + &CURRENT_SEED + } + } + + #[cfg(test)] + mod tests { + use super::detail::ZEROED_TEST_SEED; + use super::{Current, Instance, Keeper}; + use crate::shared::crypto::ephemeral_instance_keys::Seed; + + pub struct ZeroedTestSeed; + + impl Keeper for ZeroedTestSeed { + type Seed = Seed; + + #[allow(clippy::needless_borrow)] + fn get_seed() -> &'static Self::Seed { + &ZEROED_TEST_SEED + } + } + + #[test] + fn the_default_seed_and_the_zeroed_seed_should_be_the_same_when_testing() { + assert_eq!(Current::get_seed(), ZeroedTestSeed::get_seed()); + } + + #[test] + fn the_default_seed_and_the_instance_seed_should_be_different_when_testing() { + assert_ne!(Current::get_seed(), Instance::get_seed()); + } + } + + mod detail { + use crate::shared::crypto::ephemeral_instance_keys::Seed; + + #[allow(dead_code)] + pub const ZEROED_TEST_SEED: &Seed = &[0u8; 32]; + + #[cfg(test)] + pub use ZEROED_TEST_SEED as CURRENT_SEED; + + #[cfg(not(test))] + pub use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as CURRENT_SEED; + + #[cfg(test)] + mod tests { + use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED; + use crate::shared::crypto::keys::seeds::detail::ZEROED_TEST_SEED; + use crate::shared::crypto::keys::seeds::CURRENT_SEED; + + #[test] + fn it_should_have_a_zero_test_seed() { + assert_eq!(*ZEROED_TEST_SEED, [0u8; 32]); + } + + #[test] + fn it_should_default_to_zeroed_seed_when_testing() { + assert_eq!(*CURRENT_SEED, *ZEROED_TEST_SEED); + } + + #[test] + fn it_should_have_a_large_random_seed() { + assert!(u128::from_ne_bytes((*RANDOM_SEED)[..16].try_into().unwrap()) > u128::from(u64::MAX)); + assert!(u128::from_ne_bytes((*RANDOM_SEED)[16..].try_into().unwrap()) > u128::from(u64::MAX)); + } + } + } 
+} diff --git a/src/shared/crypto/mod.rs b/src/shared/crypto/mod.rs new file mode 100644 index 000000000..3c7c287b5 --- /dev/null +++ b/src/shared/crypto/mod.rs @@ -0,0 +1,3 @@ +//! Cryptographic primitives. +pub mod ephemeral_instance_keys; +pub mod keys; diff --git a/src/shared/mod.rs b/src/shared/mod.rs new file mode 100644 index 000000000..8c95effe1 --- /dev/null +++ b/src/shared/mod.rs @@ -0,0 +1,6 @@ +//! Modules with generic logic used by several modules. +//! +//! - [`bit_torrent`]: `BitTorrent` protocol related logic. +//! - [`crypto`]: Encryption related logic. +pub mod bit_torrent; +pub mod crypto; diff --git a/src/tracker.rs b/src/tracker.rs deleted file mode 100644 index 3e6bcca3e..000000000 --- a/src/tracker.rs +++ /dev/null @@ -1,439 +0,0 @@ -use serde::{Deserialize, Serialize}; -use std::borrow::Cow; -use std::collections::BTreeMap; -use tokio::sync::RwLock; -use crate::common::{NumberOfBytes, InfoHash}; -use super::common::*; -use std::net::{SocketAddr, IpAddr}; -use crate::{Configuration, http_server, key_manager, udp_server}; -use std::collections::btree_map::Entry; -use crate::database::SqliteDatabase; -use std::sync::Arc; -use log::debug; -use crate::key_manager::{AuthKey}; -use r2d2_sqlite::rusqlite; - -const TWO_HOURS: std::time::Duration = std::time::Duration::from_secs(3600 * 2); -const FIVE_MINUTES: std::time::Duration = std::time::Duration::from_secs(300); - -#[derive(Serialize, Deserialize, Clone, PartialEq)] -pub enum TrackerMode { - // Will track every new info hash and serve every peer. - #[serde(rename = "public")] - PublicMode, - - // Will only track whitelisted info hashes. 
- #[serde(rename = "listed")] - ListedMode, - - // Will only serve authenticated peers - #[serde(rename = "private")] - PrivateMode, - - // Will only track whitelisted info hashes and serve authenticated peers - #[serde(rename = "private_listed")] - PrivateListedMode, -} - -#[derive(PartialEq, Eq, Debug, Clone, Serialize)] -pub struct TorrentPeer { - #[serde(skip)] - pub peer_id: PeerId, - #[serde(rename = "ip")] - pub peer_addr: SocketAddr, - #[serde(serialize_with = "ser_instant")] - pub updated: std::time::Instant, - pub uploaded: NumberOfBytes, - pub downloaded: NumberOfBytes, - pub left: NumberOfBytes, - pub event: AnnounceEvent, -} - -impl TorrentPeer { - pub fn from_udp_announce_request(announce_request: &udp_server::AnnounceRequest, remote_addr: SocketAddr, peer_addr: Option) -> Self { - // Potentially substitute localhost IP with external IP - let peer_addr = match peer_addr { - None => SocketAddr::new(IpAddr::from(remote_addr.ip()), announce_request.port.0), - Some(peer_addr) => { - if remote_addr.ip().is_loopback() { - SocketAddr::new(IpAddr::from(peer_addr), announce_request.port.0) - } else { - SocketAddr::new(IpAddr::from(remote_addr.ip()), announce_request.port.0) - } - } - }; - - TorrentPeer { - peer_id: announce_request.peer_id, - peer_addr, - updated: std::time::Instant::now(), - uploaded: announce_request.bytes_uploaded, - downloaded: announce_request.bytes_downloaded, - left: announce_request.bytes_left, - event: announce_request.event - } - } - - pub fn from_http_announce_request(announce_request: &http_server::AnnounceRequest, remote_addr: SocketAddr, peer_addr: Option) -> Self { - // Potentially substitute localhost IP with external IP - let peer_addr = match peer_addr { - None => SocketAddr::new(IpAddr::from(remote_addr.ip()), announce_request.port), - Some(peer_addr) => { - if remote_addr.ip().is_loopback() { - SocketAddr::new(IpAddr::from(peer_addr), announce_request.port) - } else { - SocketAddr::new(IpAddr::from(remote_addr.ip()), 
announce_request.port) - } - } - }; - - let event: AnnounceEvent = if let Some(event) = &announce_request.event { - match event.as_ref() { - "started" => AnnounceEvent::Started, - "stopped" => AnnounceEvent::Stopped, - "completed" => AnnounceEvent::Completed, - _ => AnnounceEvent::None - } - } else { - AnnounceEvent::None - }; - - TorrentPeer { - peer_id: PeerId::from(announce_request.peer_id.as_bytes()), - peer_addr, - updated: std::time::Instant::now(), - uploaded: announce_request.uploaded, - downloaded: announce_request.downloaded, - left: announce_request.left, - event - } - } - - fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } - - fn is_completed(&self) -> bool { - self.event == AnnounceEvent::Completed - } -} - -fn ser_instant(inst: &std::time::Instant, ser: S) -> Result { - ser.serialize_u64(inst.elapsed().as_millis() as u64) -} - -#[derive(Serialize, Deserialize, Clone)] -pub struct TorrentEntry { - #[serde(skip)] - peers: std::collections::BTreeMap, - completed: u32, - #[serde(skip)] - seeders: u32, -} - -impl TorrentEntry { - pub fn new() -> TorrentEntry { - TorrentEntry { - peers: std::collections::BTreeMap::new(), - completed: 0, - seeders: 0, - } - } - - pub fn update_peer(&mut self, peer: &TorrentPeer) { - match peer.event { - AnnounceEvent::Stopped => { - let peer_old = self.peers.remove(&peer.peer_id); - self.update_torrent_stats_with_peer(peer, peer_old); - } - _ => { - let peer_old = self.peers.insert(peer.peer_id, peer.clone()); - self.update_torrent_stats_with_peer(peer, peer_old); - } - } - } - - pub fn get_peers(&self, remote_addr: &std::net::SocketAddr) -> Vec { - let mut list = Vec::new(); - for (_, peer) in self - .peers - .iter() - .filter(|e| e.1.peer_addr.is_ipv4()) - .take(MAX_SCRAPE_TORRENTS as usize) - { - - // skip ip address of client - if peer.peer_addr == *remote_addr { - //continue; - } - - list.push(peer.clone()); - } - list - } - - pub fn get_peers_iter(&self) -> impl Iterator { - 
self.peers.iter() - } - - pub fn update_torrent_stats_with_peer(&mut self, peer: &TorrentPeer, peer_old: Option) { - match peer_old { - None => { - if peer.is_seeder() { - self.seeders += 1; - } - - if peer.is_completed() { - self.completed += 1; - } - } - Some(peer_old) => { - match peer.event { - AnnounceEvent::None => { - if peer.is_seeder() && !peer_old.is_seeder() { - self.seeders += 1; - } - } - AnnounceEvent::Completed => { - if peer.is_seeder() && !peer_old.is_seeder() { - self.seeders += 1; - } - - // don't double count completed - if !peer_old.is_completed() { - self.completed += 1; - } - } - AnnounceEvent::Stopped => { - if peer_old.is_seeder() { - self.seeders -= 1; - } - } - // impossible, started should be the first time a peer announces itself - AnnounceEvent::Started => {} - } - } - } - } - - pub fn get_stats(&self) -> (u32, u32, u32) { - let leechers: u32 = if self.seeders < (self.peers.len() as u32) { - (self.peers.len() as u32) - self.seeders - } else { - 0 - }; - - (self.seeders, self.completed, leechers) - } -} - -#[derive(Serialize, Deserialize)] -struct DatabaseRow<'a> { - info_hash: InfoHash, - entry: Cow<'a, TorrentEntry>, -} - -#[derive(Debug)] -pub struct TorrentStats { - pub completed: u32, - pub seeders: u32, - pub leechers: u32, -} - -#[derive(Debug)] -pub enum TorrentError { - TorrentNotWhitelisted, - PeerNotAuthenticated, - PeerKeyNotValid, -} - -pub struct TorrentTracker { - pub config: Arc, - torrents: tokio::sync::RwLock>, - database: SqliteDatabase, -} - -impl TorrentTracker { - pub fn new(config: Arc) -> TorrentTracker { - let database = SqliteDatabase::new(&config.db_path).unwrap_or_else(|error| { - panic!("Could not create SQLite database. 
Reason: {}", error) - }); - - TorrentTracker { - config, - torrents: RwLock::new(std::collections::BTreeMap::new()), - database, - } - } - - pub async fn generate_auth_key(&self, seconds_valid: u64) -> Result { - let auth_key = key_manager::generate_auth_key(seconds_valid); - - // add key to database - if let Err(error) = self.database.add_key_to_keys(&auth_key).await { return Err(error) } - - Ok(auth_key) - } - - pub async fn remove_auth_key(&self, key: String) -> Result { - self.database.remove_key_from_keys(key).await - } - - pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key_manager::Error> { - let db_key = self.database.get_key_from_keys(&auth_key.key).await?; - key_manager::verify_auth_key(&db_key) - } - - pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), TorrentError> { - match self.config.mode { - TrackerMode::PublicMode => Ok(()), - TrackerMode::ListedMode => { - if !self.is_info_hash_whitelisted(info_hash).await { - return Err(TorrentError::TorrentNotWhitelisted) - } - - Ok(()) - } - TrackerMode::PrivateMode => { - match key { - Some(key) => { - if self.verify_auth_key(key).await.is_err() { - return Err(TorrentError::PeerKeyNotValid) - } - - Ok(()) - } - None => { - return Err(TorrentError::PeerNotAuthenticated) - } - } - } - TrackerMode::PrivateListedMode => { - match key { - Some(key) => { - if self.verify_auth_key(key).await.is_err() { - return Err(TorrentError::PeerKeyNotValid) - } - - if !self.is_info_hash_whitelisted(info_hash).await { - return Err(TorrentError::TorrentNotWhitelisted) - } - - Ok(()) - } - None => { - return Err(TorrentError::PeerNotAuthenticated) - } - } - } - } - } - - // Adding torrents is not relevant to public trackers. - pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result { - self.database.add_info_hash_to_whitelist(info_hash.clone()).await - } - - // Removing torrents is not relevant to public trackers. 
- pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result { - self.database.remove_info_hash_from_whitelist(info_hash.clone()).await - } - - pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { - match self.database.get_info_hash_from_whitelist(&info_hash.to_string()).await { - Ok(_) => true, - Err(_) => false - } - } - - - pub async fn get_torrent_peers( - &self, - info_hash: &InfoHash, - peer_addr: &std::net::SocketAddr - ) -> Option> { - let read_lock = self.torrents.read().await; - match read_lock.get(info_hash) { - None => { - None - } - Some(entry) => { - Some(entry.get_peers(peer_addr)) - } - } - } - - pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &TorrentPeer) -> Result { - let mut torrents = self.torrents.write().await; - - let torrent_entry = match torrents.entry(info_hash.clone()) { - Entry::Vacant(vacant) => { - Ok(vacant.insert(TorrentEntry::new())) - } - Entry::Occupied(entry) => { - Ok(entry.into_mut()) - } - }; - - match torrent_entry { - Ok(torrent_entry) => { - torrent_entry.update_peer(peer); - - let (seeders, completed, leechers) = torrent_entry.get_stats(); - - Ok(TorrentStats { - seeders, - leechers, - completed, - }) - } - Err(e) => Err(e) - } - } - - pub async fn get_torrents(&self) -> tokio::sync::RwLockReadGuard<'_, BTreeMap> { - self.torrents.read().await - } - - // remove torrents without peers - pub async fn cleanup_torrents(&self) { - debug!("Cleaning torrents.."); - let mut lock = self.torrents.write().await; - let db: &mut BTreeMap = &mut *lock; - let mut torrents_to_remove = Vec::new(); - - for (k, torrent_entry) in db.iter_mut() { - // timed-out peers.. - { - let mut peers_to_remove = Vec::new(); - let torrent_peers = &mut torrent_entry.peers; - - for (peer_id, peer) in torrent_peers.iter() { - if peer.is_seeder() { - if peer.updated.elapsed() > FIVE_MINUTES { - // remove seeders after 5 minutes since last update... 
- peers_to_remove.push(*peer_id); - torrent_entry.seeders -= 1; - } - } else if peer.updated.elapsed() > TWO_HOURS { - // remove peers after 2 hours since last update... - peers_to_remove.push(*peer_id); - } - } - - for peer_id in peers_to_remove.iter() { - torrent_peers.remove(peer_id); - } - } - - if self.config.mode.clone() == TrackerMode::PublicMode { - // peer-less torrents.. - if torrent_entry.peers.len() == 0 { - torrents_to_remove.push(k.clone()); - } - } - } - - for info_hash in torrents_to_remove { - db.remove(&info_hash); - } - } -} diff --git a/src/udp_server.rs b/src/udp_server.rs deleted file mode 100644 index 079ff67a0..000000000 --- a/src/udp_server.rs +++ /dev/null @@ -1,482 +0,0 @@ -use log::{debug}; -use std; -use std::convert::TryInto; -use std::io; -use std::net::{Ipv4Addr, SocketAddr}; -use std::sync::Arc; -use std::io::{Cursor, Read}; -use tokio::net::UdpSocket; -use byteorder::{NetworkEndian, ReadBytesExt}; - -use super::common::*; -use crate::response::*; -use crate::utils::get_connection_id; -use crate::tracker::TorrentTracker; -use crate::{TorrentPeer, TrackerMode, TorrentError}; -use crate::key_manager::AuthKey; - -#[derive(PartialEq, Eq, Clone, Debug)] -pub enum Request { - Connect(ConnectRequest), - Announce(AnnounceRequest), - Scrape(ScrapeRequest), -} - -impl From for Request { - fn from(r: ConnectRequest) -> Self { - Self::Connect(r) - } -} - -impl From for Request { - fn from(r: AnnounceRequest) -> Self { - Self::Announce(r) - } -} - -impl From for Request { - fn from(r: ScrapeRequest) -> Self { - Self::Scrape(r) - } -} - -#[derive(PartialEq, Eq, Clone, Debug)] -pub struct ConnectRequest { - pub transaction_id: TransactionId, -} - -#[derive(PartialEq, Eq, Clone, Debug)] -pub struct AnnounceRequest { - pub connection_id: ConnectionId, - pub transaction_id: TransactionId, - pub info_hash: InfoHash, - pub peer_id: PeerId, - pub bytes_downloaded: NumberOfBytes, - pub bytes_uploaded: NumberOfBytes, - pub bytes_left: NumberOfBytes, - pub 
event: AnnounceEvent, - pub ip_address: Option, - pub key: PeerKey, - pub peers_wanted: NumberOfPeers, - pub port: Port, - pub auth_key: Option, -} - -#[derive(PartialEq, Eq, Clone, Debug)] -pub struct ScrapeRequest { - pub connection_id: ConnectionId, - pub transaction_id: TransactionId, - pub info_hashes: Vec, -} - -#[derive(Debug)] -pub struct RequestParseError { - pub transaction_id: Option, - pub message: Option, - pub error: Option, -} - -impl RequestParseError { - pub fn new(err: io::Error, transaction_id: i32) -> Self { - Self { - transaction_id: Some(TransactionId(transaction_id)), - message: None, - error: Some(err), - } - } - pub fn io(err: io::Error) -> Self { - Self { - transaction_id: None, - message: None, - error: Some(err), - } - } - pub fn text(transaction_id: i32, message: &str) -> Self { - Self { - transaction_id: Some(TransactionId(transaction_id)), - message: Some(message.to_string()), - error: None, - } - } -} - -impl Request { - pub fn from_bytes(bytes: &[u8]) -> Result { - let mut cursor = Cursor::new(bytes); - - let connection_id = cursor - .read_i64::() - .map_err(RequestParseError::io)?; - let action = cursor - .read_i32::() - .map_err(RequestParseError::io)?; - let transaction_id = cursor - .read_i32::() - .map_err(RequestParseError::io)?; - - - - match action { - // Connect - 0 => { - if connection_id == PROTOCOL_ID { - Ok((ConnectRequest { - transaction_id: TransactionId(transaction_id), - }) - .into()) - } else { - Err(RequestParseError::text( - transaction_id, - "Protocol identifier missing", - )) - } - } - - // Announce - 1 => { - let mut info_hash = [0; 20]; - let mut peer_id = [0; 20]; - let mut ip = [0; 4]; - - cursor - .read_exact(&mut info_hash) - .map_err(|err| RequestParseError::new(err, transaction_id))?; - cursor - .read_exact(&mut peer_id) - .map_err(|err| RequestParseError::new(err, transaction_id))?; - - let bytes_downloaded = cursor - .read_i64::() - .map_err(|err| RequestParseError::new(err, transaction_id))?; - let 
bytes_left = cursor - .read_i64::() - .map_err(|err| RequestParseError::new(err, transaction_id))?; - let bytes_uploaded = cursor - .read_i64::() - .map_err(|err| RequestParseError::new(err, transaction_id))?; - let event = cursor - .read_i32::() - .map_err(|err| RequestParseError::new(err, transaction_id))?; - - cursor - .read_exact(&mut ip) - .map_err(|err| RequestParseError::new(err, transaction_id))?; - - let key = cursor - .read_u32::() - .map_err(|err| RequestParseError::new(err, transaction_id))?; - let peers_wanted = cursor - .read_i32::() - .map_err(|err| RequestParseError::new(err, transaction_id))?; - let port = cursor - .read_u16::() - .map_err(|err| RequestParseError::new(err, transaction_id))?; - - // BEP 41: add auth key if available - let auth_key: Option = if bytes.len() > 98 + AUTH_KEY_LENGTH { - let mut key_buffer = [0; AUTH_KEY_LENGTH]; - // key should be the last bytes - cursor.set_position((bytes.len() - AUTH_KEY_LENGTH) as u64); - if cursor.read_exact(&mut key_buffer).is_ok() { - debug!("AuthKey buffer: {:?}", key_buffer); - AuthKey::from_buffer(key_buffer) - } else { - None - } - } else { - None - }; - - let opt_ip = if ip == [0; 4] { - None - } else { - Some(Ipv4Addr::from(ip)) - }; - - Ok((AnnounceRequest { - connection_id: ConnectionId(connection_id), - transaction_id: TransactionId(transaction_id), - info_hash: InfoHash(info_hash), - peer_id: PeerId(peer_id), - bytes_downloaded: NumberOfBytes(bytes_downloaded), - bytes_uploaded: NumberOfBytes(bytes_uploaded), - bytes_left: NumberOfBytes(bytes_left), - event: AnnounceEvent::from_i32(event), - ip_address: opt_ip, - key: PeerKey(key), - peers_wanted: NumberOfPeers(peers_wanted), - port: Port(port), - auth_key, - }) - .into()) - } - - // Scrape - 2 => { - let position = cursor.position() as usize; - let inner = cursor.into_inner(); - - let info_hashes = (&inner[position..]) - .chunks_exact(20) - .take(MAX_SCRAPE_TORRENTS as usize) - .map(|chunk| InfoHash(chunk.try_into().unwrap())) - 
.collect(); - - Ok((ScrapeRequest { - connection_id: ConnectionId(connection_id), - transaction_id: TransactionId(transaction_id), - info_hashes, - }) - .into()) - } - - _ => Err(RequestParseError::text(transaction_id, "Invalid action")), - } - } -} - -pub struct UdpServer { - socket: UdpSocket, - tracker: Arc, -} - -impl UdpServer { - pub async fn new(tracker: Arc) -> Result { - let srv = UdpSocket::bind(&tracker.config.udp_tracker.bind_address).await?; - - Ok(UdpServer { - socket: srv, - tracker, - }) - } - - pub async fn authenticate_announce_request(&self, announce_request: &AnnounceRequest) -> Result<(), TorrentError> { - match self.tracker.config.mode { - TrackerMode::PublicMode => Ok(()), - TrackerMode::ListedMode => { - if !self.tracker.is_info_hash_whitelisted(&announce_request.info_hash).await { - return Err(TorrentError::TorrentNotWhitelisted) - } - - Ok(()) - } - TrackerMode::PrivateMode => { - match &announce_request.auth_key { - Some(auth_key) => { - if self.tracker.verify_auth_key(auth_key).await.is_err() { - return Err(TorrentError::PeerKeyNotValid) - } - - Ok(()) - } - None => { - return Err(TorrentError::PeerNotAuthenticated) - } - } - } - TrackerMode::PrivateListedMode => { - match &announce_request.auth_key { - Some(auth_key) => { - if self.tracker.verify_auth_key(auth_key).await.is_err() { - return Err(TorrentError::PeerKeyNotValid) - } - - if !self.tracker.is_info_hash_whitelisted(&announce_request.info_hash).await { - return Err(TorrentError::TorrentNotWhitelisted) - } - - Ok(()) - } - None => { - return Err(TorrentError::PeerNotAuthenticated) - } - } - } - } - } - - pub async fn accept_packets(self) -> Result<(), std::io::Error> { - let tracker = Arc::new(self); - - loop { - let mut packet = vec![0u8; MAX_PACKET_SIZE]; - let (size, remote_address) = tracker.socket.recv_from(packet.as_mut_slice()).await?; - - let tracker = tracker.clone(); - tokio::spawn(async move { - debug!("Received {} bytes from {}", size, remote_address); - 
tracker.handle_packet(remote_address, &packet[..size]).await; - }); - } - } - - async fn handle_packet(&self, remote_addr: SocketAddr, payload: &[u8]) { - let request = Request::from_bytes(&payload[..payload.len()]); - - match request { - Ok(request) => { - debug!("New request: {:?}", request); - - // todo: check for expired connection_id - match request { - Request::Connect(r) => self.handle_connect(remote_addr, r).await, - Request::Announce(r) => { - match self.tracker.authenticate_request(&r.info_hash, &r.auth_key).await { - Ok(()) => self.handle_announce(remote_addr, r).await, - Err(e) => { - match e { - TorrentError::TorrentNotWhitelisted => { - debug!("Info_hash not whitelisted."); - self.send_error(remote_addr, &r.transaction_id, "torrent not whitelisted").await; - } - TorrentError::PeerKeyNotValid => { - debug!("Peer key not valid."); - self.send_error(remote_addr, &r.transaction_id, "peer key not valid").await; - } - TorrentError::PeerNotAuthenticated => { - debug!("Peer not authenticated."); - self.send_error(remote_addr, &r.transaction_id, "peer not authenticated").await; - } - } - } - } - }, - Request::Scrape(r) => self.handle_scrape(remote_addr, r).await - } - } - Err(err) => { - debug!("request_from_bytes error: {:?}", err); - } - } - } - - async fn handle_connect(&self, remote_addr: SocketAddr, request: ConnectRequest) { - let connection_id = get_connection_id(&remote_addr); - - let response = UdpResponse::from(UdpConnectionResponse { - action: Actions::Connect, - transaction_id: request.transaction_id, - connection_id, - }); - - let _ = self.send_response(remote_addr, response).await; - } - - async fn handle_announce(&self, remote_addr: SocketAddr, request: AnnounceRequest) { - let peer = TorrentPeer::from_udp_announce_request(&request, remote_addr, self.tracker.config.get_ext_ip()); - - match self.tracker.update_torrent_with_peer_and_get_stats(&request.info_hash, &peer).await { - Ok(torrent_stats) => { - // get all peers excluding the client_addr - 
let peers = match self.tracker.get_torrent_peers(&request.info_hash, &peer.peer_addr).await { - Some(v) => v, - None => { - debug!("announce: No peers found."); - return; - } - }; - - let response = UdpResponse::from(UdpAnnounceResponse { - action: Actions::Announce, - transaction_id: request.transaction_id, - interval: self.tracker.config.udp_tracker.announce_interval, - leechers: torrent_stats.leechers, - seeders: torrent_stats.seeders, - peers, - }); - - let _ = self.send_response(remote_addr, response).await; - } - Err(e) => { - debug!("{:?}", e); - self.send_error(remote_addr, &request.transaction_id, "error adding torrent").await; - } - } - } - - async fn handle_scrape(&self, remote_addr: SocketAddr, request: ScrapeRequest) { - let mut scrape_response = UdpScrapeResponse { - action: Actions::Scrape, - transaction_id: request.transaction_id, - torrent_stats: Vec::new(), - }; - - let db = self.tracker.get_torrents().await; - - for info_hash in request.info_hashes.iter() { - let scrape_entry = match db.get(&info_hash) { - Some(torrent_info) => { - let (seeders, completed, leechers) = torrent_info.get_stats(); - - UdpScrapeResponseEntry { - seeders: seeders as i32, - completed: completed as i32, - leechers: leechers as i32, - } - } - None => { - UdpScrapeResponseEntry { - seeders: 0, - completed: 0, - leechers: 0, - } - } - }; - - scrape_response.torrent_stats.push(scrape_entry); - } - - let response = UdpResponse::from(scrape_response); - - let _ = self.send_response(remote_addr, response).await; - } - - async fn send_response(&self, remote_addr: SocketAddr, response: UdpResponse) -> Result { - debug!("sending response to: {:?}", &remote_addr); - - let buffer = vec![0u8; MAX_PACKET_SIZE]; - let mut cursor = Cursor::new(buffer); - - match response.write_to_bytes(&mut cursor) { - Ok(_) => { - let position = cursor.position() as usize; - let inner = cursor.get_ref(); - - debug!("{:?}", &inner[..position]); - match self.send_packet(&remote_addr, 
&inner[..position]).await { - Ok(byte_size) => Ok(byte_size), - Err(e) => { - debug!("{:?}", e); - Err(()) - } - } - } - Err(_) => { - debug!("could not write response to bytes."); - Err(()) - } - } - } - - async fn send_packet(&self, remote_addr: &SocketAddr, payload: &[u8]) -> Result { - match self.socket.send_to(payload, remote_addr).await { - Err(err) => { - debug!("failed to send a packet: {}", err); - Err(err) - }, - Ok(sz) => Ok(sz), - } - } - - async fn send_error(&self, remote_addr: SocketAddr, transaction_id: &TransactionId, error_msg: &str) { - let error_response = UdpErrorResponse { - action: Actions::Error, - transaction_id: transaction_id.clone(), - message: error_msg.to_string(), - }; - - let response = UdpResponse::from(error_response); - - let _ = self.send_response(remote_addr, response).await; - } -} diff --git a/src/utils.rs b/src/utils.rs deleted file mode 100644 index 11c61e4fb..000000000 --- a/src/utils.rs +++ /dev/null @@ -1,31 +0,0 @@ -use std::net::SocketAddr; -use crate::common::*; -use std::time::SystemTime; -use std::error::Error; -use std::fmt::Write; - -pub fn get_connection_id(remote_address: &SocketAddr) -> ConnectionId { - match std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH) { - Ok(duration) => ConnectionId(((duration.as_secs() / 3600) | ((remote_address.port() as u64) << 36)) as i64), - Err(_) => ConnectionId(0x7FFFFFFFFFFFFFFF), - } -} - -pub fn current_time() -> u64 { - SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH).unwrap() - .as_secs() -} - -pub fn url_encode_bytes(content: &[u8]) -> Result> { - let mut out: String = String::new(); - - for byte in content.iter() { - match *byte as char { - '0'..='9' | 'a'..='z' | 'A'..='Z' | '.' 
| '-' | '_' | '~' => out.push(*byte as char), - _ => write!(&mut out, "%{:02x}", byte)?, - }; - } - - Ok(out) -} diff --git a/tests/common/clock.rs b/tests/common/clock.rs new file mode 100644 index 000000000..de3cc7c65 --- /dev/null +++ b/tests/common/clock.rs @@ -0,0 +1,22 @@ +use std::time::Duration; + +use torrust_tracker_clock::clock::Time; +use tracing::level_filters::LevelFilter; + +use crate::common::logging::{tracing_stderr_init, INIT}; +use crate::CurrentClock; + +#[test] +fn it_should_use_stopped_time_for_testing() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + assert_eq!(CurrentClock::dbg_clock_type(), "Stopped".to_owned()); + + let time = CurrentClock::now(); + std::thread::sleep(Duration::from_millis(50)); + let time_2 = CurrentClock::now(); + + assert_eq!(time, time_2); +} diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs new file mode 100644 index 000000000..bbdebff76 --- /dev/null +++ b/tests/common/fixtures.rs @@ -0,0 +1,12 @@ +#[allow(dead_code)] +pub fn invalid_info_hashes() -> Vec { + [ + "0".to_string(), + "-1".to_string(), + "1.1".to_string(), + "INVALID INFOHASH".to_string(), + "9c38422213e30bff212b30c360d26f9a0213642".to_string(), // 39-char length instead of 40 + "9c38422213e30bff212b30c360d26f9a0213642&".to_string(), // Invalid char + ] + .to_vec() +} diff --git a/tests/common/http.rs b/tests/common/http.rs new file mode 100644 index 000000000..d682027fd --- /dev/null +++ b/tests/common/http.rs @@ -0,0 +1,54 @@ +pub type ReqwestQuery = Vec; +pub type ReqwestQueryParam = (String, String); + +/// URL Query component +#[derive(Default, Debug)] +pub struct Query { + params: Vec, +} + +impl Query { + pub fn empty() -> Self { + Self { params: vec![] } + } + + pub fn params(params: Vec) -> Self { + Self { params } + } + + pub fn add_param(&mut self, param: QueryParam) { + self.params.push(param); + } +} + +impl From for ReqwestQuery { + fn from(url_search_params: Query) -> Self { + url_search_params + 
.params + .iter() + .map(|param| ReqwestQueryParam::from((*param).clone())) + .collect() + } +} + +/// URL query param +#[derive(Clone, Debug)] +pub struct QueryParam { + name: String, + value: String, +} + +impl QueryParam { + pub fn new(name: &str, value: &str) -> Self { + Self { + name: name.to_string(), + value: value.to_string(), + } + } +} + +impl From for ReqwestQueryParam { + fn from(param: QueryParam) -> Self { + (param.name, param.value) + } +} diff --git a/tests/common/logging.rs b/tests/common/logging.rs new file mode 100644 index 000000000..71be2ece7 --- /dev/null +++ b/tests/common/logging.rs @@ -0,0 +1,30 @@ +#![allow(clippy::doc_markdown)] +//! Logging for the Integration Tests +//! +//! Tests should start their own logging. +//! +//! To find tests that do not start their own logging: +//! +//! ´´´ sh +//! awk 'BEGIN{RS=""; FS="\n"} /#\[tokio::test\]\s*async\s+fn\s+\w+\s*\(\s*\)\s*\{[^}]*\}/ && !/#\[tokio::test\]\s*async\s+fn\s+\w+\s*\(\s*\)\s*\{[^}]*INIT\.call_once/' $(find . -name "*.rs") +//! ´´´ +//! 
+ +use std::sync::Once; + +use tracing::level_filters::LevelFilter; + +#[allow(dead_code)] +pub static INIT: Once = Once::new(); + +#[allow(dead_code)] +pub fn tracing_stderr_init(filter: LevelFilter) { + let builder = tracing_subscriber::fmt() + .with_max_level(filter) + .with_ansi(true) + .with_writer(std::io::stderr); + + builder.pretty().with_file(true).init(); + + tracing::info!("Logging initialized"); +} diff --git a/tests/common/mod.rs b/tests/common/mod.rs new file mode 100644 index 000000000..9589ccb1e --- /dev/null +++ b/tests/common/mod.rs @@ -0,0 +1,5 @@ +pub mod clock; +pub mod fixtures; +pub mod http; +pub mod logging; +pub mod udp; diff --git a/tests/common/udp.rs b/tests/common/udp.rs new file mode 100644 index 000000000..3d84e2b97 --- /dev/null +++ b/tests/common/udp.rs @@ -0,0 +1,41 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use tokio::net::UdpSocket; + +/// A generic UDP client +pub struct Client { + pub socket: Arc, +} + +impl Client { + #[allow(dead_code)] + pub async fn connected(remote_socket_addr: &SocketAddr, local_socket_addr: &SocketAddr) -> Client { + let client = Client::bind(local_socket_addr).await; + client.connect(remote_socket_addr).await; + client + } + + pub async fn bind(local_socket_addr: &SocketAddr) -> Self { + let socket = UdpSocket::bind(local_socket_addr).await.unwrap(); + Self { + socket: Arc::new(socket), + } + } + + pub async fn connect(&self, remote_address: &SocketAddr) { + self.socket.connect(remote_address).await.unwrap(); + } + + #[allow(dead_code)] + pub async fn send(&self, bytes: &[u8]) -> usize { + self.socket.writable().await.unwrap(); + self.socket.send(bytes).await.unwrap() + } + + #[allow(dead_code)] + pub async fn receive(&self, bytes: &mut [u8]) -> usize { + self.socket.readable().await.unwrap(); + self.socket.recv(bytes).await.unwrap() + } +} diff --git a/tests/integration.rs b/tests/integration.rs new file mode 100644 index 000000000..8e3d46826 --- /dev/null +++ b/tests/integration.rs @@ -0,0 
+1,20 @@ +//! Integration tests. +//! +//! ```text +//! cargo test --test integration +//! ``` + +use torrust_tracker_clock::clock; +mod common; +mod servers; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; diff --git a/tests/servers/api/connection_info.rs b/tests/servers/api/connection_info.rs new file mode 100644 index 000000000..35314a2fd --- /dev/null +++ b/tests/servers/api/connection_info.rs @@ -0,0 +1,29 @@ +pub fn connection_with_invalid_token(bind_address: &str) -> ConnectionInfo { + ConnectionInfo::authenticated(bind_address, "invalid token") +} + +pub fn connection_with_no_token(bind_address: &str) -> ConnectionInfo { + ConnectionInfo::anonymous(bind_address) +} + +#[derive(Clone)] +pub struct ConnectionInfo { + pub bind_address: String, + pub api_token: Option, +} + +impl ConnectionInfo { + pub fn authenticated(bind_address: &str, api_token: &str) -> Self { + Self { + bind_address: bind_address.to_string(), + api_token: Some(api_token.to_string()), + } + } + + pub fn anonymous(bind_address: &str) -> Self { + Self { + bind_address: bind_address.to_string(), + api_token: None, + } + } +} diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs new file mode 100644 index 000000000..2f4606be7 --- /dev/null +++ b/tests/servers/api/environment.rs @@ -0,0 +1,96 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use futures::executor::block_on; +use torrust_tracker::bootstrap::app::initialize_with_configuration; +use torrust_tracker::bootstrap::jobs::make_rust_tls; +use torrust_tracker::core::Tracker; +use torrust_tracker::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; +use torrust_tracker::servers::registar::Registar; +use torrust_tracker_configuration::{Configuration, 
HttpApi}; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; + +use super::connection_info::ConnectionInfo; + +pub struct Environment +where + S: std::fmt::Debug + std::fmt::Display, +{ + pub config: Arc, + pub tracker: Arc, + pub registar: Registar, + pub server: ApiServer, +} + +impl Environment +where + S: std::fmt::Debug + std::fmt::Display, +{ + /// Add a torrent to the tracker + pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + self.tracker.upsert_peer_and_get_stats(info_hash, peer); + } +} + +impl Environment { + pub fn new(configuration: &Arc) -> Self { + let tracker = initialize_with_configuration(configuration); + + let config = Arc::new(configuration.http_api.clone().expect("missing API configuration")); + + let bind_to = config.bind_address; + + let tls = block_on(make_rust_tls(&config.tsl_config)).map(|tls| tls.expect("tls config failed")); + + let server = ApiServer::new(Launcher::new(bind_to, tls)); + + Self { + config, + tracker, + registar: Registar::default(), + server, + } + } + + pub async fn start(self) -> Environment { + let access_tokens = Arc::new(self.config.access_tokens.clone()); + + Environment { + config: self.config, + tracker: self.tracker.clone(), + registar: self.registar.clone(), + server: self + .server + .start(self.tracker, self.registar.give_form(), access_tokens) + .await + .unwrap(), + } + } +} + +impl Environment { + pub async fn new(configuration: &Arc) -> Self { + Environment::::new(configuration).start().await + } + + pub async fn stop(self) -> Environment { + Environment { + config: self.config, + tracker: self.tracker, + registar: Registar::default(), + server: self.server.stop().await.unwrap(), + } + } + + pub fn get_connection_info(&self) -> ConnectionInfo { + ConnectionInfo { + bind_address: self.server.state.local_addr.to_string(), + api_token: self.config.access_tokens.get("admin").cloned(), + } + } + + pub fn bind_address(&self) -> SocketAddr { + 
self.server.state.local_addr + } +} diff --git a/tests/servers/api/mod.rs b/tests/servers/api/mod.rs new file mode 100644 index 000000000..38df46e9b --- /dev/null +++ b/tests/servers/api/mod.rs @@ -0,0 +1,20 @@ +use std::sync::Arc; + +use torrust_tracker::core::Tracker; +use torrust_tracker::servers::apis::server; + +pub mod connection_info; +pub mod environment; +pub mod v1; + +pub type Started = environment::Environment; + +/// It forces a database error by dropping all tables. +/// That makes any query fail. +/// code-review: +/// Alternatively we could: +/// - Inject a database mock in the future. +/// - Inject directly the database reference passed to the Tracker type. +pub fn force_database_error(tracker: &Arc) { + tracker.drop_database_tables().unwrap(); +} diff --git a/tests/servers/api/v1/asserts.rs b/tests/servers/api/v1/asserts.rs new file mode 100644 index 000000000..aeecfa170 --- /dev/null +++ b/tests/servers/api/v1/asserts.rs @@ -0,0 +1,167 @@ +// code-review: should we use macros to return the exact line where the assert fails? 
+ +use reqwest::Response; +use torrust_tracker::servers::apis::v1::context::auth_key::resources::AuthKey; +use torrust_tracker::servers::apis::v1::context::stats::resources::Stats; +use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{ListItem, Torrent}; + +// Resource responses + +pub async fn assert_stats(response: Response, stats: Stats) { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.json::().await.unwrap(), stats); +} + +pub async fn assert_torrent_list(response: Response, torrents: Vec) { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.json::>().await.unwrap(), torrents); +} + +pub async fn assert_torrent_info(response: Response, torrent: Torrent) { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.json::().await.unwrap(), torrent); +} + +pub async fn assert_auth_key_utf8(response: Response) -> AuthKey { + assert_eq!(response.status(), 200); + assert_eq!( + response.headers().get("content-type").unwrap(), + "application/json; charset=utf-8" + ); + response.json::().await.unwrap() +} + +// OK response + +pub async fn assert_ok(response: Response) { + let response_status = response.status(); + let response_headers = response.headers().get("content-type").cloned().unwrap(); + let response_text = response.text().await.unwrap(); + + let details = format!( + r#" + status: ´{response_status}´ + headers: ´{response_headers:?}´ + text: ´"{response_text}"´"# + ); + + assert_eq!(response_status, 200, "details:{details}."); + assert_eq!(response_headers, "application/json", "\ndetails:{details}."); + assert_eq!(response_text, "{\"status\":\"ok\"}", "\ndetails:{details}."); +} + +// Error responses + +pub async fn assert_bad_request(response: Response, body: &str) { 
+ assert_eq!(response.status(), 400); + assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); + assert_eq!(response.text().await.unwrap(), body); +} + +pub async fn assert_bad_request_with_text(response: Response, text: &str) { + assert_eq!(response.status(), 400); + assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); + assert!(response.text().await.unwrap().contains(text)); +} + +pub async fn assert_unprocessable_content(response: Response, text: &str) { + assert_eq!(response.status(), 422); + assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); + assert!(response.text().await.unwrap().contains(text)); +} + +pub async fn assert_not_found(response: Response) { + assert_eq!(response.status(), 404); + // todo: missing header in the response + //assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); + assert_eq!(response.text().await.unwrap(), ""); +} + +pub async fn assert_torrent_not_known(response: Response) { + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.text().await.unwrap(), "\"torrent not known\""); +} + +pub async fn assert_invalid_infohash_param(response: Response, invalid_infohash: &str) { + assert_bad_request( + response, + &format!("Invalid URL: invalid infohash param: string \"{invalid_infohash}\", expected a 40 character long string"), + ) + .await; +} + +pub async fn assert_invalid_auth_key_get_param(response: Response, invalid_auth_key: &str) { + assert_bad_request(response, &format!("Invalid auth key id param \"{}\"", &invalid_auth_key)).await; +} + +pub async fn assert_invalid_auth_key_post_param(response: Response, invalid_auth_key: &str) { + assert_bad_request_with_text( + response, + &format!("Invalid URL: invalid auth key: string \"{}\"", &invalid_auth_key), + ) + .await; +} + +pub async fn 
assert_unprocessable_auth_key_duration_param(response: Response, _invalid_value: &str) { + assert_unprocessable_content( + response, + "Failed to deserialize the JSON body into the target type: seconds_valid: invalid type", + ) + .await; +} + +pub async fn assert_invalid_key_duration_param(response: Response, invalid_key_duration: &str) { + assert_bad_request( + response, + &format!("Invalid URL: Cannot parse `\"{invalid_key_duration}\"` to a `u64`"), + ) + .await; +} + +pub async fn assert_token_not_valid(response: Response) { + assert_unhandled_rejection(response, "token not valid").await; +} + +pub async fn assert_unauthorized(response: Response) { + assert_unhandled_rejection(response, "unauthorized").await; +} + +pub async fn assert_failed_to_remove_torrent_from_whitelist(response: Response) { + assert_unhandled_rejection(response, "failed to remove torrent from whitelist").await; +} + +pub async fn assert_failed_to_whitelist_torrent(response: Response) { + assert_unhandled_rejection(response, "failed to whitelist torrent").await; +} + +pub async fn assert_failed_to_reload_whitelist(response: Response) { + assert_unhandled_rejection(response, "failed to reload whitelist").await; +} + +pub async fn assert_failed_to_generate_key(response: Response) { + assert_unhandled_rejection(response, "failed to generate key").await; +} + +pub async fn assert_failed_to_delete_key(response: Response) { + assert_unhandled_rejection(response, "failed to delete key").await; +} + +pub async fn assert_failed_to_reload_keys(response: Response) { + assert_unhandled_rejection(response, "failed to reload keys").await; +} + +async fn assert_unhandled_rejection(response: Response, reason: &str) { + assert_eq!(response.status(), 500); + assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); + + let reason_text = format!("Unhandled rejection: Err {{ reason: \"{reason}"); + let response_text = response.text().await.unwrap(); + assert!( + 
response_text.contains(&reason_text), + ":\n response: `\"{response_text}\"`\n does not contain: `\"{reason_text}\"`." + ); +} diff --git a/tests/servers/api/v1/client.rs b/tests/servers/api/v1/client.rs new file mode 100644 index 000000000..3d95c10ca --- /dev/null +++ b/tests/servers/api/v1/client.rs @@ -0,0 +1,138 @@ +use reqwest::Response; +use serde::Serialize; + +use crate::common::http::{Query, QueryParam, ReqwestQuery}; +use crate::servers::api::connection_info::ConnectionInfo; + +/// API Client +pub struct Client { + connection_info: ConnectionInfo, + base_path: String, +} + +impl Client { + pub fn new(connection_info: ConnectionInfo) -> Self { + Self { + connection_info, + base_path: "/api/v1/".to_string(), + } + } + + pub async fn generate_auth_key(&self, seconds_valid: i32) -> Response { + self.post_empty(&format!("key/{}", &seconds_valid)).await + } + + pub async fn add_auth_key(&self, add_key_form: AddKeyForm) -> Response { + self.post_form("keys", &add_key_form).await + } + + pub async fn delete_auth_key(&self, key: &str) -> Response { + self.delete(&format!("key/{}", &key)).await + } + + pub async fn reload_keys(&self) -> Response { + self.get("keys/reload", Query::default()).await + } + + pub async fn whitelist_a_torrent(&self, info_hash: &str) -> Response { + self.post_empty(&format!("whitelist/{}", &info_hash)).await + } + + pub async fn remove_torrent_from_whitelist(&self, info_hash: &str) -> Response { + self.delete(&format!("whitelist/{}", &info_hash)).await + } + + pub async fn reload_whitelist(&self) -> Response { + self.get("whitelist/reload", Query::default()).await + } + + pub async fn get_torrent(&self, info_hash: &str) -> Response { + self.get(&format!("torrent/{}", &info_hash), Query::default()).await + } + + pub async fn get_torrents(&self, params: Query) -> Response { + self.get("torrents", params).await + } + + pub async fn get_tracker_statistics(&self) -> Response { + self.get("stats", Query::default()).await + } + + pub async fn 
get(&self, path: &str, params: Query) -> Response { + let mut query: Query = params; + + if let Some(token) = &self.connection_info.api_token { + query.add_param(QueryParam::new("token", token)); + }; + + self.get_request_with_query(path, query).await + } + + pub async fn post_empty(&self, path: &str) -> Response { + reqwest::Client::new() + .post(self.base_url(path).clone()) + .query(&ReqwestQuery::from(self.query_with_token())) + .send() + .await + .unwrap() + } + + pub async fn post_form(&self, path: &str, form: &T) -> Response { + reqwest::Client::new() + .post(self.base_url(path).clone()) + .query(&ReqwestQuery::from(self.query_with_token())) + .json(&form) + .send() + .await + .unwrap() + } + + async fn delete(&self, path: &str) -> Response { + reqwest::Client::new() + .delete(self.base_url(path).clone()) + .query(&ReqwestQuery::from(self.query_with_token())) + .send() + .await + .unwrap() + } + + pub async fn get_request_with_query(&self, path: &str, params: Query) -> Response { + get(&self.base_url(path), Some(params)).await + } + + pub async fn get_request(&self, path: &str) -> Response { + get(&self.base_url(path), None).await + } + + fn query_with_token(&self) -> Query { + match &self.connection_info.api_token { + Some(token) => Query::params([QueryParam::new("token", token)].to_vec()), + None => Query::default(), + } + } + + fn base_url(&self, path: &str) -> String { + format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) + } +} + +pub async fn get(path: &str, query: Option) -> Response { + match query { + Some(params) => reqwest::Client::builder() + .build() + .unwrap() + .get(path) + .query(&ReqwestQuery::from(params)) + .send() + .await + .unwrap(), + None => reqwest::Client::builder().build().unwrap().get(path).send().await.unwrap(), + } +} + +#[derive(Serialize, Debug)] +pub struct AddKeyForm { + #[serde(rename = "key")] + pub opt_key: Option, + pub seconds_valid: Option, +} diff --git 
a/tests/servers/api/v1/contract/authentication.rs b/tests/servers/api/v1/contract/authentication.rs new file mode 100644 index 000000000..5c5cd3ae0 --- /dev/null +++ b/tests/servers/api/v1/contract/authentication.rs @@ -0,0 +1,105 @@ +use torrust_tracker_test_helpers::configuration; +use tracing::level_filters::LevelFilter; + +use crate::common::http::{Query, QueryParam}; +use crate::common::logging::{tracing_stderr_init, INIT}; +use crate::servers::api::v1::asserts::{assert_token_not_valid, assert_unauthorized}; +use crate::servers::api::v1::client::Client; +use crate::servers::api::Started; + +#[tokio::test] +async fn should_authenticate_requests_by_using_a_token_query_param() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let token = env.get_connection_info().api_token.unwrap(); + + let response = Client::new(env.get_connection_info()) + .get_request_with_query("stats", Query::params([QueryParam::new("token", &token)].to_vec())) + .await; + + assert_eq!(response.status(), 200); + + env.stop().await; +} + +#[tokio::test] +async fn should_not_authenticate_requests_when_the_token_is_missing() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let response = Client::new(env.get_connection_info()) + .get_request_with_query("stats", Query::default()) + .await; + + assert_unauthorized(response).await; + + env.stop().await; +} + +#[tokio::test] +async fn should_not_authenticate_requests_when_the_token_is_empty() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let response = Client::new(env.get_connection_info()) + .get_request_with_query("stats", Query::params([QueryParam::new("token", "")].to_vec())) + .await; + + assert_token_not_valid(response).await; + + env.stop().await; +} + 
+#[tokio::test] +async fn should_not_authenticate_requests_when_the_token_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let response = Client::new(env.get_connection_info()) + .get_request_with_query("stats", Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec())) + .await; + + assert_token_not_valid(response).await; + + env.stop().await; +} + +#[tokio::test] +async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_query() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let token = env.get_connection_info().api_token.unwrap(); + + // At the beginning of the query component + let response = Client::new(env.get_connection_info()) + .get_request(&format!("torrents?token={token}&limit=1")) + .await; + + assert_eq!(response.status(), 200); + + // At the end of the query component + let response = Client::new(env.get_connection_info()) + .get_request(&format!("torrents?limit=1&token={token}")) + .await; + + assert_eq!(response.status(), 200); + + env.stop().await; +} diff --git a/tests/servers/api/v1/contract/configuration.rs b/tests/servers/api/v1/contract/configuration.rs new file mode 100644 index 000000000..be42f16ad --- /dev/null +++ b/tests/servers/api/v1/contract/configuration.rs @@ -0,0 +1,41 @@ +// use std::sync::Arc; + +// use axum_server::tls_rustls::RustlsConfig; +// use futures::executor::block_on; +// use torrust_tracker_test_helpers::configuration; + +// use crate::common::app::setup_with_configuration; +// use crate::servers::api::environment::stopped_environment; + +use tracing::level_filters::LevelFilter; + +use crate::common::logging::{tracing_stderr_init, INIT}; + +#[tokio::test] +#[ignore] +#[should_panic = "Could not receive bind_address."] +async fn should_fail_with_ssl_enabled_and_bad_ssl_config() { + 
INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + // let tracker = setup_with_configuration(&Arc::new(configuration::ephemeral())); + + // let config = tracker.config.http_api.clone(); + + // let bind_to = config + // .bind_address + // .parse::() + // .expect("Tracker API bind_address invalid."); + + // let tls = + // if let (true, Some(cert), Some(key)) = (&true, &Some("bad cert path".to_string()), &Some("bad cert path".to_string())) { + // Some(block_on(RustlsConfig::from_pem_file(cert, key)).expect("Could not read tls cert.")) + // } else { + // None + // }; + + // let env = new_stopped(tracker, bind_to, tls); + + // env.start().await; +} diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs new file mode 100644 index 000000000..2792a513c --- /dev/null +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -0,0 +1,494 @@ +use std::time::Duration; + +use serde::Serialize; +use torrust_tracker::core::auth::Key; +use torrust_tracker_test_helpers::configuration; +use tracing::level_filters::LevelFilter; + +use crate::common::logging::{tracing_stderr_init, INIT}; +use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::servers::api::v1::asserts::{ + assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, + assert_invalid_auth_key_get_param, assert_invalid_auth_key_post_param, assert_ok, assert_token_not_valid, + assert_unauthorized, assert_unprocessable_auth_key_duration_param, +}; +use crate::servers::api::v1::client::{AddKeyForm, Client}; +use crate::servers::api::{force_database_error, Started}; + +#[tokio::test] +async fn should_allow_generating_a_new_random_auth_key() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let response = 
Client::new(env.get_connection_info()) + .add_auth_key(AddKeyForm { + opt_key: None, + seconds_valid: Some(60), + }) + .await; + + let auth_key_resource = assert_auth_key_utf8(response).await; + + assert!(env + .tracker + .authenticate(&auth_key_resource.key.parse::().unwrap()) + .await + .is_ok()); + + env.stop().await; +} + +#[tokio::test] +async fn should_allow_uploading_a_preexisting_auth_key() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let response = Client::new(env.get_connection_info()) + .add_auth_key(AddKeyForm { + opt_key: Some("Xc1L4PbQJSFGlrgSRZl8wxSFAuMa21z5".to_string()), + seconds_valid: Some(60), + }) + .await; + + let auth_key_resource = assert_auth_key_utf8(response).await; + + assert!(env + .tracker + .authenticate(&auth_key_resource.key.parse::().unwrap()) + .await + .is_ok()); + + env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .add_auth_key(AddKeyForm { + opt_key: None, + seconds_valid: Some(60), + }) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + .add_auth_key(AddKeyForm { + opt_key: None, + seconds_valid: Some(60), + }) + .await; + + assert_unauthorized(response).await; + + env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_the_auth_key_cannot_be_generated() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + force_database_error(&env.tracker); + + let response = 
Client::new(env.get_connection_info()) + .add_auth_key(AddKeyForm { + opt_key: None, + seconds_valid: Some(60), + }) + .await; + + assert_failed_to_generate_key(response).await; + + env.stop().await; +} + +#[tokio::test] +async fn should_allow_deleting_an_auth_key() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let seconds_valid = 60; + let auth_key = env + .tracker + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) + .await + .unwrap(); + + let response = Client::new(env.get_connection_info()) + .delete_auth_key(&auth_key.key.to_string()) + .await; + + assert_ok(response).await; + + env.stop().await; +} + +#[tokio::test] +async fn should_fail_generating_a_new_auth_key_when_the_provided_key_is_invalid() { + #[derive(Serialize, Debug)] + pub struct InvalidAddKeyForm { + #[serde(rename = "key")] + pub opt_key: Option, + pub seconds_valid: u64, + } + + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let invalid_keys = [ + // "", it returns 404 + // " ", it returns 404 + "-1", // Not a string + "invalid", // Invalid string + "GQEs2ZNcCm9cwEV9dBpcPB5OwNFWFiR", // Not a 32-char string + "%QEs2ZNcCm9cwEV9dBpcPB5OwNFWFiRd", // Invalid char. 
+ ]; + + for invalid_key in invalid_keys { + let response = Client::new(env.get_connection_info()) + .post_form( + "keys", + &InvalidAddKeyForm { + opt_key: Some(invalid_key.to_string()), + seconds_valid: 60, + }, + ) + .await; + + assert_invalid_auth_key_post_param(response, invalid_key).await; + } + + env.stop().await; +} + +#[tokio::test] +async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { + #[derive(Serialize, Debug)] + pub struct InvalidAddKeyForm { + #[serde(rename = "key")] + pub opt_key: Option, + pub seconds_valid: String, + } + + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let invalid_key_durations = [ + // "", it returns 404 + // " ", it returns 404 + "-1", "text", + ]; + + for invalid_key_duration in invalid_key_durations { + let response = Client::new(env.get_connection_info()) + .post_form( + "keys", + &InvalidAddKeyForm { + opt_key: None, + seconds_valid: invalid_key_duration.to_string(), + }, + ) + .await; + + assert_unprocessable_auth_key_duration_param(response, invalid_key_duration).await; + } + + env.stop().await; +} + +#[tokio::test] +async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let invalid_auth_keys = [ + // "", it returns a 404 + // " ", it returns a 404 + "0", + "-1", + "INVALID AUTH KEY ID", + "IrweYtVuQPGbG9Jzx1DihcPmJGGpVy8", // 32 char key cspell:disable-line + "IrweYtVuQPGbG9Jzx1DihcPmJGGpVy8zs", // 34 char key cspell:disable-line + ]; + + for invalid_auth_key in &invalid_auth_keys { + let response = Client::new(env.get_connection_info()).delete_auth_key(invalid_auth_key).await; + + assert_invalid_auth_key_get_param(response, invalid_auth_key).await; + } + + env.stop().await; +} + +#[tokio::test] +async fn 
should_fail_when_the_auth_key_cannot_be_deleted() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let seconds_valid = 60; + let auth_key = env + .tracker + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) + .await + .unwrap(); + + force_database_error(&env.tracker); + + let response = Client::new(env.get_connection_info()) + .delete_auth_key(&auth_key.key.to_string()) + .await; + + assert_failed_to_delete_key(response).await; + + env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let seconds_valid = 60; + + // Generate new auth key + let auth_key = env + .tracker + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) + .await + .unwrap(); + + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .delete_auth_key(&auth_key.key.to_string()) + .await; + + assert_token_not_valid(response).await; + + // Generate new auth key + let auth_key = env + .tracker + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) + .await + .unwrap(); + + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + .delete_auth_key(&auth_key.key.to_string()) + .await; + + assert_unauthorized(response).await; + + env.stop().await; +} + +#[tokio::test] +async fn should_allow_reloading_keys() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let seconds_valid = 60; + env.tracker + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) + .await + .unwrap(); + + let response = Client::new(env.get_connection_info()).reload_keys().await; + + assert_ok(response).await; 
+ + env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_keys_cannot_be_reloaded() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let seconds_valid = 60; + env.tracker + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) + .await + .unwrap(); + + force_database_error(&env.tracker); + + let response = Client::new(env.get_connection_info()).reload_keys().await; + + assert_failed_to_reload_keys(response).await; + + env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_reloading_keys_for_unauthenticated_users() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let seconds_valid = 60; + env.tracker + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) + .await + .unwrap(); + + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .reload_keys() + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + .reload_keys() + .await; + + assert_unauthorized(response).await; + + env.stop().await; +} + +mod deprecated_generate_key_endpoint { + + use torrust_tracker::core::auth::Key; + use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + + use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; + use crate::servers::api::v1::asserts::{ + assert_auth_key_utf8, assert_failed_to_generate_key, assert_invalid_key_duration_param, assert_token_not_valid, + assert_unauthorized, + }; + use crate::servers::api::v1::client::Client; + use crate::servers::api::{force_database_error, Started}; + + #[tokio::test] + async fn 
should_allow_generating_a_new_auth_key() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let seconds_valid = 60; + + let response = Client::new(env.get_connection_info()).generate_auth_key(seconds_valid).await; + + let auth_key_resource = assert_auth_key_utf8(response).await; + + assert!(env + .tracker + .authenticate(&auth_key_resource.key.parse::().unwrap()) + .await + .is_ok()); + + env.stop().await; + } + + #[tokio::test] + async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let seconds_valid = 60; + + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .generate_auth_key(seconds_valid) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + .generate_auth_key(seconds_valid) + .await; + + assert_unauthorized(response).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let invalid_key_durations = [ + // "", it returns 404 + // " ", it returns 404 + "-1", "text", + ]; + + for invalid_key_duration in invalid_key_durations { + let response = Client::new(env.get_connection_info()) + .post_empty(&format!("key/{invalid_key_duration}")) + .await; + + assert_invalid_key_duration_param(response, invalid_key_duration).await; + } + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_auth_key_cannot_be_generated() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let 
env = Started::new(&configuration::ephemeral().into()).await; + + force_database_error(&env.tracker); + + let seconds_valid = 60; + let response = Client::new(env.get_connection_info()).generate_auth_key(seconds_valid).await; + + assert_failed_to_generate_key(response).await; + + env.stop().await; + } +} diff --git a/tests/servers/api/v1/contract/context/health_check.rs b/tests/servers/api/v1/contract/context/health_check.rs new file mode 100644 index 000000000..af46a5abe --- /dev/null +++ b/tests/servers/api/v1/contract/context/health_check.rs @@ -0,0 +1,26 @@ +use torrust_tracker::servers::apis::v1::context::health_check::resources::{Report, Status}; +use torrust_tracker_test_helpers::configuration; +use tracing::level_filters::LevelFilter; + +use crate::common::logging::{tracing_stderr_init, INIT}; +use crate::servers::api::v1::client::get; +use crate::servers::api::Started; + +#[tokio::test] +async fn health_check_endpoint_should_return_status_ok_if_api_is_running() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let url = format!("http://{}/api/health_check", env.get_connection_info().bind_address); + + let response = get(&url, None).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.json::().await.unwrap(), Report { status: Status::Ok }); + + env.stop().await; +} diff --git a/tests/servers/api/v1/contract/context/mod.rs b/tests/servers/api/v1/contract/context/mod.rs new file mode 100644 index 000000000..032e13b0b --- /dev/null +++ b/tests/servers/api/v1/contract/context/mod.rs @@ -0,0 +1,5 @@ +pub mod auth_key; +pub mod health_check; +pub mod stats; +pub mod torrent; +pub mod whitelist; diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs new file mode 100644 index 000000000..a034a7778 --- /dev/null +++ 
b/tests/servers/api/v1/contract/context/stats.rs @@ -0,0 +1,77 @@ +use std::str::FromStr; + +use torrust_tracker::servers::apis::v1::context::stats::resources::Stats; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer::fixture::PeerBuilder; +use torrust_tracker_test_helpers::configuration; +use tracing::level_filters::LevelFilter; + +use crate::common::logging::{tracing_stderr_init, INIT}; +use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::servers::api::v1::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; +use crate::servers::api::v1::client::Client; +use crate::servers::api::Started; + +#[tokio::test] +async fn should_allow_getting_tracker_statistics() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + env.add_torrent_peer( + &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + &PeerBuilder::default().into(), + ); + + let response = Client::new(env.get_connection_info()).get_tracker_statistics().await; + + assert_stats( + response, + Stats { + torrents: 1, + seeders: 1, + completed: 0, + leechers: 0, + tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + udp6_announces_handled: 0, + udp6_scrapes_handled: 0, + }, + ) + .await; + + env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let response = 
Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .get_tracker_statistics() + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + .get_tracker_statistics() + .await; + + assert_unauthorized(response).await; + + env.stop().await; +} diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs new file mode 100644 index 000000000..f5e930be3 --- /dev/null +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -0,0 +1,353 @@ +use std::str::FromStr; + +use torrust_tracker::servers::apis::v1::context::torrent::resources::peer::Peer; +use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{self, Torrent}; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer::fixture::PeerBuilder; +use torrust_tracker_test_helpers::configuration; +use tracing::level_filters::LevelFilter; + +use crate::common::http::{Query, QueryParam}; +use crate::common::logging::{tracing_stderr_init, INIT}; +use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::servers::api::v1::asserts::{ + assert_bad_request, assert_invalid_infohash_param, assert_not_found, assert_token_not_valid, assert_torrent_info, + assert_torrent_list, assert_torrent_not_known, assert_unauthorized, +}; +use crate::servers::api::v1::client::Client; +use crate::servers::api::v1::contract::fixtures::{ + invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found, +}; +use crate::servers::api::Started; + +#[tokio::test] +async fn should_allow_getting_all_torrents() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let info_hash = 
InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()); + + let response = Client::new(env.get_connection_info()).get_torrents(Query::empty()).await; + + assert_torrent_list( + response, + vec![torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + }], + ) + .await; + + env.stop().await; +} + +#[tokio::test] +async fn should_allow_limiting_the_torrents_in_the_result() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + // torrents are ordered alphabetically by infohashes + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); + + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); + + let response = Client::new(env.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) + .await; + + assert_torrent_list( + response, + vec![torrent::ListItem { + info_hash: "0b3aea4adc213ce32295be85d3883a63bca25446".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + }], + ) + .await; + + env.stop().await; +} + +#[tokio::test] +async fn should_allow_the_torrents_result_pagination() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + // torrents are ordered alphabetically by infohashes + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); + + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_2, 
&PeerBuilder::default().into()); + + let response = Client::new(env.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) + .await; + + assert_torrent_list( + response, + vec![torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + }], + ) + .await; + + env.stop().await; +} + +#[tokio::test] +async fn should_allow_getting_a_list_of_torrents_providing_infohashes() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); // DevSkim: ignore DS173237 + + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); + + let response = Client::new(env.get_connection_info()) + .get_torrents(Query::params( + [ + QueryParam::new("info_hash", "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d"), // DevSkim: ignore DS173237 + QueryParam::new("info_hash", "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d"), // DevSkim: ignore DS173237 + ] + .to_vec(), + )) + .await; + + assert_torrent_list( + response, + vec![ + torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), // DevSkim: ignore DS173237 + seeders: 1, + completed: 0, + leechers: 0, + }, + torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), // DevSkim: ignore DS173237 + seeders: 1, + completed: 0, + leechers: 0, + }, + ], + ) + .await; + + env.stop().await; +} + +#[tokio::test] +async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = 
Started::new(&configuration::ephemeral().into()).await; + + let invalid_offsets = [" ", "-1", "1.1", "INVALID OFFSET"]; + + for invalid_offset in &invalid_offsets { + let response = Client::new(env.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) + .await; + + assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; + } + + env.stop().await; +} + +#[tokio::test] +async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_parsed() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let invalid_limits = [" ", "-1", "1.1", "INVALID LIMIT"]; + + for invalid_limit in &invalid_limits { + let response = Client::new(env.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) + .await; + + assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; + } + + env.stop().await; +} + +#[tokio::test] +async fn should_fail_getting_torrents_when_the_info_hash_parameter_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let invalid_info_hashes = [" ", "-1", "1.1", "INVALID INFO_HASH"]; + + for invalid_info_hash in &invalid_info_hashes { + let response = Client::new(env.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("info_hash", invalid_info_hash)].to_vec())) + .await; + + assert_bad_request( + response, + &format!("Invalid URL: invalid infohash param: string \"{invalid_info_hash}\", expected a 40 character long string"), + ) + .await; + } + + env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_getting_torrents_for_unauthenticated_users() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = 
Started::new(&configuration::ephemeral().into()).await; + + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .get_torrents(Query::empty()) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + .get_torrents(Query::default()) + .await; + + assert_unauthorized(response).await; + + env.stop().await; +} + +#[tokio::test] +async fn should_allow_getting_a_torrent_info() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + let peer = PeerBuilder::default().into(); + + env.add_torrent_peer(&info_hash, &peer); + + let response = Client::new(env.get_connection_info()) + .get_torrent(&info_hash.to_string()) + .await; + + assert_torrent_info( + response, + Torrent { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + seeders: 1, + completed: 0, + leechers: 0, + peers: Some(vec![Peer::from(peer)]), + }, + ) + .await; + + env.stop().await; +} + +#[tokio::test] +async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + let response = Client::new(env.get_connection_info()) + .get_torrent(&info_hash.to_string()) + .await; + + assert_torrent_not_known(response).await; + + env.stop().await; +} + +#[tokio::test] +async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + for 
invalid_infohash in &invalid_infohashes_returning_bad_request() { + let response = Client::new(env.get_connection_info()).get_torrent(invalid_infohash).await; + + assert_invalid_infohash_param(response, invalid_infohash).await; + } + + for invalid_infohash in &invalid_infohashes_returning_not_found() { + let response = Client::new(env.get_connection_info()).get_torrent(invalid_infohash).await; + + assert_not_found(response).await; + } + + env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + + env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()); + + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .get_torrent(&info_hash.to_string()) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + .get_torrent(&info_hash.to_string()) + .await; + + assert_unauthorized(response).await; + + env.stop().await; +} diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs new file mode 100644 index 000000000..b30a7dbf8 --- /dev/null +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -0,0 +1,300 @@ +use std::str::FromStr; + +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_test_helpers::configuration; +use tracing::level_filters::LevelFilter; + +use crate::common::logging::{tracing_stderr_init, INIT}; +use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::servers::api::v1::asserts::{ + assert_failed_to_reload_whitelist, 
assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, + assert_invalid_infohash_param, assert_not_found, assert_ok, assert_token_not_valid, assert_unauthorized, +}; +use crate::servers::api::v1::client::Client; +use crate::servers::api::v1::contract::fixtures::{ + invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found, +}; +use crate::servers::api::{force_database_error, Started}; + +#[tokio::test] +async fn should_allow_whitelisting_a_torrent() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let response = Client::new(env.get_connection_info()).whitelist_a_torrent(&info_hash).await; + + assert_ok(response).await; + assert!( + env.tracker + .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + .await + ); + + env.stop().await; +} + +#[tokio::test] +async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let api_client = Client::new(env.get_connection_info()); + + let response = api_client.whitelist_a_torrent(&info_hash).await; + assert_ok(response).await; + + let response = api_client.whitelist_a_torrent(&info_hash).await; + assert_ok(response).await; + + env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let response = 
Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .whitelist_a_torrent(&info_hash) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + .whitelist_a_torrent(&info_hash) + .await; + + assert_unauthorized(response).await; + + env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_the_torrent_cannot_be_whitelisted() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + force_database_error(&env.tracker); + + let response = Client::new(env.get_connection_info()).whitelist_a_torrent(&info_hash).await; + + assert_failed_to_whitelist_torrent(response).await; + + env.stop().await; +} + +#[tokio::test] +async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + for invalid_infohash in &invalid_infohashes_returning_bad_request() { + let response = Client::new(env.get_connection_info()) + .whitelist_a_torrent(invalid_infohash) + .await; + + assert_invalid_infohash_param(response, invalid_infohash).await; + } + + for invalid_infohash in &invalid_infohashes_returning_not_found() { + let response = Client::new(env.get_connection_info()) + .whitelist_a_torrent(invalid_infohash) + .await; + + assert_not_found(response).await; + } + + env.stop().await; +} + +#[tokio::test] +async fn should_allow_removing_a_torrent_from_the_whitelist() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = 
InfoHash::from_str(&hash).unwrap(); + env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + let response = Client::new(env.get_connection_info()) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_ok(response).await; + assert!(!env.tracker.is_info_hash_whitelisted(&info_hash).await); + + env.stop().await; +} + +#[tokio::test] +async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + + let response = Client::new(env.get_connection_info()) + .remove_torrent_from_whitelist(&non_whitelisted_torrent_hash) + .await; + + assert_ok(response).await; + + env.stop().await; +} + +#[tokio::test] +async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_infohash_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + for invalid_infohash in &invalid_infohashes_returning_bad_request() { + let response = Client::new(env.get_connection_info()) + .remove_torrent_from_whitelist(invalid_infohash) + .await; + + assert_invalid_infohash_param(response, invalid_infohash).await; + } + + for invalid_infohash in &invalid_infohashes_returning_not_found() { + let response = Client::new(env.get_connection_info()) + .remove_torrent_from_whitelist(invalid_infohash) + .await; + + assert_not_found(response).await; + } + + env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = 
InfoHash::from_str(&hash).unwrap(); + env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + force_database_error(&env.tracker); + + let response = Client::new(env.get_connection_info()) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_failed_to_remove_torrent_from_whitelist(response).await; + + env.stop().await; +} + +#[tokio::test] +async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + + env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_token_not_valid(response).await; + + env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + .remove_torrent_from_whitelist(&hash) + .await; + + assert_unauthorized(response).await; + + env.stop().await; +} + +#[tokio::test] +async fn should_allow_reload_the_whitelist_from_the_database() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + let response = Client::new(env.get_connection_info()).reload_whitelist().await; + + assert_ok(response).await; + /* todo: this assert fails because the whitelist has not been reloaded yet. 
+ We could add a new endpoint GET /api/whitelist/:info_hash to check if a torrent + is whitelisted and use that endpoint to check if the torrent is still there after reloading. + assert!( + !(env + .tracker + .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + .await) + ); + */ + + env.stop().await; +} + +#[tokio::test] +async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = InfoHash::from_str(&hash).unwrap(); + env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + force_database_error(&env.tracker); + + let response = Client::new(env.get_connection_info()).reload_whitelist().await; + + assert_failed_to_reload_whitelist(response).await; + + env.stop().await; +} diff --git a/tests/servers/api/v1/contract/fixtures.rs b/tests/servers/api/v1/contract/fixtures.rs new file mode 100644 index 000000000..6d147f190 --- /dev/null +++ b/tests/servers/api/v1/contract/fixtures.rs @@ -0,0 +1,13 @@ +use crate::common::fixtures::invalid_info_hashes; + +// When these infohashes are used in URL path params +// the response is a custom response returned in the handler +pub fn invalid_infohashes_returning_bad_request() -> Vec { + invalid_info_hashes() +} + +// When these infohashes are used in URL path params +// the response is an Axum response returned in the handler +pub fn invalid_infohashes_returning_not_found() -> Vec { + [String::new(), " ".to_string()].to_vec() +} diff --git a/tests/servers/api/v1/contract/mod.rs b/tests/servers/api/v1/contract/mod.rs new file mode 100644 index 000000000..38b4a2b37 --- /dev/null +++ b/tests/servers/api/v1/contract/mod.rs @@ -0,0 +1,4 @@ +pub mod authentication; +pub mod configuration; +pub mod context; +pub mod fixtures; diff --git a/tests/servers/api/v1/mod.rs 
b/tests/servers/api/v1/mod.rs new file mode 100644 index 000000000..37298b377 --- /dev/null +++ b/tests/servers/api/v1/mod.rs @@ -0,0 +1,3 @@ +pub mod asserts; +pub mod client; +pub mod contract; diff --git a/tests/servers/health_check_api/client.rs b/tests/servers/health_check_api/client.rs new file mode 100644 index 000000000..3d8bdc7d6 --- /dev/null +++ b/tests/servers/health_check_api/client.rs @@ -0,0 +1,5 @@ +use reqwest::Response; + +pub async fn get(path: &str) -> Response { + reqwest::Client::builder().build().unwrap().get(path).send().await.unwrap() +} diff --git a/tests/servers/health_check_api/contract.rs b/tests/servers/health_check_api/contract.rs new file mode 100644 index 000000000..d40899f98 --- /dev/null +++ b/tests/servers/health_check_api/contract.rs @@ -0,0 +1,363 @@ +use torrust_tracker::servers::health_check_api::resources::{Report, Status}; +use torrust_tracker::servers::registar::Registar; +use torrust_tracker_test_helpers::configuration; +use tracing::level_filters::LevelFilter; + +use crate::common::logging::{tracing_stderr_init, INIT}; +use crate::servers::health_check_api::client::get; +use crate::servers::health_check_api::Started; + +#[tokio::test] +async fn health_check_endpoint_should_return_status_ok_when_there_is_no_services_registered() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let configuration = configuration::ephemeral_with_no_services(); + + let env = Started::new(&configuration.health_check_api.into(), Registar::default()).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report = response + .json::() + .await + .expect("it should be able to get the report as json"); + + assert_eq!(report.status, Status::None); + + env.stop().await.expect("it should stop the service"); +} + +mod api { + use std::sync::Arc; + + use 
torrust_tracker::servers::health_check_api::resources::{Report, Status}; + use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + + use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::servers::api; + use crate::servers::health_check_api::client::get; + use crate::servers::health_check_api::Started; + + #[tokio::test] + pub(crate) async fn it_should_return_good_health_for_api_service() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let configuration = Arc::new(configuration::ephemeral()); + + let service = api::Started::new(&configuration).await; + + let registar = service.registar.clone(); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Ok); + assert_eq!(report.message, String::new()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, service.bind_address()); + + assert_eq!(details.result, Ok("200 OK".to_string())); + + assert_eq!( + details.info, + format!( + "checking api health check at: http://{}/api/health_check", + service.bind_address() + ) + ); + + env.stop().await.expect("it should stop the service"); + } + + service.stop().await; + } + + #[tokio::test] + pub(crate) async fn it_should_return_error_when_api_service_was_stopped_after_registration() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let configuration = Arc::new(configuration::ephemeral()); + + let service = api::Started::new(&configuration).await; + + let binding = service.bind_address(); + + 
let registar = service.registar.clone(); + + service.server.stop().await.expect("it should stop udp server"); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Error); + assert_eq!(report.message, "health check failed".to_string()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, binding); + assert!( + details + .result + .as_ref() + .is_err_and(|e| e.contains("error sending request for url")), + "Expected to contain, \"error sending request for url\", but have message \"{:?}\".", + details.result + ); + assert_eq!( + details.info, + format!("checking api health check at: http://{binding}/api/health_check") + ); + + env.stop().await.expect("it should stop the service"); + } + } +} + +mod http { + use std::sync::Arc; + + use torrust_tracker::servers::health_check_api::resources::{Report, Status}; + use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + + use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::servers::health_check_api::client::get; + use crate::servers::health_check_api::Started; + use crate::servers::http; + + #[tokio::test] + pub(crate) async fn it_should_return_good_health_for_http_service() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let configuration = Arc::new(configuration::ephemeral()); + + let service = http::Started::new(&configuration).await; + + let registar = service.registar.clone(); + + { + let config = configuration.health_check_api.clone(); + let env = 
Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Ok); + assert_eq!(report.message, String::new()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, *service.bind_address()); + assert_eq!(details.result, Ok("200 OK".to_string())); + + assert_eq!( + details.info, + format!( + "checking http tracker health check at: http://{}/health_check", + service.bind_address() + ) + ); + + env.stop().await.expect("it should stop the service"); + } + + service.stop().await; + } + + #[tokio::test] + pub(crate) async fn it_should_return_error_when_http_service_was_stopped_after_registration() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let configuration = Arc::new(configuration::ephemeral()); + + let service = http::Started::new(&configuration).await; + + let binding = *service.bind_address(); + + let registar = service.registar.clone(); + + service.server.stop().await.expect("it should stop udp server"); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Error); + assert_eq!(report.message, "health check failed".to_string()); + + let details = report.details.first().expect("it should have some details"); + + 
assert_eq!(details.binding, binding); + assert!( + details + .result + .as_ref() + .is_err_and(|e| e.contains("error sending request for url")), + "Expected to contain, \"error sending request for url\", but have message \"{:?}\".", + details.result + ); + assert_eq!( + details.info, + format!("checking http tracker health check at: http://{binding}/health_check") + ); + + env.stop().await.expect("it should stop the service"); + } + } +} + +mod udp { + use std::sync::Arc; + + use torrust_tracker::servers::health_check_api::resources::{Report, Status}; + use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + + use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::servers::health_check_api::client::get; + use crate::servers::health_check_api::Started; + use crate::servers::udp; + + #[tokio::test] + pub(crate) async fn it_should_return_good_health_for_udp_service() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let configuration = Arc::new(configuration::ephemeral()); + + let service = udp::Started::new(&configuration).await; + + let registar = service.registar.clone(); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Ok); + assert_eq!(report.message, String::new()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, service.bind_address()); + assert_eq!(details.result, Ok("Connected".to_string())); + + assert_eq!( + details.info, + format!("checking the udp tracker health check at: {}", 
service.bind_address()) + ); + + env.stop().await.expect("it should stop the service"); + } + + service.stop().await; + } + + #[tokio::test] + pub(crate) async fn it_should_return_error_when_udp_service_was_stopped_after_registration() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let configuration = Arc::new(configuration::ephemeral()); + + let service = udp::Started::new(&configuration).await; + + let binding = service.bind_address(); + + let registar = service.registar.clone(); + + service.server.stop().await.expect("it should stop udp server"); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Error); + assert_eq!(report.message, "health check failed".to_string()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, binding); + assert_eq!(details.result, Err("Timed Out".to_string())); + assert_eq!(details.info, format!("checking the udp tracker health check at: {binding}")); + + env.stop().await.expect("it should stop the service"); + } + } +} diff --git a/tests/servers/health_check_api/environment.rs b/tests/servers/health_check_api/environment.rs new file mode 100644 index 000000000..b101a54e7 --- /dev/null +++ b/tests/servers/health_check_api/environment.rs @@ -0,0 +1,98 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use tokio::sync::oneshot::{self, Sender}; +use tokio::task::JoinHandle; +use torrust_tracker::bootstrap::jobs::Started; +use torrust_tracker::servers::health_check_api::{server, HEALTH_CHECK_API_LOG_TARGET}; +use 
torrust_tracker::servers::registar::Registar; +use torrust_tracker::servers::signals::{self, Halted}; +use torrust_tracker_configuration::HealthCheckApi; + +#[derive(Debug)] +pub enum Error { + #[allow(dead_code)] + Error(String), +} + +pub struct Running { + pub binding: SocketAddr, + pub halt_task: Sender, + pub task: JoinHandle, +} + +pub struct Stopped { + pub bind_to: SocketAddr, +} + +pub struct Environment { + pub registar: Registar, + pub state: S, +} + +impl Environment { + pub fn new(config: &Arc, registar: Registar) -> Self { + let bind_to = config.bind_address; + + Self { + registar, + state: Stopped { bind_to }, + } + } + + /// Start the test environment for the Health Check API. + /// It runs the API server. + pub async fn start(self) -> Environment { + let (tx_start, rx_start) = oneshot::channel::(); + let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); + + let register = self.registar.entries(); + + tracing::debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Spawning task to launch the service ..."); + + let server = tokio::spawn(async move { + tracing::debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Starting the server in a spawned task ..."); + + server::start(self.state.bind_to, tx_start, rx_halt, register) + .await + .expect("it should start the health check service"); + + tracing::debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Server started. 
Sending the binding {} ...", self.state.bind_to); + + self.state.bind_to + }); + + tracing::debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Waiting for spawning task to send the binding ..."); + + let binding = rx_start.await.expect("it should send service binding").address; + + Environment { + registar: self.registar.clone(), + state: Running { + task: server, + halt_task: tx_halt, + binding, + }, + } + } +} + +impl Environment { + pub async fn new(config: &Arc, registar: Registar) -> Self { + Environment::::new(config, registar).start().await + } + + pub async fn stop(self) -> Result, Error> { + self.state + .halt_task + .send(Halted::Normal) + .map_err(|e| Error::Error(e.to_string()))?; + + let bind_to = self.state.task.await.expect("it should shutdown the service"); + + Ok(Environment { + registar: self.registar.clone(), + state: Stopped { bind_to }, + }) + } +} diff --git a/tests/servers/health_check_api/mod.rs b/tests/servers/health_check_api/mod.rs new file mode 100644 index 000000000..9e15c5f62 --- /dev/null +++ b/tests/servers/health_check_api/mod.rs @@ -0,0 +1,5 @@ +pub mod client; +pub mod contract; +pub mod environment; + +pub type Started = environment::Environment; diff --git a/tests/servers/http/asserts.rs b/tests/servers/http/asserts.rs new file mode 100644 index 000000000..3a2e67bf0 --- /dev/null +++ b/tests/servers/http/asserts.rs @@ -0,0 +1,145 @@ +use std::panic::Location; + +use reqwest::Response; + +use super::responses::announce::{Announce, Compact, DeserializedCompact}; +use super::responses::scrape; +use crate::servers::http::responses::error::Error; + +pub fn assert_bencoded_error(response_text: &String, expected_failure_reason: &str, location: &'static Location<'static>) { + let error_failure_reason = serde_bencode::from_str::(response_text) + .unwrap_or_else(|_| panic!( + "response body should be a valid bencoded string for the '{expected_failure_reason}' error, got \"{response_text}\"" + ) + ) + .failure_reason; + + assert!( + 
error_failure_reason.contains(expected_failure_reason), + r#": + response: `"{error_failure_reason}"` + does not contain: `"{expected_failure_reason}"`, {location}"# + ); +} + +pub async fn assert_empty_announce_response(response: Response) { + assert_eq!(response.status(), 200); + let announce_response: Announce = serde_bencode::from_str(&response.text().await.unwrap()).unwrap(); + assert!(announce_response.peers.is_empty()); +} + +pub async fn assert_announce_response(response: Response, expected_announce_response: &Announce) { + assert_eq!(response.status(), 200); + + let body = response.bytes().await.unwrap(); + + let announce_response: Announce = serde_bencode::from_bytes(&body) + .unwrap_or_else(|_| panic!("response body should be a valid announce response, got \"{:#?}\"", &body)); + + assert_eq!(announce_response, *expected_announce_response); +} + +pub async fn assert_compact_announce_response(response: Response, expected_response: &Compact) { + assert_eq!(response.status(), 200); + + let bytes = response.bytes().await.unwrap(); + + let compact_announce = DeserializedCompact::from_bytes(&bytes).unwrap_or_else(|_| { + panic!( + "response body should be a valid compact announce response, got \"{:?}\"", + &bytes + ) + }); + + let actual_response = Compact::from(compact_announce); + + assert_eq!(actual_response, *expected_response); +} + +/// Sample bencoded scrape response as byte array: +/// +/// ```text +/// b"d5:filesd20:\x9c8B\"\x13\xe3\x0b\xff!+0\xc3`\xd2o\x9a\x02\x13d\"d8:completei1e10:downloadedi0e10:incompletei0eeee" +/// ``` +pub async fn assert_scrape_response(response: Response, expected_response: &scrape::Response) { + assert_eq!(response.status(), 200); + + let scrape_response = scrape::Response::try_from_bencoded(&response.bytes().await.unwrap()).unwrap(); + + assert_eq!(scrape_response, *expected_response); +} + +pub async fn assert_is_announce_response(response: Response) { + assert_eq!(response.status(), 200); + let body = 
response.text().await.unwrap(); + let _announce_response: Announce = serde_bencode::from_str(&body) + .unwrap_or_else(|_| panic!("response body should be a valid announce response, got \"{}\"", &body)); +} + +// Error responses + +// Specific errors for announce request + +pub async fn assert_missing_query_params_for_announce_request_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + "missing query params for announce request", + Location::caller(), + ); +} + +pub async fn assert_bad_announce_request_error_response(response: Response, failure: &str) { + assert_cannot_parse_query_params_error_response(response, &format!(" for announce request: {failure}")).await; +} + +// Specific errors for scrape request + +pub async fn assert_missing_query_params_for_scrape_request_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + "missing query params for scrape request", + Location::caller(), + ); +} + +// Other errors + +pub async fn assert_torrent_not_in_whitelist_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error(&response.text().await.unwrap(), "is not whitelisted", Location::caller()); +} + +pub async fn assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + "missing or invalid the right most X-Forwarded-For IP (mandatory on reverse proxy tracker configuration)", + Location::caller(), + ); +} + +pub async fn assert_cannot_parse_query_param_error_response(response: Response, failure: &str) { + assert_cannot_parse_query_params_error_response(response, &format!(": {failure}")).await; +} + +pub async fn assert_cannot_parse_query_params_error_response(response: Response, failure: &str) { + 
assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + &format!("Cannot parse query params{failure}"), + Location::caller(), + ); +} + +pub async fn assert_authentication_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error(&response.text().await.unwrap(), "Authentication error", Location::caller()); +} diff --git a/tests/servers/http/client.rs b/tests/servers/http/client.rs new file mode 100644 index 000000000..288987c55 --- /dev/null +++ b/tests/servers/http/client.rs @@ -0,0 +1,103 @@ +use std::net::IpAddr; + +use reqwest::{Client as ReqwestClient, Response}; +use torrust_tracker::core::auth::Key; + +use super::requests::announce::{self, Query}; +use super::requests::scrape; + +/// HTTP Tracker Client +pub struct Client { + server_addr: std::net::SocketAddr, + reqwest: ReqwestClient, + key: Option, +} + +/// URL components in this context: +/// +/// ```text +/// http://127.0.0.1:62304/announce/YZ....rJ?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// \_____________________/\_______________/ \__________________________________________________________/ +/// | | | +/// base url path query +/// ``` +impl Client { + pub fn new(server_addr: std::net::SocketAddr) -> Self { + Self { + server_addr, + reqwest: reqwest::Client::builder().build().unwrap(), + key: None, + } + } + + /// Creates the new client binding it to an specific local address + pub fn bind(server_addr: std::net::SocketAddr, local_address: IpAddr) -> Self { + Self { + server_addr, + reqwest: reqwest::Client::builder().local_address(local_address).build().unwrap(), + key: None, + } + } + + pub fn authenticated(server_addr: std::net::SocketAddr, key: Key) -> Self { + Self { + server_addr, + reqwest: reqwest::Client::builder().build().unwrap(), + key: Some(key), + } + } + + pub async fn announce(&self, query: &announce::Query) -> Response { + 
self.get(&self.build_announce_path_and_query(query)).await + } + + pub async fn scrape(&self, query: &scrape::Query) -> Response { + self.get(&self.build_scrape_path_and_query(query)).await + } + + pub async fn announce_with_header(&self, query: &Query, key: &str, value: &str) -> Response { + self.get_with_header(&self.build_announce_path_and_query(query), key, value) + .await + } + + pub async fn health_check(&self) -> Response { + self.get(&self.build_path("health_check")).await + } + + pub async fn get(&self, path: &str) -> Response { + self.reqwest.get(self.build_url(path)).send().await.unwrap() + } + + pub async fn get_with_header(&self, path: &str, key: &str, value: &str) -> Response { + self.reqwest + .get(self.build_url(path)) + .header(key, value) + .send() + .await + .unwrap() + } + + fn build_announce_path_and_query(&self, query: &announce::Query) -> String { + format!("{}?{query}", self.build_path("announce")) + } + + fn build_scrape_path_and_query(&self, query: &scrape::Query) -> String { + format!("{}?{query}", self.build_path("scrape")) + } + + fn build_path(&self, path: &str) -> String { + match &self.key { + Some(key) => format!("{path}/{key}"), + None => path.to_string(), + } + } + + fn build_url(&self, path: &str) -> String { + let base_url = self.base_url(); + format!("{base_url}{path}") + } + + fn base_url(&self) -> String { + format!("http://{}/", &self.server_addr) + } +} diff --git a/tests/servers/http/connection_info.rs b/tests/servers/http/connection_info.rs new file mode 100644 index 000000000..f4081d60e --- /dev/null +++ b/tests/servers/http/connection_info.rs @@ -0,0 +1,16 @@ +use torrust_tracker::core::auth::Key; + +#[derive(Clone, Debug)] +pub struct ConnectionInfo { + pub bind_address: String, + pub key: Option, +} + +impl ConnectionInfo { + pub fn anonymous(bind_address: &str) -> Self { + Self { + bind_address: bind_address.to_string(), + key: None, + } + } +} diff --git a/tests/servers/http/environment.rs 
b/tests/servers/http/environment.rs new file mode 100644 index 000000000..b6bb21c16 --- /dev/null +++ b/tests/servers/http/environment.rs @@ -0,0 +1,82 @@ +use std::sync::Arc; + +use futures::executor::block_on; +use torrust_tracker::bootstrap::app::initialize_with_configuration; +use torrust_tracker::bootstrap::jobs::make_rust_tls; +use torrust_tracker::core::Tracker; +use torrust_tracker::servers::http::server::{HttpServer, Launcher, Running, Stopped}; +use torrust_tracker::servers::registar::Registar; +use torrust_tracker_configuration::{Configuration, HttpTracker}; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; + +pub struct Environment { + pub config: Arc, + pub tracker: Arc, + pub registar: Registar, + pub server: HttpServer, +} + +impl Environment { + /// Add a torrent to the tracker + pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + self.tracker.upsert_peer_and_get_stats(info_hash, peer); + } +} + +impl Environment { + #[allow(dead_code)] + pub fn new(configuration: &Arc) -> Self { + let tracker = initialize_with_configuration(configuration); + + let http_tracker = configuration + .http_trackers + .clone() + .expect("missing HTTP tracker configuration"); + + let config = Arc::new(http_tracker[0].clone()); + + let bind_to = config.bind_address; + + let tls = block_on(make_rust_tls(&config.tsl_config)).map(|tls| tls.expect("tls config failed")); + + let server = HttpServer::new(Launcher::new(bind_to, tls)); + + Self { + config, + tracker, + registar: Registar::default(), + server, + } + } + + #[allow(dead_code)] + pub async fn start(self) -> Environment { + Environment { + config: self.config, + tracker: self.tracker.clone(), + registar: self.registar.clone(), + server: self.server.start(self.tracker, self.registar.give_form()).await.unwrap(), + } + } +} + +impl Environment { + pub async fn new(configuration: &Arc) -> Self { + Environment::::new(configuration).start().await + } + + pub 
async fn stop(self) -> Environment { + Environment { + config: self.config, + tracker: self.tracker, + registar: Registar::default(), + + server: self.server.stop().await.unwrap(), + } + } + + pub fn bind_address(&self) -> &std::net::SocketAddr { + &self.server.state.binding + } +} diff --git a/tests/servers/http/mod.rs b/tests/servers/http/mod.rs new file mode 100644 index 000000000..65affc433 --- /dev/null +++ b/tests/servers/http/mod.rs @@ -0,0 +1,31 @@ +pub mod asserts; +pub mod client; +pub mod environment; +pub mod requests; +pub mod responses; +pub mod v1; + +pub type Started = environment::Environment; + +use percent_encoding::NON_ALPHANUMERIC; +use torrust_tracker::servers::http::server; + +pub type ByteArray20 = [u8; 20]; + +pub fn percent_encode_byte_array(bytes: &ByteArray20) -> String { + percent_encoding::percent_encode(bytes, NON_ALPHANUMERIC).to_string() +} + +pub struct InfoHash(ByteArray20); + +impl InfoHash { + pub fn new(vec: &[u8]) -> Self { + let mut byte_array_20: ByteArray20 = Default::default(); + byte_array_20.clone_from_slice(vec); + Self(byte_array_20) + } + + pub fn bytes(&self) -> ByteArray20 { + self.0 + } +} diff --git a/tests/servers/http/requests/announce.rs b/tests/servers/http/requests/announce.rs new file mode 100644 index 000000000..fa20553d0 --- /dev/null +++ b/tests/servers/http/requests/announce.rs @@ -0,0 +1,272 @@ +use std::fmt; +use std::net::{IpAddr, Ipv4Addr}; +use std::str::FromStr; + +use aquatic_udp_protocol::PeerId; +use serde_repr::Serialize_repr; +use torrust_tracker_primitives::info_hash::InfoHash; + +use crate::servers::http::{percent_encode_byte_array, ByteArray20}; + +pub struct Query { + pub info_hash: ByteArray20, + pub peer_addr: IpAddr, + pub downloaded: BaseTenASCII, + pub uploaded: BaseTenASCII, + pub peer_id: ByteArray20, + pub port: PortNumber, + pub left: BaseTenASCII, + pub event: Option, + pub compact: Option, + pub numwant: Option, +} + +impl fmt::Display for Query { + fn fmt(&self, f: &mut 
fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.build()) + } +} + +/// HTTP Tracker Announce Request: +/// +/// +/// +/// Some parameters in the specification are not implemented in this tracker yet. +impl Query { + /// It builds the URL query component for the announce request. + /// + /// This custom URL query params encoding is needed because `reqwest` does not allow + /// bytes arrays in query parameters. More info on this issue: + /// + /// + pub fn build(&self) -> String { + self.params().to_string() + } + + pub fn params(&self) -> QueryParams { + QueryParams::from(self) + } +} + +pub type BaseTenASCII = u64; +pub type PortNumber = u16; + +pub enum Event { + //Started, + //Stopped, + Completed, +} + +impl fmt::Display for Event { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + //Event::Started => write!(f, "started"), + //Event::Stopped => write!(f, "stopped"), + Event::Completed => write!(f, "completed"), + } + } +} + +#[derive(Serialize_repr, PartialEq, Debug)] +#[repr(u8)] +pub enum Compact { + Accepted = 1, + NotAccepted = 0, +} + +impl fmt::Display for Compact { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Compact::Accepted => write!(f, "1"), + Compact::NotAccepted => write!(f, "0"), + } + } +} + +pub struct QueryBuilder { + announce_query: Query, +} + +impl QueryBuilder { + pub fn default() -> QueryBuilder { + let default_announce_query = Query { + info_hash: InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0, + peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), + downloaded: 0, + uploaded: 0, + peer_id: PeerId(*b"-qB00000000000000001").0, + port: 17548, + left: 0, + event: Some(Event::Completed), + compact: Some(Compact::NotAccepted), + numwant: None, + }; + Self { + announce_query: default_announce_query, + } + } + + pub fn with_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.announce_query.info_hash = info_hash.0; + self + } + + pub fn 
with_peer_id(mut self, peer_id: &PeerId) -> Self { + self.announce_query.peer_id = peer_id.0; + self + } + + pub fn with_compact(mut self, compact: Compact) -> Self { + self.announce_query.compact = Some(compact); + self + } + + pub fn with_peer_addr(mut self, peer_addr: &IpAddr) -> Self { + self.announce_query.peer_addr = *peer_addr; + self + } + + pub fn without_compact(mut self) -> Self { + self.announce_query.compact = None; + self + } + + pub fn query(self) -> Query { + self.announce_query + } +} + +/// It contains all the GET parameters that can be used in a HTTP Announce request. +/// +/// Sample Announce URL with all the GET parameters (mandatory and optional): +/// +/// ```text +/// http://127.0.0.1:7070/announce? +/// info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 (mandatory) +/// peer_addr=192.168.1.88 +/// downloaded=0 +/// uploaded=0 +/// peer_id=%2DqB00000000000000000 (mandatory) +/// port=17548 (mandatory) +/// left=0 +/// event=completed +/// compact=0 +/// numwant=50 +/// ``` +#[derive(Debug)] +pub struct QueryParams { + pub info_hash: Option, + pub peer_addr: Option, + pub downloaded: Option, + pub uploaded: Option, + pub peer_id: Option, + pub port: Option, + pub left: Option, + pub event: Option, + pub compact: Option, + pub numwant: Option, +} + +impl std::fmt::Display for QueryParams { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut params = vec![]; + + if let Some(info_hash) = &self.info_hash { + params.push(("info_hash", info_hash)); + } + if let Some(peer_addr) = &self.peer_addr { + params.push(("peer_addr", peer_addr)); + } + if let Some(downloaded) = &self.downloaded { + params.push(("downloaded", downloaded)); + } + if let Some(uploaded) = &self.uploaded { + params.push(("uploaded", uploaded)); + } + if let Some(peer_id) = &self.peer_id { + params.push(("peer_id", peer_id)); + } + if let Some(port) = &self.port { + params.push(("port", port)); + } + if let Some(left) = &self.left { + 
params.push(("left", left)); + } + if let Some(event) = &self.event { + params.push(("event", event)); + } + if let Some(compact) = &self.compact { + params.push(("compact", compact)); + } + if let Some(numwant) = &self.numwant { + params.push(("numwant", numwant)); + } + + let query = params + .iter() + .map(|param| format!("{}={}", param.0, param.1)) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +impl QueryParams { + pub fn from(announce_query: &Query) -> Self { + let event = announce_query.event.as_ref().map(std::string::ToString::to_string); + let compact = announce_query.compact.as_ref().map(std::string::ToString::to_string); + let numwant = announce_query.numwant.map(|numwant| numwant.to_string()); + + Self { + info_hash: Some(percent_encode_byte_array(&announce_query.info_hash)), + peer_addr: Some(announce_query.peer_addr.to_string()), + downloaded: Some(announce_query.downloaded.to_string()), + uploaded: Some(announce_query.uploaded.to_string()), + peer_id: Some(percent_encode_byte_array(&announce_query.peer_id)), + port: Some(announce_query.port.to_string()), + left: Some(announce_query.left.to_string()), + event, + compact, + numwant, + } + } + + pub fn remove_optional_params(&mut self) { + // todo: make them optional with the Option<...> in the AnnounceQuery struct + // if they are really optional. So that we can crete a minimal AnnounceQuery + // instead of removing the optional params afterwards. + // + // The original specification on: + // + // says only `ip` and `event` are optional. + // + // On + // says only `ip`, `numwant`, `key` and `trackerid` are optional. + // + // but the server is responding if all these params are not included. 
+ self.peer_addr = None; + self.downloaded = None; + self.uploaded = None; + self.left = None; + self.event = None; + self.compact = None; + self.numwant = None; + } + + pub fn set(&mut self, param_name: &str, param_value: &str) { + match param_name { + "info_hash" => self.info_hash = Some(param_value.to_string()), + "peer_addr" => self.peer_addr = Some(param_value.to_string()), + "downloaded" => self.downloaded = Some(param_value.to_string()), + "uploaded" => self.uploaded = Some(param_value.to_string()), + "peer_id" => self.peer_id = Some(param_value.to_string()), + "port" => self.port = Some(param_value.to_string()), + "left" => self.left = Some(param_value.to_string()), + "event" => self.event = Some(param_value.to_string()), + "compact" => self.compact = Some(param_value.to_string()), + "numwant" => self.numwant = Some(param_value.to_string()), + &_ => panic!("Invalid param name for announce query"), + } + } +} diff --git a/tests/servers/http/requests/mod.rs b/tests/servers/http/requests/mod.rs new file mode 100644 index 000000000..776d2dfbf --- /dev/null +++ b/tests/servers/http/requests/mod.rs @@ -0,0 +1,2 @@ +pub mod announce; +pub mod scrape; diff --git a/tests/servers/http/requests/scrape.rs b/tests/servers/http/requests/scrape.rs new file mode 100644 index 000000000..f66605855 --- /dev/null +++ b/tests/servers/http/requests/scrape.rs @@ -0,0 +1,118 @@ +use std::fmt; +use std::str::FromStr; + +use torrust_tracker_primitives::info_hash::InfoHash; + +use crate::servers::http::{percent_encode_byte_array, ByteArray20}; + +pub struct Query { + pub info_hash: Vec, +} + +impl fmt::Display for Query { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.build()) + } +} + +/// HTTP Tracker Scrape Request: +/// +/// +impl Query { + /// It builds the URL query component for the scrape request. + /// + /// This custom URL query params encoding is needed because `reqwest` does not allow + /// bytes arrays in query parameters. 
More info on this issue: + /// + /// + pub fn build(&self) -> String { + self.params().to_string() + } + + pub fn params(&self) -> QueryParams { + QueryParams::from(self) + } +} + +pub struct QueryBuilder { + scrape_query: Query, +} + +impl QueryBuilder { + pub fn default() -> QueryBuilder { + let default_scrape_query = Query { + info_hash: [InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0].to_vec(), + }; + Self { + scrape_query: default_scrape_query, + } + } + + pub fn with_one_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.scrape_query.info_hash = [info_hash.0].to_vec(); + self + } + + pub fn add_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.scrape_query.info_hash.push(info_hash.0); + self + } + + pub fn query(self) -> Query { + self.scrape_query + } +} + +/// It contains all the GET parameters that can be used in a HTTP Scrape request. +/// +/// The `info_hash` param is the percent encoded of the the 20-byte array info hash. +/// +/// Sample Scrape URL with all the GET parameters: +/// +/// For `IpV4`: +/// +/// ```text +/// http://127.0.0.1:7070/scrape?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// ``` +/// +/// For `IpV6`: +/// +/// ```text +/// http://[::1]:7070/scrape?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// ``` +/// +/// You can add as many info hashes as you want, just adding the same param again. 
+pub struct QueryParams { + pub info_hash: Vec, +} + +impl QueryParams { + pub fn set_one_info_hash_param(&mut self, info_hash: &str) { + self.info_hash = vec![info_hash.to_string()]; + } +} + +impl std::fmt::Display for QueryParams { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let query = self + .info_hash + .iter() + .map(|info_hash| format!("info_hash={}", &info_hash)) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +impl QueryParams { + pub fn from(scrape_query: &Query) -> Self { + let info_hashes = scrape_query + .info_hash + .iter() + .map(percent_encode_byte_array) + .collect::>(); + + Self { info_hash: info_hashes } + } +} diff --git a/tests/servers/http/responses/announce.rs b/tests/servers/http/responses/announce.rs new file mode 100644 index 000000000..554e5ab40 --- /dev/null +++ b/tests/servers/http/responses/announce.rs @@ -0,0 +1,116 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use serde::{Deserialize, Serialize}; +use torrust_tracker_primitives::peer; +use zerocopy::AsBytes as _; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Announce { + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + #[serde(rename = "min interval")] + pub min_interval: u32, + pub peers: Vec, // Peers using IPV4 and IPV6 +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct DictionaryPeer { + pub ip: String, + #[serde(rename = "peer id")] + #[serde(with = "serde_bytes")] + pub peer_id: Vec, + pub port: u16, +} + +impl From for DictionaryPeer { + fn from(peer: peer::Peer) -> Self { + DictionaryPeer { + peer_id: peer.peer_id.as_bytes().to_vec(), + ip: peer.peer_addr.ip().to_string(), + port: peer.peer_addr.port(), + } + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct DeserializedCompact { + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + #[serde(rename = "min interval")] + pub min_interval: u32, + #[serde(with = "serde_bytes")] + pub 
peers: Vec, +} + +impl DeserializedCompact { + pub fn from_bytes(bytes: &[u8]) -> Result { + serde_bencode::from_bytes::(bytes) + } +} + +#[derive(Debug, PartialEq)] +pub struct Compact { + // code-review: there could be a way to deserialize this struct directly + // by using serde instead of doing it manually. Or at least using a custom deserializer. + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + pub min_interval: u32, + pub peers: CompactPeerList, +} + +#[derive(Debug, PartialEq)] +pub struct CompactPeerList { + peers: Vec, +} + +impl CompactPeerList { + pub fn new(peers: Vec) -> Self { + Self { peers } + } +} + +#[derive(Clone, Debug, PartialEq)] +pub struct CompactPeer { + ip: Ipv4Addr, + port: u16, +} + +impl CompactPeer { + pub fn new(socket_addr: &SocketAddr) -> Self { + match socket_addr.ip() { + IpAddr::V4(ip) => Self { + ip, + port: socket_addr.port(), + }, + IpAddr::V6(_ip) => panic!("IPV6 is not supported for compact peer"), + } + } + + pub fn new_from_bytes(bytes: &[u8]) -> Self { + Self { + ip: Ipv4Addr::new(bytes[0], bytes[1], bytes[2], bytes[3]), + port: u16::from_be_bytes([bytes[4], bytes[5]]), + } + } +} + +impl From for Compact { + fn from(compact_announce: DeserializedCompact) -> Self { + let mut peers = vec![]; + + for peer_bytes in compact_announce.peers.chunks_exact(6) { + peers.push(CompactPeer::new_from_bytes(peer_bytes)); + } + + Self { + complete: compact_announce.complete, + incomplete: compact_announce.incomplete, + interval: compact_announce.interval, + min_interval: compact_announce.min_interval, + peers: CompactPeerList::new(peers), + } + } +} diff --git a/tests/servers/http/responses/error.rs b/tests/servers/http/responses/error.rs new file mode 100644 index 000000000..00befdb54 --- /dev/null +++ b/tests/servers/http/responses/error.rs @@ -0,0 +1,7 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Error { + #[serde(rename = "failure reason")] + pub 
failure_reason: String, +} diff --git a/tests/servers/http/responses/mod.rs b/tests/servers/http/responses/mod.rs new file mode 100644 index 000000000..bdc689056 --- /dev/null +++ b/tests/servers/http/responses/mod.rs @@ -0,0 +1,3 @@ +pub mod announce; +pub mod error; +pub mod scrape; diff --git a/tests/servers/http/responses/scrape.rs b/tests/servers/http/responses/scrape.rs new file mode 100644 index 000000000..fc741cbf4 --- /dev/null +++ b/tests/servers/http/responses/scrape.rs @@ -0,0 +1,200 @@ +use std::collections::HashMap; +use std::str; + +use serde::{Deserialize, Serialize}; +use serde_bencode::value::Value; + +use crate::servers::http::{ByteArray20, InfoHash}; + +#[derive(Debug, PartialEq, Default)] +pub struct Response { + pub files: HashMap, +} + +impl Response { + pub fn with_one_file(info_hash_bytes: ByteArray20, file: File) -> Self { + let mut files: HashMap = HashMap::new(); + files.insert(info_hash_bytes, file); + Self { files } + } + + pub fn try_from_bencoded(bytes: &[u8]) -> Result { + let scrape_response: DeserializedResponse = serde_bencode::from_bytes(bytes).unwrap(); + Self::try_from(scrape_response) + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Default)] +pub struct File { + pub complete: i64, // The number of active peers that have completed downloading + pub downloaded: i64, // The number of peers that have ever completed downloading + pub incomplete: i64, // The number of active peers that have not completed downloading +} + +impl File { + pub fn zeroed() -> Self { + Self::default() + } +} + +impl TryFrom for Response { + type Error = BencodeParseError; + + fn try_from(scrape_response: DeserializedResponse) -> Result { + parse_bencoded_response(&scrape_response.files) + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +struct DeserializedResponse { + pub files: Value, +} + +pub struct ResponseBuilder { + response: Response, +} + +impl ResponseBuilder { + pub fn default() -> Self { + Self { + response: 
Response::default(), + } + } + + pub fn add_file(mut self, info_hash_bytes: ByteArray20, file: File) -> Self { + self.response.files.insert(info_hash_bytes, file); + self + } + + pub fn build(self) -> Response { + self.response + } +} + +#[derive(Debug)] +pub enum BencodeParseError { + #[allow(dead_code)] + InvalidValueExpectedDict { value: Value }, + #[allow(dead_code)] + InvalidValueExpectedInt { value: Value }, + #[allow(dead_code)] + InvalidFileField { value: Value }, + #[allow(dead_code)] + MissingFileField { field_name: String }, +} + +/// It parses a bencoded scrape response into a `Response` struct. +/// +/// For example: +/// +/// ```text +/// d5:filesd20:xxxxxxxxxxxxxxxxxxxxd8:completei11e10:downloadedi13772e10:incompletei19e +/// 20:yyyyyyyyyyyyyyyyyyyyd8:completei21e10:downloadedi206e10:incompletei20eee +/// ``` +/// +/// Response (JSON encoded for readability): +/// +/// ```text +/// { +/// 'files': { +/// 'xxxxxxxxxxxxxxxxxxxx': {'complete': 11, 'downloaded': 13772, 'incomplete': 19}, +/// 'yyyyyyyyyyyyyyyyyyyy': {'complete': 21, 'downloaded': 206, 'incomplete': 20} +/// } +/// } +fn parse_bencoded_response(value: &Value) -> Result { + let mut files: HashMap = HashMap::new(); + + match value { + Value::Dict(dict) => { + for file_element in dict { + let info_hash_byte_vec = file_element.0; + let file_value = file_element.1; + + let file = parse_bencoded_file(file_value).unwrap(); + + files.insert(InfoHash::new(info_hash_byte_vec).bytes(), file); + } + } + _ => return Err(BencodeParseError::InvalidValueExpectedDict { value: value.clone() }), + } + + Ok(Response { files }) +} + +/// It parses a bencoded dictionary into a `File` struct. 
+/// +/// For example: +/// +/// +/// ```text +/// d8:completei11e10:downloadedi13772e10:incompletei19ee +/// ``` +/// +/// into: +/// +/// ```text +/// File { +/// complete: 11, +/// downloaded: 13772, +/// incomplete: 19, +/// } +/// ``` +fn parse_bencoded_file(value: &Value) -> Result { + let file = match &value { + Value::Dict(dict) => { + let mut complete = None; + let mut downloaded = None; + let mut incomplete = None; + + for file_field in dict { + let field_name = file_field.0; + + let field_value = match file_field.1 { + Value::Int(number) => Ok(*number), + _ => Err(BencodeParseError::InvalidValueExpectedInt { + value: file_field.1.clone(), + }), + }?; + + if field_name == b"complete" { + complete = Some(field_value); + } else if field_name == b"downloaded" { + downloaded = Some(field_value); + } else if field_name == b"incomplete" { + incomplete = Some(field_value); + } else { + return Err(BencodeParseError::InvalidFileField { + value: file_field.1.clone(), + }); + } + } + + if complete.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "complete".to_string(), + }); + } + + if downloaded.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "downloaded".to_string(), + }); + } + + if incomplete.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "incomplete".to_string(), + }); + } + + File { + complete: complete.unwrap(), + downloaded: downloaded.unwrap(), + incomplete: incomplete.unwrap(), + } + } + _ => return Err(BencodeParseError::InvalidValueExpectedDict { value: value.clone() }), + }; + + Ok(file) +} diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs new file mode 100644 index 000000000..405a35dc5 --- /dev/null +++ b/tests/servers/http/v1/contract.rs @@ -0,0 +1,1708 @@ +use torrust_tracker_test_helpers::configuration; + +use crate::servers::http::Started; + +#[tokio::test] +async fn environment_should_be_started_and_stopped() { + let env = 
Started::new(&configuration::ephemeral().into()).await; + + env.stop().await; +} + +mod for_all_config_modes { + + use torrust_tracker::servers::http::v1::handlers::health_check::{Report, Status}; + use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + + use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::servers::http::client::Client; + use crate::servers::http::Started; + + #[tokio::test] + async fn health_check_endpoint_should_return_ok_if_the_http_tracker_is_running() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_with_reverse_proxy().into()).await; + + let response = Client::new(*env.bind_address()).health_check().await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.json::().await.unwrap(), Report { status: Status::Ok }); + + env.stop().await; + } + + mod and_running_on_reverse_proxy { + use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + + use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::servers::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; + use crate::servers::http::client::Client; + use crate::servers::http::requests::announce::QueryBuilder; + use crate::servers::http::Started; + + #[tokio::test] + async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + // If the tracker is running behind a reverse proxy, the peer IP is the + // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. 
+ + let env = Started::new(&configuration::ephemeral_with_reverse_proxy().into()).await; + + let params = QueryBuilder::default().query().params(); + + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; + + assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_with_reverse_proxy().into()).await; + + let params = QueryBuilder::default().query().params(); + + let response = Client::new(*env.bind_address()) + .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") + .await; + + assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; + + env.stop().await; + } + } + + mod receiving_an_announce_request { + + // Announce request documentation: + // + // BEP 03. The BitTorrent Protocol Specification + // https://www.bittorrent.org/beps/bep_0003.html + // + // BEP 23. 
Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Announce + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV6}; + use std::str::FromStr; + + use aquatic_udp_protocol::PeerId; + use local_ip_address::local_ip; + use reqwest::{Response, StatusCode}; + use tokio::net::TcpListener; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + + use crate::common::fixtures::invalid_info_hashes; + use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::servers::http::asserts::{ + assert_announce_response, assert_bad_announce_request_error_response, assert_cannot_parse_query_param_error_response, + assert_cannot_parse_query_params_error_response, assert_compact_announce_response, assert_empty_announce_response, + assert_is_announce_response, assert_missing_query_params_for_announce_request_error_response, + }; + use crate::servers::http::client::Client; + use crate::servers::http::requests::announce::{Compact, QueryBuilder}; + use crate::servers::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; + use crate::servers::http::{responses, Started}; + + #[tokio::test] + async fn it_should_start_and_stop() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_public().into()).await; + env.stop().await; + } + + #[tokio::test] + async fn should_respond_if_only_the_mandatory_fields_are_provided() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let mut params = QueryBuilder::default().query().params(); + + params.remove_optional_params(); + + let response = 
Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; + + assert_is_announce_response(response).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_url_query_component_is_empty() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let response = Client::new(*env.bind_address()).get("announce").await; + + assert_missing_query_params_for_announce_request_error_response(response).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_url_query_parameters_are_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let invalid_query_param = "a=b=c"; + + let response = Client::new(*env.bind_address()) + .get(&format!("announce?{invalid_query_param}")) + .await; + + assert_cannot_parse_query_param_error_response(response, "invalid param a=b=c").await; + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_a_mandatory_field_is_missing() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + // Without `info_hash` param + + let mut params = QueryBuilder::default().query().params(); + + params.info_hash = None; + + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "missing param info_hash").await; + + // Without `peer_id` param + + let mut params = QueryBuilder::default().query().params(); + + params.peer_id = None; + + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "missing param peer_id").await; + + // Without `port` param + + let mut params = QueryBuilder::default().query().params(); + + params.port = 
None; + + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "missing param port").await; + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_info_hash_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let mut params = QueryBuilder::default().query().params(); + + for invalid_value in &invalid_info_hashes() { + params.set("info_hash", invalid_value); + + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; + + assert_cannot_parse_query_params_error_response(response, "").await; + } + + env.stop().await; + } + + #[tokio::test] + async fn should_not_fail_when_the_peer_address_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + // AnnounceQuery does not even contain the `peer_addr` + // The peer IP is obtained in two ways: + // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP. + // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request HTTP header. 
+ + let env = Started::new(&configuration::ephemeral().into()).await; + + let mut params = QueryBuilder::default().query().params(); + + params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); + + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; + + assert_is_announce_response(response).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_downloaded_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("downloaded", invalid_value); + + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_uploaded_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("uploaded", invalid_value); + + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_peer_id_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "-qB0000000000000000", // 19 bytes + 
"-qB000000000000000000", // 21 bytes + ]; + + for invalid_value in invalid_values { + params.set("peer_id", invalid_value); + + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_port_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("port", invalid_value); + + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_left_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("left", invalid_value); + + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_event_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = [ + "0", + "-1", + "1.1", + "a", + "Started", // It should be lowercase to be valid: `started` + "Stopped", // It should be 
lowercase to be valid: `stopped` + "Completed", // It should be lowercase to be valid: `completed` + ]; + + for invalid_value in invalid_values { + params.set("event", invalid_value); + + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_compact_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("compact", invalid_value); + + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_numwant_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let mut params = QueryBuilder::default().query().params(); + + let invalid_values = ["-1", "1.1", "a"]; + + for invalid_value in invalid_values { + params.set("numwant", invalid_value); + + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; + + assert_bad_announce_request_error_response(response, "invalid param value").await; + } + + env.stop().await; + } + + #[tokio::test] + async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_public().into()).await; + + let response = Client::new(*env.bind_address()) + .announce( + &QueryBuilder::default() + 
.with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) + .query(), + ) + .await; + + let announce_policy = env.tracker.get_announce_policy(); + + assert_announce_response( + response, + &Announce { + complete: 1, // the peer for this test + incomplete: 0, + interval: announce_policy.interval, + min_interval: announce_policy.interval_min, + peers: vec![], + }, + ) + .await; + + env.stop().await; + } + + #[tokio::test] + async fn should_return_the_list_of_previously_announced_peers() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_public().into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); + + // Add the Peer 1 + env.add_torrent_peer(&info_hash, &previously_announced_peer); + + // Announce the new Peer 2. 
This new peer is not included in the response peer list + let response = Client::new(*env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&PeerId(*b"-qB00000000000000002")) + .query(), + ) + .await; + + let announce_policy = env.tracker.get_announce_policy(); + + // It should only contain the previously announced peer + assert_announce_response( + response, + &Announce { + complete: 2, + incomplete: 0, + interval: announce_policy.interval, + min_interval: announce_policy.interval_min, + peers: vec![DictionaryPeer::from(previously_announced_peer)], + }, + ) + .await; + + env.stop().await; + } + + #[tokio::test] + async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_public().into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Announce a peer using IPV4 + let peer_using_ipv4 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) + .build(); + env.add_torrent_peer(&info_hash, &peer_using_ipv4); + + // Announce a peer using IPV6 + let peer_using_ipv6 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + 8080, + )) + .build(); + env.add_torrent_peer(&info_hash, &peer_using_ipv6); + + // Announce the new Peer. 
+ let response = Client::new(*env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&PeerId(*b"-qB00000000000000003")) + .query(), + ) + .await; + + let announce_policy = env.tracker.get_announce_policy(); + + // The newly announced peer is not included on the response peer list, + // but all the previously announced peers should be included regardless the IP version they are using. + assert_announce_response( + response, + &Announce { + complete: 3, + incomplete: 0, + interval: announce_policy.interval, + min_interval: announce_policy.interval_min, + peers: vec![DictionaryPeer::from(peer_using_ipv4), DictionaryPeer::from(peer_using_ipv6)], + }, + ) + .await; + + env.stop().await; + } + + #[tokio::test] + async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_public().into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let peer = PeerBuilder::default().build(); + + // Add a peer + env.add_torrent_peer(&info_hash, &peer); + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&peer.peer_id) + .query(); + + assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); + + let response = Client::new(*env.bind_address()).announce(&announce_query).await; + + assert_empty_announce_response(response).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_return_the_compact_response() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + // Tracker Returns Compact Peer Lists + // https://www.bittorrent.org/beps/bep_0023.html + + let env = Started::new(&configuration::ephemeral_public().into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let 
previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); + + // Add the Peer 1 + env.add_torrent_peer(&info_hash, &previously_announced_peer); + + // Announce the new Peer 2 accepting compact responses + let response = Client::new(*env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&PeerId(*b"-qB00000000000000002")) + .with_compact(Compact::Accepted) + .query(), + ) + .await; + + let expected_response = responses::announce::Compact { + complete: 2, + incomplete: 0, + interval: 120, + min_interval: 120, + peers: CompactPeerList::new([CompactPeer::new(&previously_announced_peer.peer_addr)].to_vec()), + }; + + assert_compact_announce_response(response, &expected_response).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_not_return_the_compact_response_by_default() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + // code-review: the HTTP tracker does not return the compact response by default if the "compact" + // param is not provided in the announce URL. BEP 23 suggests doing so. 
+ + let env = Started::new(&configuration::ephemeral_public().into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + // Peer 1 + let previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); + + // Add the Peer 1 + env.add_torrent_peer(&info_hash, &previously_announced_peer); + + // Announce the new Peer 2 without passing the "compact" param + // By default it should respond with the compact peer list + // https://www.bittorrent.org/beps/bep_0023.html + let response = Client::new(*env.bind_address()) + .announce( + &QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&PeerId(*b"-qB00000000000000002")) + .without_compact() + .query(), + ) + .await; + + assert!(!is_a_compact_announce_response(response).await); + + env.stop().await; + } + + async fn is_a_compact_announce_response(response: Response) -> bool { + let bytes = response.bytes().await.unwrap(); + let compact_announce = serde_bencode::from_bytes::<responses::announce::Compact>(&bytes); + compact_announce.is_ok() + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_public().into()).await; + + Client::new(*env.bind_address()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = env.tracker.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + + drop(stats); + + env.stop().await; + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + if TcpListener::bind(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0)) + .await + .is_err() + { + return; // we cannot bind to a ipv6 socket, so we will skip this test + } + + let env = Started::new(&configuration::ephemeral_ipv6().into()).await; + + 
Client::bind(*env.bind_address(), IpAddr::from_str("::1").unwrap()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = env.tracker.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 1); + + drop(stats); + + env.stop().await; + } + + #[tokio::test] + async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + // The tracker ignores the peer address in the request param. It uses the client remote ip address. + + let env = Started::new(&configuration::ephemeral_public().into()).await; + + Client::new(*env.bind_address()) + .announce( + &QueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; + + let stats = env.tracker.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 0); + + drop(stats); + + env.stop().await; + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_public().into()).await; + + Client::new(*env.bind_address()) + .announce(&QueryBuilder::default().query()) + .await; + + let stats = env.tracker.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, 1); + + drop(stats); + + env.stop().await; + } + + #[tokio::test] + async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + if TcpListener::bind(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0)) + .await + .is_err() + { + return; // we cannot bind to a ipv6 socket, so we will skip this test + } + + let env = Started::new(&configuration::ephemeral_ipv6().into()).await; + + Client::bind(*env.bind_address(), IpAddr::from_str("::1").unwrap()) + .announce(&QueryBuilder::default().query()) 
+ .await; + + let stats = env.tracker.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 1); + + drop(stats); + + env.stop().await; + } + + #[tokio::test] + async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + // The tracker ignores the peer address in the request param. It uses the client remote ip address. + + let env = Started::new(&configuration::ephemeral_public().into()).await; + + Client::new(*env.bind_address()) + .announce( + &QueryBuilder::default() + .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .query(), + ) + .await; + + let stats = env.tracker.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 0); + + drop(stats); + + env.stop().await; + } + + #[tokio::test] + async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_public().into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let client_ip = local_ip().unwrap(); + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + { + let client = Client::bind(*env.bind_address(), client_ip); + let status = client.announce(&announce_query).await.status(); + + assert_eq!(status, StatusCode::OK); + } + + let peers = env.tracker.get_torrent_peers(&info_hash); + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), client_ip); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + env.stop().await; + } + + #[tokio::test] + async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + INIT.call_once(|| { + 
tracing_stderr_init(LevelFilter::ERROR); + }); + + /* We assume that both the client and tracker share the same public IP. + + client <-> tracker <-> Internet + 127.0.0.1 external_ip = "2.137.87.41" + */ + let env = + Started::new(&configuration::ephemeral_with_external_ip(IpAddr::from_str("2.137.87.41").unwrap()).into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + { + let client = Client::bind(*env.bind_address(), client_ip); + let status = client.announce(&announce_query).await.status(); + + assert_eq!(status, StatusCode::OK); + } + + let peers = env.tracker.get_torrent_peers(&info_hash); + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), env.tracker.get_maybe_external_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + env.stop().await; + } + + #[tokio::test] + async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( + ) { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + /* We assume that both the client and tracker share the same public IP. 
+ + client <-> tracker <-> Internet + ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" + */ + + let env = Started::new( + &configuration::ephemeral_with_external_ip(IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()) + .into(), + ) + .await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); + let client_ip = loopback_ip; + + let announce_query = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) + .query(); + + { + let client = Client::bind(*env.bind_address(), client_ip); + let status = client.announce(&announce_query).await.status(); + + assert_eq!(status, StatusCode::OK); + } + + let peers = env.tracker.get_torrent_peers(&info_hash); + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), env.tracker.get_maybe_external_ip().unwrap()); + assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); + + env.stop().await; + } + + #[tokio::test] + async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( + ) { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + /* + client <-> http proxy <-> tracker <-> Internet + ip: header: config: peer addr: + 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 + */ + + let env = Started::new(&configuration::ephemeral_with_reverse_proxy().into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); + + { + let client = Client::new(*env.bind_address()); + let status = client + .announce_with_header( + &announce_query, + "X-Forwarded-For", + "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", + ) + .await + .status(); + + assert_eq!(status, 
StatusCode::OK); + } + + let peers = env.tracker.get_torrent_peers(&info_hash); + let peer_addr = peers[0].peer_addr; + + assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); + + env.stop().await; + } + } + + mod receiving_an_scrape_request { + + // Scrape documentation: + // + // BEP 48. Tracker Protocol Extension: Scrape + // https://www.bittorrent.org/beps/bep_0048.html + // + // Vuze (bittorrent client) docs: + // https://wiki.vuze.com/w/Scrape + + use std::net::{IpAddr, Ipv6Addr, SocketAddrV6}; + use std::str::FromStr; + + use aquatic_udp_protocol::PeerId; + use tokio::net::TcpListener; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + + use crate::common::fixtures::invalid_info_hashes; + use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::servers::http::asserts::{ + assert_cannot_parse_query_params_error_response, assert_missing_query_params_for_scrape_request_error_response, + assert_scrape_response, + }; + use crate::servers::http::client::Client; + use crate::servers::http::requests::scrape::QueryBuilder; + use crate::servers::http::responses::scrape::{self, File, ResponseBuilder}; + use crate::servers::http::{requests, Started}; + + #[tokio::test] + #[allow(dead_code)] + async fn should_fail_when_the_request_is_empty() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_public().into()).await; + let response = Client::new(*env.bind_address()).get("scrape").await; + + assert_missing_query_params_for_scrape_request_error_response(response).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_info_hash_param_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = 
Started::new(&configuration::ephemeral_public().into()).await; + + let mut params = QueryBuilder::default().query().params(); + + for invalid_value in &invalid_info_hashes() { + params.set_one_info_hash_param(invalid_value); + + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; + + assert_cannot_parse_query_params_error_response(response, "").await; + } + + env.stop().await; + } + + #[tokio::test] + async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_public().into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ); + + let response = Client::new(*env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_public().into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000001")) + .with_no_bytes_pending_to_download() + .build(), + ); + + let response = 
Client::new(*env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 1, + downloaded: 0, + incomplete: 0, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_public().into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(*env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + assert_scrape_response(response, &scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_accept_multiple_infohashes() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_public().into()).await; + + let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); + + let response = Client::new(*env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .add_info_hash(&info_hash1) + .add_info_hash(&info_hash2) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file(info_hash1.bytes(), File::zeroed()) + .add_file(info_hash2.bytes(), File::zeroed()) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + env.stop().await; + } + + #[tokio::test] + async fn 
should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_public().into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + Client::new(*env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let stats = env.tracker.get_stats().await; + + assert_eq!(stats.tcp4_scrapes_handled, 1); + + drop(stats); + + env.stop().await; + } + + #[tokio::test] + async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + if TcpListener::bind(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0)) + .await + .is_err() + { + return; // we cannot bind to a ipv6 socket, so we will skip this test + } + + let env = Started::new(&configuration::ephemeral_ipv6().into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + Client::bind(*env.bind_address(), IpAddr::from_str("::1").unwrap()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let stats = env.tracker.get_stats().await; + + assert_eq!(stats.tcp6_scrapes_handled, 1); + + drop(stats); + + env.stop().await; + } + } +} + +mod configured_as_whitelisted { + + mod and_receiving_an_announce_request { + use std::str::FromStr; + + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + + use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::servers::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; + use crate::servers::http::client::Client; + use crate::servers::http::requests::announce::QueryBuilder; 
+ use crate::servers::http::Started; + + #[tokio::test] + async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_listed().into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(*env.bind_address()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_torrent_not_in_whitelist_error_response(response).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_allow_announcing_a_whitelisted_torrent() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_listed().into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + env.tracker + .add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the torrent to the whitelist"); + + let response = Client::new(*env.bind_address()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_is_announce_response(response).await; + + env.stop().await; + } + } + + mod receiving_an_scrape_request { + use std::str::FromStr; + + use aquatic_udp_protocol::PeerId; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + + use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::servers::http::asserts::assert_scrape_response; + use crate::servers::http::client::Client; + use crate::servers::http::responses::scrape::{File, ResponseBuilder}; + use crate::servers::http::{requests, Started}; + + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { + INIT.call_once(|| { + 
tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_listed().into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ); + + let response = Client::new(*env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_listed().into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ); + + env.tracker + .add_torrent_to_whitelist(&info_hash) + .await + .expect("should add the torrent to the whitelist"); + + let response = Client::new(*env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + env.stop().await; + } + } +} + +mod configured_as_private { + + mod and_receiving_an_announce_request { + use std::str::FromStr; + use std::time::Duration; + + use 
torrust_tracker::core::auth::Key; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + + use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::servers::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; + use crate::servers::http::client::Client; + use crate::servers::http::requests::announce::QueryBuilder; + use crate::servers::http::Started; + + #[tokio::test] + async fn should_respond_to_authenticated_peers() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_private().into()).await; + + let expiring_key = env.tracker.generate_auth_key(Some(Duration::from_secs(60))).await.unwrap(); + + let response = Client::authenticated(*env.bind_address(), expiring_key.key()) + .announce(&QueryBuilder::default().query()) + .await; + + assert_is_announce_response(response).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_private().into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + let response = Client::new(*env.bind_address()) + .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .await; + + assert_authentication_error_response(response).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_if_the_key_query_param_cannot_be_parsed() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_private().into()).await; + + let invalid_key = "INVALID_KEY"; + + let response = Client::new(*env.bind_address()) + .get(&format!( + 
"announce/{invalid_key}?info_hash=%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0" + )) + .await; + + assert_authentication_error_response(response).await; + } + + #[tokio::test] + async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_private().into()).await; + + // The tracker does not have this key + let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let response = Client::authenticated(*env.bind_address(), unregistered_key) + .announce(&QueryBuilder::default().query()) + .await; + + assert_authentication_error_response(response).await; + + env.stop().await; + } + } + + mod receiving_an_scrape_request { + + use std::str::FromStr; + use std::time::Duration; + + use aquatic_udp_protocol::PeerId; + use torrust_tracker::core::auth::Key; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + + use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::servers::http::asserts::{assert_authentication_error_response, assert_scrape_response}; + use crate::servers::http::client::Client; + use crate::servers::http::responses::scrape::{File, ResponseBuilder}; + use crate::servers::http::{requests, Started}; + + #[tokio::test] + async fn should_fail_if_the_key_query_param_cannot_be_parsed() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_private().into()).await; + + let invalid_key = "INVALID_KEY"; + + let response = Client::new(*env.bind_address()) + .get(&format!( + 
"scrape/{invalid_key}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" + )) + .await; + + assert_authentication_error_response(response).await; + } + + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_private().into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ); + + let response = Client::new(*env.bind_address()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral_private().into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ); + + let expiring_key = env.tracker.generate_auth_key(Some(Duration::from_secs(60))).await.unwrap(); + + let response = Client::authenticated(*env.bind_address(), expiring_key.key()) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default() + .add_file( + info_hash.bytes(), + File { + complete: 0, + 
downloaded: 0, + incomplete: 1, + }, + ) + .build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + // There is not authentication error + // code-review: should this really be this way? + + let env = Started::new(&configuration::ephemeral_private().into()).await; + + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + + env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ); + + let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); + + let response = Client::authenticated(*env.bind_address(), false_key) + .scrape( + &requests::scrape::QueryBuilder::default() + .with_one_info_hash(&info_hash) + .query(), + ) + .await; + + let expected_scrape_response = ResponseBuilder::default().add_file(info_hash.bytes(), File::zeroed()).build(); + + assert_scrape_response(response, &expected_scrape_response).await; + + env.stop().await; + } + } +} + +mod configured_as_private_and_whitelisted { + + mod and_receiving_an_announce_request {} + + mod receiving_an_scrape_request {} +} diff --git a/tests/servers/http/v1/mod.rs b/tests/servers/http/v1/mod.rs new file mode 100644 index 000000000..2943dbb50 --- /dev/null +++ b/tests/servers/http/v1/mod.rs @@ -0,0 +1 @@ +pub mod contract; diff --git a/tests/servers/mod.rs b/tests/servers/mod.rs new file mode 100644 index 000000000..65e9a665b --- /dev/null +++ b/tests/servers/mod.rs @@ -0,0 +1,4 @@ +mod api; +pub mod health_check_api; +mod http; +mod udp; diff --git a/tests/servers/udp/asserts.rs b/tests/servers/udp/asserts.rs new file mode 100644 index 000000000..bf8fb6728 --- /dev/null +++ b/tests/servers/udp/asserts.rs @@ 
-0,0 +1,23 @@ +use aquatic_udp_protocol::{Response, TransactionId}; + +pub fn is_error_response(response: &Response, error_message: &str) -> bool { + match response { + Response::Error(error_response) => error_response.message.starts_with(error_message), + _ => false, + } +} + +pub fn is_connect_response(response: &Response, transaction_id: TransactionId) -> bool { + match response { + Response::Connect(connect_response) => connect_response.transaction_id == transaction_id, + _ => false, + } +} + +pub fn is_ipv4_announce_response(response: &Response) -> bool { + matches!(response, Response::AnnounceIpv4(_)) +} + +pub fn is_scrape_response(response: &Response) -> bool { + matches!(response, Response::Scrape(_)) +} diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs new file mode 100644 index 000000000..1f9b71b62 --- /dev/null +++ b/tests/servers/udp/contract.rs @@ -0,0 +1,269 @@ +// UDP tracker documentation: +// +// BEP 15. UDP Tracker Protocol for BitTorrent +// https://www.bittorrent.org/beps/bep_0015.html + +use core::panic; + +use aquatic_udp_protocol::{ConnectRequest, ConnectionId, Response, TransactionId}; +use torrust_tracker::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; +use torrust_tracker::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; +use torrust_tracker_test_helpers::configuration; +use tracing::level_filters::LevelFilter; + +use crate::common::logging::{tracing_stderr_init, INIT}; +use crate::servers::udp::asserts::is_error_response; +use crate::servers::udp::Started; + +fn empty_udp_request() -> [u8; MAX_PACKET_SIZE] { + [0; MAX_PACKET_SIZE] +} + +async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { + let connect_request = ConnectRequest { transaction_id }; + + match client.send(connect_request.into()).await { + Ok(_) => (), + Err(err) => panic!("{err}"), + }; + + let response = match 
client.receive().await { + Ok(response) => response, + Err(err) => panic!("{err}"), + }; + + match response { + Response::Connect(connect_response) => connect_response.connection_id, + _ => panic!("error connecting to udp server {:?}", response), + } +} + +#[tokio::test] +async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { + Ok(udp_client) => udp_client, + Err(err) => panic!("{err}"), + }; + + match client.client.send(&empty_udp_request()).await { + Ok(_) => (), + Err(err) => panic!("{err}"), + }; + + let response = match client.client.receive().await { + Ok(response) => response, + Err(err) => panic!("{err}"), + }; + + let response = Response::parse_bytes(&response, true).unwrap(); + + assert!(is_error_response(&response, "bad request")); + + env.stop().await; +} + +mod receiving_a_connection_request { + use aquatic_udp_protocol::{ConnectRequest, TransactionId}; + use torrust_tracker::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; + use torrust_tracker_configuration::DEFAULT_TIMEOUT; + use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + + use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::servers::udp::asserts::is_connect_response; + use crate::servers::udp::Started; + + #[tokio::test] + async fn should_return_a_connect_response() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { + Ok(udp_tracker_client) => udp_tracker_client, + Err(err) => panic!("{err}"), + }; + + let connect_request = ConnectRequest { + transaction_id: TransactionId::new(123), + 
}; + + match client.send(connect_request.into()).await { + Ok(_) => (), + Err(err) => panic!("{err}"), + }; + + let response = match client.receive().await { + Ok(response) => response, + Err(err) => panic!("{err}"), + }; + + assert!(is_connect_response(&response, TransactionId::new(123))); + + env.stop().await; + } +} + +mod receiving_an_announce_request { + use std::net::Ipv4Addr; + + use aquatic_udp_protocol::{ + AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, + PeerKey, Port, TransactionId, + }; + use torrust_tracker::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; + use torrust_tracker_configuration::DEFAULT_TIMEOUT; + use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + + use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::servers::udp::asserts::is_ipv4_announce_response; + use crate::servers::udp::contract::send_connection_request; + use crate::servers::udp::Started; + + pub async fn send_and_get_announce(tx_id: TransactionId, c_id: ConnectionId, client: &UdpTrackerClient) { + // Send announce request + + let announce_request = AnnounceRequest { + connection_id: ConnectionId(c_id.0), + action_placeholder: AnnounceActionPlaceholder::default(), + transaction_id: tx_id, + info_hash: InfoHash([0u8; 20]), + peer_id: PeerId([255u8; 20]), + bytes_downloaded: NumberOfBytes(0i64.into()), + bytes_uploaded: NumberOfBytes(0i64.into()), + bytes_left: NumberOfBytes(0i64.into()), + event: AnnounceEvent::Started.into(), + ip_address: Ipv4Addr::new(0, 0, 0, 0).into(), + key: PeerKey::new(0i32), + peers_wanted: NumberOfPeers(1i32.into()), + port: Port(client.client.socket.local_addr().unwrap().port().into()), + }; + + match client.send(announce_request.into()).await { + Ok(_) => (), + Err(err) => panic!("{err}"), + }; + + let response = match client.receive().await { + Ok(response) => response, + Err(err) => panic!("{err}"), + }; + + 
// println!("test response {response:?}"); + + assert!(is_ipv4_announce_response(&response)); + } + + #[tokio::test] + async fn should_return_an_announce_response() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { + Ok(udp_tracker_client) => udp_tracker_client, + Err(err) => panic!("{err}"), + }; + + let tx_id = TransactionId::new(123); + + let c_id = send_connection_request(tx_id, &client).await; + + send_and_get_announce(tx_id, c_id, &client).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_return_many_announce_response() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { + Ok(udp_tracker_client) => udp_tracker_client, + Err(err) => panic!("{err}"), + }; + + let tx_id = TransactionId::new(123); + + let c_id = send_connection_request(tx_id, &client).await; + + for x in 0..1000 { + tracing::info!("req no: {x}"); + send_and_get_announce(tx_id, c_id, &client).await; + } + + env.stop().await; + } +} + +mod receiving_an_scrape_request { + use aquatic_udp_protocol::{ConnectionId, InfoHash, ScrapeRequest, TransactionId}; + use torrust_tracker::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; + use torrust_tracker_configuration::DEFAULT_TIMEOUT; + use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + + use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::servers::udp::asserts::is_scrape_response; + use crate::servers::udp::contract::send_connection_request; + use crate::servers::udp::Started; + + #[tokio::test] + async fn should_return_a_scrape_response() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + 
}); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { + Ok(udp_tracker_client) => udp_tracker_client, + Err(err) => panic!("{err}"), + }; + + let connection_id = send_connection_request(TransactionId::new(123), &client).await; + + // Send scrape request + + // Full scrapes are not allowed you need to pass an array of info hashes otherwise + // it will return "bad request" error with empty vector + + let empty_info_hash = vec![InfoHash([0u8; 20])]; + + let scrape_request = ScrapeRequest { + connection_id: ConnectionId(connection_id.0), + transaction_id: TransactionId::new(123i32), + info_hashes: empty_info_hash, + }; + + match client.send(scrape_request.into()).await { + Ok(_) => (), + Err(err) => panic!("{err}"), + }; + + let response = match client.receive().await { + Ok(response) => response, + Err(err) => panic!("{err}"), + }; + + assert!(is_scrape_response(&response)); + + env.stop().await; + } +} diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs new file mode 100644 index 000000000..b7ac2336c --- /dev/null +++ b/tests/servers/udp/environment.rs @@ -0,0 +1,115 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker::bootstrap::app::initialize_with_configuration; +use torrust_tracker::core::Tracker; +use torrust_tracker::servers::registar::Registar; +use torrust_tracker::servers::udp::server::spawner::Spawner; +use torrust_tracker::servers::udp::server::states::{Running, Stopped}; +use torrust_tracker::servers::udp::server::Server; +use torrust_tracker_configuration::{Configuration, UdpTracker, DEFAULT_TIMEOUT}; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; + +pub struct Environment +where + S: std::fmt::Debug + std::fmt::Display, +{ + pub config: Arc, + pub tracker: Arc, + pub registar: Registar, + pub server: Server, +} + +impl Environment +where + S: 
std::fmt::Debug + std::fmt::Display, +{ + /// Add a torrent to the tracker + #[allow(dead_code)] + pub fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { + self.tracker.upsert_peer_and_get_stats(info_hash, peer); + } +} + +impl Environment { + #[allow(dead_code)] + pub fn new(configuration: &Arc) -> Self { + let tracker = initialize_with_configuration(configuration); + + let udp_tracker = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); + + let config = Arc::new(udp_tracker[0].clone()); + + let bind_to = config.bind_address; + + let server = Server::new(Spawner::new(bind_to)); + + Self { + config, + tracker, + registar: Registar::default(), + server, + } + } + + #[allow(dead_code)] + pub async fn start(self) -> Environment { + Environment { + config: self.config, + tracker: self.tracker.clone(), + registar: self.registar.clone(), + server: self.server.start(self.tracker, self.registar.give_form()).await.unwrap(), + } + } +} + +impl Environment { + pub async fn new(configuration: &Arc) -> Self { + tokio::time::timeout(DEFAULT_TIMEOUT, Environment::::new(configuration).start()) + .await + .expect("it should create an environment within the timeout") + } + + #[allow(dead_code)] + pub async fn stop(self) -> Environment { + let stopped = tokio::time::timeout(DEFAULT_TIMEOUT, self.server.stop()) + .await + .expect("it should stop the environment within the timeout"); + + Environment { + config: self.config, + tracker: self.tracker, + registar: Registar::default(), + server: stopped.expect("it stop the udp tracker service"), + } + } + + pub fn bind_address(&self) -> SocketAddr { + self.server.state.local_addr + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use tokio::time::sleep; + use torrust_tracker_test_helpers::configuration; + use tracing::level_filters::LevelFilter; + + use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::servers::udp::Started; + + #[tokio::test] + async fn 
it_should_make_and_stop_udp_server() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); + + let env = Started::new(&configuration::ephemeral().into()).await; + sleep(Duration::from_secs(1)).await; + env.stop().await; + sleep(Duration::from_secs(1)).await; + } +} diff --git a/tests/servers/udp/mod.rs b/tests/servers/udp/mod.rs new file mode 100644 index 000000000..7eea8683f --- /dev/null +++ b/tests/servers/udp/mod.rs @@ -0,0 +1,7 @@ +use torrust_tracker::servers::udp::server::states::Running; + +pub mod asserts; +pub mod contract; +pub mod environment; + +pub type Started = environment::Environment;